// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *	     				Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/inetdevice.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

#include <trace/events/tcp.h>

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

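/* Pick the initial sequence number for a connection from the addresses and
 * ports of the received segment (a keyed hash, see secure_tcp_seq()).
 */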
static u32 tcp_v4_init_seq(const struct sk_buff *skb)
{
	return secure_tcp_seq(ip_hdr(skb)->daddr,
			      ip_hdr(skb)->saddr,
			      tcp_hdr(skb)->dest,
			      tcp_hdr(skb)->source);
}

static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
	return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
}

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct inet_timewait_sock *tw = inet_twsk(sktw);
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);
	int reuse = sock_net(sk)->ipv4.sysctl_tcp_tw_reuse;

	if (reuse == 2) {
		/* Still does not detect *everything* that goes through
		 * lo, since we require a loopback src or dst address
		 * or direct binding to 'lo' interface.
		 */
		bool loopback = false;
		if (tw->tw_bound_dev_if == LOOPBACK_IFINDEX)
			loopback = true;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == AF_INET6) {
			if (ipv6_addr_loopback(&tw->tw_v6_daddr) ||
			    ipv6_addr_v4mapped_loopback(&tw->tw_v6_daddr) ||
			    ipv6_addr_loopback(&tw->tw_v6_rcv_saddr) ||
			    ipv6_addr_v4mapped_loopback(&tw->tw_v6_rcv_saddr))
				loopback = true;
		} else
#endif
		{
			if (ipv4_is_loopback(tw->tw_daddr) ||
			    ipv4_is_loopback(tw->tw_rcv_saddr))
				loopback = true;
		}
		if (!loopback)
			reuse = 0;
	}

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (!twp || (reuse && time_after32(ktime_get_seconds(),
					    tcptw->tw_ts_recent_stamp)))) {
		/* In case of repair and re-using TIME-WAIT sockets we still
		 * want to be sure that it is safe as above but honor the
		 * sequence numbers and time stamps set as part of the repair
		 * process.
		 *
		 * Without this check re-using a TIME-WAIT socket with TCP
		 * repair would accumulate a -1 on the repair assigned
		 * sequence number. The first time it is reused the sequence
		 * is -1, the second time -2, etc. This fixes that issue
		 * without appearing to create any others.
		 */
		if (likely(!tp->repair)) {
			u32 seq = tcptw->tw_snd_nxt + 65535 + 2;

			if (!seq)
				seq = 1;
			WRITE_ONCE(tp->write_seq, seq);
			tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
			tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		}
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);

static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			      int addr_len)
{
	/* This check is replicated from tcp_v4_connect() and intended to
	 * prevent BPF program called below from accessing bytes that are out
	 * of the bound specified by user in addr_len.
	 */
	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	sock_owned_by_me(sk);

	return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr);
}

/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     lockdep_sock_is_held(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	sk_rcv_saddr_set(sk, inet->inet_saddr);

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			WRITE_ONCE(tp->write_seq, 0);
	}

	inet->inet_dport = usin->sin_port;
	sk_daddr_set(sk, daddr);

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(tcp_death_row, sk);
	if (err)
		goto failure;

	sk_set_txhash(sk);

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);
	rt = NULL;

	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			WRITE_ONCE(tp->write_seq,
				   secure_tcp_seq(inet->inet_saddr,
						  inet->inet_daddr,
						  inet->inet_sport,
						  usin->sin_port));
		tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
						 inet->inet_saddr,
						 inet->inet_daddr);
	}

	inet->inet_id = prandom_u32();

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto failure;

	err = tcp_connect(sk);

	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);

/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct dst_entry *dst;
	u32 mtu;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;
	mtu = tcp_sk(sk)->mtu_info;
	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);

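/* ICMP redirect received: ask the cached route, if any, to update its next hop. */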
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}


/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq, bool abort)
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/* ICMPs are not backlogged, hence we cannot get
	 * an established socket here.
	 */
	if (seq != tcp_rsk(req)->snt_isn) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
	} else if (abort) {
		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		tcp_listendrop(req->rsk_listener);
	}
	reqsk_put(req);
}
EXPORT_SYMBOL(tcp_req_err);

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

int tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	u32 seq, snd_una;
	s32 remaining;
	u32 delta_us;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
				       th->dest, iph->saddr, ntohs(th->source),
				       inet_iif(icmp_skb), 0);
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
		return -ENOENT;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return 0;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		tcp_req_err(sk, seq, type == ICMP_PARAMETERPROB ||
				     type == ICMP_TIME_EXCEEDED ||
				     (type == ICMP_DEST_UNREACH &&
				      (code == ICMP_NET_UNREACH ||
				       code == ICMP_HOST_UNREACH)));
		return 0;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = rcu_dereference(tp->fastopen_rsk);
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		if (!sock_owned_by_user(sk))
			do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always <576 bytes so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;

		skb = tcp_rtx_queue_head(sk);
		if (WARN_ON_ONCE(!skb))
			break;

		icsk->icsk_backoff--;
		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
					       TCP_TIMEOUT_INIT;
		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);


		tcp_mstamp_refresh(tp);
		delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
		remaining = icsk->icsk_rto -
			    usecs_to_jiffies(delta_us);

		if (remaining > 0) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters, even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 *							--ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
	return 0;
}

void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);

/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So that we build reply only basing on parameters
 *		arrived with segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

649
static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
L
Linus Torvalds 已提交
650
{
651
	const struct tcphdr *th = tcp_hdr(skb);
652 653 654
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
655
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
656 657
#endif
	} rep;
L
Linus Torvalds 已提交
658
	struct ip_reply_arg arg;
659
#ifdef CONFIG_TCP_MD5SIG
660
	struct tcp_md5sig_key *key = NULL;
661 662 663 664
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
665
#endif
666
	u64 transmit_time = 0;
J
Jon Maxwell 已提交
667
	struct sock *ctl_sk;
668
	struct net *net;
L
Linus Torvalds 已提交
669 670 671 672 673

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

674 675 676 677
	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
L
Linus Torvalds 已提交
678 679 680
		return;

	/* Swap the send and the receive. */
681 682 683 684 685
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;
L
Linus Torvalds 已提交
686 687

	if (th->ack) {
688
		rep.th.seq = th->ack_seq;
L
Linus Torvalds 已提交
689
	} else {
690 691 692
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
L
Linus Torvalds 已提交
693 694
	}

695
	memset(&arg, 0, sizeof(arg));
696 697 698
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

699
	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
700
#ifdef CONFIG_TCP_MD5SIG
701
	rcu_read_lock();
702
	hash_location = tcp_parse_md5sig_option(th);
703
	if (sk && sk_fullsock(sk)) {
704
		const union tcp_md5_addr *addr;
705
		int l3index;
706

707 708 709 710
		/* sdif set, means packet ingressed via a device
		 * in an L3 domain and inet_iif is set to it.
		 */
		l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
711
		addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
712
		key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
713
	} else if (hash_location) {
714
		const union tcp_md5_addr *addr;
715 716
		int sdif = tcp_v4_sdif(skb);
		int dif = inet_iif(skb);
717
		int l3index;
718

719 720 721 722 723 724 725
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we do not lose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
726 727
		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
					     ip_hdr(skb)->saddr,
728
					     th->source, ip_hdr(skb)->daddr,
729
					     ntohs(th->source), dif, sdif);
730 731
		/* don't send rst if it can't find key */
		if (!sk1)
732 733
			goto out;

734 735 736 737
		/* sdif set, means packet ingressed via a device
		 * in an L3 domain and dif is set to it.
		 */
		l3index = sdif ? dif : 0;
738
		addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
739
		key = tcp_md5_do_lookup(sk1, l3index, addr, AF_INET);
740
		if (!key)
741 742
			goto out;

743

744
		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
745
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
746 747
			goto out;

748 749
	}

750 751 752 753 754 755 756 757 758
	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

759
		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
760 761
				     key, ip_hdr(skb)->saddr,
				     ip_hdr(skb)->daddr, &rep.th);
762 763
	}
#endif
764 765
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
766
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
L
Linus Torvalds 已提交
767
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
768 769
	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;

770
	/* When socket is gone, all binding information is lost.
A
Alexey Kuznetsov 已提交
771 772
	 * routing might fail in this case. No choice here, if we choose to force
	 * input interface, we will misroute in case of asymmetric route.
773
	 */
774
	if (sk) {
A
Alexey Kuznetsov 已提交
775
		arg.bound_dev_if = sk->sk_bound_dev_if;
776 777
		if (sk_fullsock(sk))
			trace_tcp_send_reset(sk, skb);
778
	}
L
Linus Torvalds 已提交
779

780 781 782
	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));

783
	arg.tos = ip_hdr(skb)->tos;
784
	arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
785
	local_bh_disable();
786
	ctl_sk = this_cpu_read(*net->ipv4.tcp_sk);
787
	if (sk) {
J
Jon Maxwell 已提交
788 789
		ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
				   inet_twsk(sk)->tw_mark : sk->sk_mark;
790 791
		ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
				   inet_twsk(sk)->tw_priority : sk->sk_priority;
792
		transmit_time = tcp_transmit_time(sk);
793
	}
J
Jon Maxwell 已提交
794
	ip_send_unicast_reply(ctl_sk,
795
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
796
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
797 798
			      &arg, arg.iov[0].iov_len,
			      transmit_time);
L
Linus Torvalds 已提交
799

J
Jon Maxwell 已提交
800
	ctl_sk->sk_mark = 0;
E
Eric Dumazet 已提交
801 802
	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
803
	local_bh_enable();
804 805

#ifdef CONFIG_TCP_MD5SIG
806 807
out:
	rcu_read_unlock();
808
#endif
L
Linus Torvalds 已提交
809 810 811 812 813 814
}

/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

815
static void tcp_v4_send_ack(const struct sock *sk,
816
			    struct sk_buff *skb, u32 seq, u32 ack,
817
			    u32 win, u32 tsval, u32 tsecr, int oif,
818
			    struct tcp_md5sig_key *key,
819
			    int reply_flags, u8 tos)
L
Linus Torvalds 已提交
820
{
821
	const struct tcphdr *th = tcp_hdr(skb);
L
Linus Torvalds 已提交
822 823
	struct {
		struct tcphdr th;
824
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
825
#ifdef CONFIG_TCP_MD5SIG
826
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
827 828
#endif
			];
L
Linus Torvalds 已提交
829
	} rep;
830
	struct net *net = sock_net(sk);
L
Linus Torvalds 已提交
831
	struct ip_reply_arg arg;
J
Jon Maxwell 已提交
832
	struct sock *ctl_sk;
833
	u64 transmit_time;
L
Linus Torvalds 已提交
834 835

	memset(&rep.th, 0, sizeof(struct tcphdr));
836
	memset(&arg, 0, sizeof(arg));
L
Linus Torvalds 已提交
837 838 839

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
840
	if (tsecr) {
841 842 843
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
844 845
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
846
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
L
Linus Torvalds 已提交
847 848 849 850 851 852 853 854 855 856 857
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

858 859
#ifdef CONFIG_TCP_MD5SIG
	if (key) {
860
		int offset = (tsecr) ? 3 : 0;
861 862 863 864 865 866 867 868

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

869
		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
870 871
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
872 873
	}
#endif
874
	arg.flags = reply_flags;
875 876
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
L
Linus Torvalds 已提交
877 878
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
879 880
	if (oif)
		arg.bound_dev_if = oif;
881
	arg.tos = tos;
882
	arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
883
	local_bh_disable();
884
	ctl_sk = this_cpu_read(*net->ipv4.tcp_sk);
885 886
	ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
			   inet_twsk(sk)->tw_mark : sk->sk_mark;
887 888
	ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
			   inet_twsk(sk)->tw_priority : sk->sk_priority;
889
	transmit_time = tcp_transmit_time(sk);
J
Jon Maxwell 已提交
890
	ip_send_unicast_reply(ctl_sk,
891
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
892
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
893 894
			      &arg, arg.iov[0].iov_len,
			      transmit_time);
L
Linus Torvalds 已提交
895

J
Jon Maxwell 已提交
896
	ctl_sk->sk_mark = 0;
E
Eric Dumazet 已提交
897
	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
898
	local_bh_enable();
L
Linus Torvalds 已提交
899 900 901 902
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
903
	struct inet_timewait_sock *tw = inet_twsk(sk);
904
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
L
Linus Torvalds 已提交
905

906
	tcp_v4_send_ack(sk, skb,
907
			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
908
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
909
			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
910 911
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
912
			tcp_twsk_md5_key(tcptw),
913 914
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
915
			);
L
Linus Torvalds 已提交
916

917
	inet_twsk_put(tw);
L
Linus Torvalds 已提交
918 919
}

920
static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
921
				  struct request_sock *req)
L
Linus Torvalds 已提交
922
{
923
	const union tcp_md5_addr *addr;
924
	int l3index;
925

926 927 928
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
929 930 931
	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
					     tcp_sk(sk)->snd_nxt;

932 933 934 935 936
	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
937
	addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
938
	l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
939
	tcp_v4_send_ack(sk, skb, seq,
940 941
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
942
			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
943 944
			req->ts_recent,
			0,
945
			tcp_md5_do_lookup(sk, l3index, addr, AF_INET),
946 947
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
L
Linus Torvalds 已提交
948 949 950
}

/*
951
 *	Send a SYN-ACK after having received a SYN.
952
 *	This still operates on a request_sock only, not on a big
L
Linus Torvalds 已提交
953 954
 *	socket.
 */
955
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
956
			      struct flowi *fl,
957
			      struct request_sock *req,
958
			      struct tcp_fastopen_cookie *foc,
959
			      enum tcp_synack_type synack_type)
L
Linus Torvalds 已提交
960
{
961
	const struct inet_request_sock *ireq = inet_rsk(req);
962
	struct flowi4 fl4;
L
Linus Torvalds 已提交
963
	int err = -1;
964
	struct sk_buff *skb;
L
Linus Torvalds 已提交
965 966

	/* First, grab a route. */
967
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
968
		return -1;
L
Linus Torvalds 已提交
969

970
	skb = tcp_make_synack(sk, dst, req, foc, synack_type);
L
Linus Torvalds 已提交
971 972

	if (skb) {
973
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
L
Linus Torvalds 已提交
974

975
		rcu_read_lock();
976 977
		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
978 979
					    rcu_dereference(ireq->ireq_opt));
		rcu_read_unlock();
980
		err = net_xmit_eval(err);
L
Linus Torvalds 已提交
981 982 983 984 985 986
	}

	return err;
}

/*
987
 *	IPv4 request_sock destructor.
L
Linus Torvalds 已提交
988
 */
989
static void tcp_v4_reqsk_destructor(struct request_sock *req)
L
Linus Torvalds 已提交
990
{
E
Eric Dumazet 已提交
991
	kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
L
Linus Torvalds 已提交
992 993
}

994 995 996 997 998 999 1000
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

1001
DEFINE_STATIC_KEY_FALSE(tcp_md5_needed);
1002 1003
EXPORT_SYMBOL(tcp_md5_needed);

1004
/* Find the Key structure for an address.  */
1005
struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
1006 1007
					   const union tcp_md5_addr *addr,
					   int family)
1008
{
1009
	const struct tcp_sock *tp = tcp_sk(sk);
E
Eric Dumazet 已提交
1010
	struct tcp_md5sig_key *key;
1011
	const struct tcp_md5sig_info *md5sig;
1012 1013 1014
	__be32 mask;
	struct tcp_md5sig_key *best_match = NULL;
	bool match;
1015

1016 1017
	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
1018
				       lockdep_sock_is_held(sk));
1019
	if (!md5sig)
1020
		return NULL;
A
Arnd Bergmann 已提交
1021

1022 1023
	hlist_for_each_entry_rcu(key, &md5sig->head, node,
				 lockdep_sock_is_held(sk)) {
E
Eric Dumazet 已提交
1024 1025
		if (key->family != family)
			continue;
1026 1027
		if (key->l3index && key->l3index != l3index)
			continue;
1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046
		if (family == AF_INET) {
			mask = inet_make_mask(key->prefixlen);
			match = (key->addr.a4.s_addr & mask) ==
				(addr->a4.s_addr & mask);
#if IS_ENABLED(CONFIG_IPV6)
		} else if (family == AF_INET6) {
			match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
						  key->prefixlen);
#endif
		} else {
			match = false;
		}

		if (match && (!best_match ||
			      key->prefixlen > best_match->prefixlen))
			best_match = key;
	}
	return best_match;
}
1047
EXPORT_SYMBOL(__tcp_md5_do_lookup);
1048

1049 1050
static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
						      const union tcp_md5_addr *addr,
1051 1052
						      int family, u8 prefixlen,
						      int l3index)
1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	const struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
1068 1069
	hlist_for_each_entry_rcu(key, &md5sig->head, node,
				 lockdep_sock_is_held(sk)) {
1070 1071
		if (key->family != family)
			continue;
1072 1073
		if (key->l3index && key->l3index != l3index)
			continue;
1074 1075
		if (!memcmp(&key->addr, addr, size) &&
		    key->prefixlen == prefixlen)
E
Eric Dumazet 已提交
1076
			return key;
1077 1078 1079 1080
	}
	return NULL;
}

1081
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1082
					 const struct sock *addr_sk)
1083
{
1084
	const union tcp_md5_addr *addr;
1085
	int l3index;
E
Eric Dumazet 已提交
1086

1087 1088
	l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
						 addr_sk->sk_bound_dev_if);
1089
	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
1090
	return tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
1091 1092 1093 1094
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

/* This can be called on a newly created socket, from other files */
E
Eric Dumazet 已提交
1095
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1096 1097
		   int family, u8 prefixlen, int l3index,
		   const u8 *newkey, u8 newkeylen, gfp_t gfp)
1098 1099
{
	/* Add Key to the list */
1100
	struct tcp_md5sig_key *key;
1101
	struct tcp_sock *tp = tcp_sk(sk);
E
Eric Dumazet 已提交
1102
	struct tcp_md5sig_info *md5sig;
1103

1104
	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index);
1105 1106
	if (key) {
		/* Pre-existing entry - just update that one. */
E
Eric Dumazet 已提交
1107
		memcpy(key->key, newkey, newkeylen);
1108
		key->keylen = newkeylen;
E
Eric Dumazet 已提交
1109 1110
		return 0;
	}
1111

1112
	md5sig = rcu_dereference_protected(tp->md5sig_info,
1113
					   lockdep_sock_is_held(sk));
E
Eric Dumazet 已提交
1114 1115 1116
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
1117 1118
			return -ENOMEM;

E
Eric Dumazet 已提交
1119 1120
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
1121
		rcu_assign_pointer(tp->md5sig_info, md5sig);
E
Eric Dumazet 已提交
1122
	}
1123

1124
	key = sock_kmalloc(sk, sizeof(*key), gfp);
E
Eric Dumazet 已提交
1125 1126
	if (!key)
		return -ENOMEM;
1127
	if (!tcp_alloc_md5sig_pool()) {
1128
		sock_kfree_s(sk, key, sizeof(*key));
E
Eric Dumazet 已提交
1129
		return -ENOMEM;
1130
	}
E
Eric Dumazet 已提交
1131 1132 1133 1134

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
1135
	key->prefixlen = prefixlen;
1136
	key->l3index = l3index;
E
Eric Dumazet 已提交
1137 1138 1139 1140
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
1141 1142
	return 0;
}
E
Eric Dumazet 已提交
1143
EXPORT_SYMBOL(tcp_md5_do_add);
1144

1145
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
1146
		   u8 prefixlen, int l3index)
1147
{
E
Eric Dumazet 已提交
1148 1149
	struct tcp_md5sig_key *key;

1150
	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index);
E
Eric Dumazet 已提交
1151 1152 1153
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
1154
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
E
Eric Dumazet 已提交
1155 1156
	kfree_rcu(key, rcu);
	return 0;
1157
}
E
Eric Dumazet 已提交
1158
EXPORT_SYMBOL(tcp_md5_do_del);
1159

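/* Remove and free every MD5 key attached to this socket. */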
static void tcp_clear_md5_list(struct sock *sk)
1161 1162
{
	struct tcp_sock *tp = tcp_sk(sk);
E
Eric Dumazet 已提交
1163
	struct tcp_md5sig_key *key;
1164
	struct hlist_node *n;
1165
	struct tcp_md5sig_info *md5sig;
1166

1167 1168
	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

1169
	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
E
Eric Dumazet 已提交
1170
		hlist_del_rcu(&key->node);
1171
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
E
Eric Dumazet 已提交
1172
		kfree_rcu(key, rcu);
1173 1174 1175
	}
}

1176 1177
static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
				 char __user *optval, int optlen)
1178 1179 1180
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1181
	const union tcp_md5_addr *addr;
1182
	u8 prefixlen = 32;
1183
	int l3index = 0;
1184 1185 1186 1187

	if (optlen < sizeof(cmd))
		return -EINVAL;

1188
	if (copy_from_user(&cmd, optval, sizeof(cmd)))
1189 1190 1191 1192 1193
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

1194 1195 1196 1197 1198 1199 1200
	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
		prefixlen = cmd.tcpm_prefixlen;
		if (prefixlen > 32)
			return -EINVAL;
	}

1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218
	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_index_rcu(sock_net(sk), cmd.tcpm_ifindex);
		if (dev && netif_is_l3_master(dev))
			l3index = dev->ifindex;

		rcu_read_unlock();

		/* ok to reference set/not set outside of rcu;
		 * right now device MUST be an L3 master
		 */
		if (!dev || !l3index)
			return -EINVAL;
	}

1219 1220
	addr = (union tcp_md5_addr *)&sin->sin_addr.s_addr;

1221
	if (!cmd.tcpm_keylen)
1222
		return tcp_md5_do_del(sk, addr, AF_INET, prefixlen, l3index);
1223 1224 1225 1226

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

1227
	return tcp_md5_do_add(sk, addr, AF_INET, prefixlen, l3index,
1228
			      cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
1229 1230
}

1231 1232 1233
static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   __be32 daddr, __be32 saddr,
				   const struct tcphdr *th, int nbytes)
1234 1235
{
	struct tcp4_pseudohdr *bp;
1236
	struct scatterlist sg;
1237
	struct tcphdr *_th;
1238

1239
	bp = hp->scratch;
1240 1241 1242
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
1243
	bp->protocol = IPPROTO_TCP;
1244
	bp->len = cpu_to_be16(nbytes);
1245

1246 1247 1248 1249 1250 1251 1252
	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
H
Herbert Xu 已提交
1253
	return crypto_ahash_update(hp->md5_req);
1254 1255
}

E
Eric Dumazet 已提交
1256
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
E
Eric Dumazet 已提交
1257
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
1258 1259
{
	struct tcp_md5sig_pool *hp;
H
Herbert Xu 已提交
1260
	struct ahash_request *req;
1261 1262 1263 1264

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
H
Herbert Xu 已提交
1265
	req = hp->md5_req;
1266

H
Herbert Xu 已提交
1267
	if (crypto_ahash_init(req))
1268
		goto clear_hash;
1269
	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
1270 1271 1272
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
H
Herbert Xu 已提交
1273 1274
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
1275 1276 1277 1278
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;
1279

1280 1281 1282 1283
clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
1284
	return 1;
1285 1286
}

1287 1288
int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk,
E
Eric Dumazet 已提交
1289
			const struct sk_buff *skb)
1290
{
1291
	struct tcp_md5sig_pool *hp;
H
Herbert Xu 已提交
1292
	struct ahash_request *req;
E
Eric Dumazet 已提交
1293
	const struct tcphdr *th = tcp_hdr(skb);
1294 1295
	__be32 saddr, daddr;

1296 1297 1298
	if (sk) { /* valid for establish/request sockets */
		saddr = sk->sk_rcv_saddr;
		daddr = sk->sk_daddr;
1299
	} else {
1300 1301 1302
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
1303
	}
1304 1305 1306 1307

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
H
Herbert Xu 已提交
1308
	req = hp->md5_req;
1309

H
Herbert Xu 已提交
1310
	if (crypto_ahash_init(req))
1311 1312
		goto clear_hash;

1313
	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
1314 1315 1316 1317 1318
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
H
Herbert Xu 已提交
1319 1320
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
1321 1322 1323 1324 1325 1326 1327 1328 1329 1330
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
1331
}
1332
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1333

1334 1335
#endif

1336
/* Called with rcu_read_lock() */
1337
static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
1338 1339
				    const struct sk_buff *skb,
				    int dif, int sdif)
1340
{
1341
#ifdef CONFIG_TCP_MD5SIG
1342 1343 1344 1345 1346 1347 1348 1349
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and its wrong.
	 */
1350
	const __u8 *hash_location = NULL;
1351
	struct tcp_md5sig_key *hash_expected;
1352
	const struct iphdr *iph = ip_hdr(skb);
1353
	const struct tcphdr *th = tcp_hdr(skb);
1354
	const union tcp_md5_addr *addr;
1355
	unsigned char newhash[16];
1356 1357 1358 1359 1360 1361
	int genhash, l3index;

	/* sdif set, means packet ingressed via a device
	 * in an L3 domain and dif is set to the l3mdev
	 */
	l3index = sdif ? dif : 0;
1362

1363
	addr = (union tcp_md5_addr *)&iph->saddr;
1364
	hash_expected = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
1365
	hash_location = tcp_parse_md5sig_option(th);
1366 1367 1368

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
E
Eric Dumazet 已提交
1369
		return false;
1370 1371

	if (hash_expected && !hash_location) {
1372
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
E
Eric Dumazet 已提交
1373
		return true;
1374 1375 1376
	}

	if (!hash_expected && hash_location) {
1377
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
E
Eric Dumazet 已提交
1378
		return true;
1379 1380 1381 1382 1383
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
1384 1385
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
1386
				      NULL, skb);
1387 1388

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1389
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
1390
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s L3 index %d\n",
1391 1392 1393
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
1394
				     : "", l3index);
E
Eric Dumazet 已提交
1395
		return true;
1396
	}
E
Eric Dumazet 已提交
1397
	return false;
1398
#endif
1399 1400
	return false;
}
1401

1402 1403
static void tcp_v4_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
1404 1405 1406
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
E
Eric Dumazet 已提交
1407
	struct net *net = sock_net(sk_listener);
1408

1409 1410
	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
E
Eric Dumazet 已提交
1411
	RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
1412 1413
}

1414 1415
static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
					  struct flowi *fl,
1416
					  const struct request_sock *req)
1417
{
1418
	return inet_csk_route_req(sk, &fl->u.ip4, req);
1419 1420
}

1421
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
L
Linus Torvalds 已提交
1422
	.family		=	PF_INET,
1423
	.obj_size	=	sizeof(struct tcp_request_sock),
1424
	.rtx_syn_ack	=	tcp_rtx_synack,
1425 1426
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
L
Linus Torvalds 已提交
1427
	.send_reset	=	tcp_v4_send_reset,
S
stephen hemminger 已提交
1428
	.syn_ack_timeout =	tcp_syn_ack_timeout,
L
Linus Torvalds 已提交
1429 1430
};

1431
const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1432
	.mss_clamp	=	TCP_MSS_DEFAULT,
1433
#ifdef CONFIG_TCP_MD5SIG
1434
	.req_md5_lookup	=	tcp_v4_md5_lookup,
1435
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1436
#endif
1437
	.init_req	=	tcp_v4_init_req,
1438 1439 1440
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v4_init_sequence,
#endif
1441
	.route_req	=	tcp_v4_route_req,
1442 1443
	.init_seq	=	tcp_v4_init_seq,
	.init_ts_off	=	tcp_v4_init_ts_off,
1444
	.send_synack	=	tcp_v4_send_synack,
1445
};
1446

L
Linus Torvalds 已提交
1447 1448 1449
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer to SYNs sent to broadcast or multicast */
E
Eric Dumazet 已提交
1450
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
L
Linus Torvalds 已提交
1451 1452
		goto drop;

O
Octavian Purdila 已提交
1453 1454
	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);
L
Linus Torvalds 已提交
1455 1456

drop:
1457
	tcp_listendrop(sk);
L
Linus Torvalds 已提交
1458 1459
	return 0;
}
E
Eric Dumazet 已提交
1460
EXPORT_SYMBOL(tcp_v4_conn_request);
L
Linus Torvalds 已提交
1461 1462 1463 1464 1465 1466


/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
1467
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1468
				  struct request_sock *req,
1469 1470 1471
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req)
L
Linus Torvalds 已提交
1472
{
1473
	struct inet_request_sock *ireq;
L
Linus Torvalds 已提交
1474 1475 1476
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
1477
#ifdef CONFIG_TCP_MD5SIG
1478
	const union tcp_md5_addr *addr;
1479
	struct tcp_md5sig_key *key;
1480
	int l3index;
1481
#endif
1482
	struct ip_options_rcu *inet_opt;
L
Linus Torvalds 已提交
1483 1484 1485 1486 1487 1488

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
1489
		goto exit_nonewsk;
L
Linus Torvalds 已提交
1490

1491
	newsk->sk_gso_type = SKB_GSO_TCPV4;
1492
	inet_sk_rx_dst_set(newsk, skb);
L
Linus Torvalds 已提交
1493 1494 1495

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
1496
	ireq		      = inet_rsk(req);
1497 1498
	sk_daddr_set(newsk, ireq->ir_rmt_addr);
	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1499
	newsk->sk_bound_dev_if = ireq->ir_iif;
E
Eric Dumazet 已提交
1500 1501 1502
	newinet->inet_saddr   = ireq->ir_loc_addr;
	inet_opt	      = rcu_dereference(ireq->ireq_opt);
	RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
1503
	newinet->mc_index     = inet_iif(skb);
1504
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
1505
	newinet->rcv_tos      = ip_hdr(skb)->tos;
1506
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1507 1508
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1509
	newinet->inet_id = prandom_u32();
L
Linus Torvalds 已提交
1510

E
Eric Dumazet 已提交
1511 1512 1513 1514 1515 1516 1517
	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
1518 1519
	sk_setup_caps(newsk, dst);

1520 1521
	tcp_ca_openreq_child(newsk, dst);

L
Linus Torvalds 已提交
1522
	tcp_sync_mss(newsk, dst_mtu(dst));
E
Eric Dumazet 已提交
1523
	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
1524

L
Linus Torvalds 已提交
1525 1526
	tcp_initialize_rcv_mss(newsk);

1527
#ifdef CONFIG_TCP_MD5SIG
1528
	l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif);
1529
	/* Copy over the MD5 key from the original socket */
1530
	addr = (union tcp_md5_addr *)&newinet->inet_daddr;
1531
	key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
1532
	if (key) {
1533 1534 1535 1536 1537 1538
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
1539
		tcp_md5_do_add(newsk, addr, AF_INET, 32, l3index,
1540
			       key->key, key->keylen, GFP_ATOMIC);
E
Eric Dumazet 已提交
1541
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1542 1543 1544
	}
#endif

1545 1546
	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
1547
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
E
Eric Dumazet 已提交
1548
	if (likely(*own_req)) {
1549
		tcp_move_syn(newtp, req);
E
Eric Dumazet 已提交
1550 1551 1552 1553
		ireq->ireq_opt = NULL;
	} else {
		newinet->inet_opt = NULL;
	}
L
Linus Torvalds 已提交
1554 1555 1556
	return newsk;

exit_overflow:
1557
	NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1558 1559
exit_nonewsk:
	dst_release(dst);
L
Linus Torvalds 已提交
1560
exit:
1561
	tcp_listendrop(sk);
L
Linus Torvalds 已提交
1562
	return NULL;
1563
put_and_exit:
E
Eric Dumazet 已提交
1564
	newinet->inet_opt = NULL;
1565 1566
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
1567
	goto exit;
L
Linus Torvalds 已提交
1568
}
E
Eric Dumazet 已提交
1569
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
L
Linus Torvalds 已提交
1570

1571
static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
L
Linus Torvalds 已提交
1572
{
1573
#ifdef CONFIG_SYN_COOKIES
1574
	const struct tcphdr *th = tcp_hdr(skb);
L
Linus Torvalds 已提交
1575

1576
	if (!th->syn)
C
Cong Wang 已提交
1577
		sk = cookie_v4_check(sk, skb);
L
Linus Torvalds 已提交
1578 1579 1580 1581
#endif
	return sk;
}

1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596
u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
			 struct tcphdr *th, u32 *cookie)
{
	u16 mss = 0;
#ifdef CONFIG_SYN_COOKIES
	mss = tcp_get_syncookie_mss(&tcp_request_sock_ops,
				    &tcp_request_sock_ipv4_ops, sk, th);
	if (mss) {
		*cookie = __cookie_v4_init_sequence(iph, th, &mss);
		tcp_synq_overflow(sk);
	}
#endif
	return mss;
}

L
Linus Torvalds 已提交
1597
/* The socket must have its spinlock held when we get
1598
 * here, unless it is a TCP_LISTEN socket.
L
Linus Torvalds 已提交
1599 1600 1601 1602 1603 1604 1605 1606
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
1607 1608
	struct sock *rsk;

L
Linus Torvalds 已提交
1609
	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1610 1611
		struct dst_entry *dst = sk->sk_rx_dst;

1612
		sock_rps_save_rxhash(sk, skb);
1613
		sk_mark_napi_id(sk, skb);
1614
		if (dst) {
E
Eric Dumazet 已提交
1615
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1616
			    !dst->ops->check(dst, 0)) {
1617 1618 1619 1620
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
1621
		tcp_rcv_established(sk, skb);
L
Linus Torvalds 已提交
1622 1623 1624
		return 0;
	}

E
Eric Dumazet 已提交
1625
	if (tcp_checksum_complete(skb))
L
Linus Torvalds 已提交
1626 1627 1628
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
1629 1630
		struct sock *nsk = tcp_v4_cookie_check(sk, skb);

L
Linus Torvalds 已提交
1631 1632 1633
		if (!nsk)
			goto discard;
		if (nsk != sk) {
1634 1635
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
L
Linus Torvalds 已提交
1636
				goto reset;
1637
			}
L
Linus Torvalds 已提交
1638 1639
			return 0;
		}
1640
	} else
1641
		sock_rps_save_rxhash(sk, skb);
1642

1643
	if (tcp_rcv_state_process(sk, skb)) {
1644
		rsk = sk;
L
Linus Torvalds 已提交
1645
		goto reset;
1646
	}
L
Linus Torvalds 已提交
1647 1648 1649
	return 0;

reset:
1650
	tcp_v4_send_reset(rsk, skb);
L
Linus Torvalds 已提交
1651 1652 1653 1654 1655 1656 1657 1658 1659 1660
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);

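/* Early demux: called at IP receive time, before the routing decision, to
 * look up an ESTABLISHED socket for this segment.  On a hit the socket is
 * attached to the skb and, if its cached input route is still valid for
 * the incoming interface, that dst is reused so the route lookup can be
 * skipped for this packet.
 */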
int tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return 0;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return 0;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return 0;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif, inet_sdif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
	return 0;
}

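/* Queue a segment on the backlog of a socket currently owned by user
 * context.  The segment is checksummed here, and whenever it is contiguous
 * with the backlog tail and their headers agree it is coalesced into that
 * tail, which keeps backlog memory down for trains of small segments.
 * Returns true when the segment cannot be queued and the caller must free
 * it (the socket has already been unlocked), false otherwise.
 */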
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf);
	struct skb_shared_info *shinfo;
	const struct tcphdr *th;
	struct tcphdr *thtail;
	struct sk_buff *tail;
	unsigned int hdrlen;
	bool fragstolen;
	u32 gso_segs;
	int delta;

	/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
	 * we can fix skb->truesize to its real value to avoid future drops.
	 * This is valid because skb is not yet charged to the socket.
	 * It has been noticed that pure SACK packets were sometimes dropped
	 * (if cooked by drivers without the copybreak feature).
	 */
	skb_condense(skb);

	skb_dst_drop(skb);

	if (unlikely(tcp_checksum_complete(skb))) {
		bh_unlock_sock(sk);
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
		return true;
	}

	/* Attempt coalescing to last skb in backlog, even if we are
	 * above the limits.
	 * This is okay because skb capacity is limited to MAX_SKB_FRAGS.
	 */
	th = (const struct tcphdr *)skb->data;
	hdrlen = th->doff * 4;
	shinfo = skb_shinfo(skb);

	if (!shinfo->gso_size)
		shinfo->gso_size = skb->len - hdrlen;

	if (!shinfo->gso_segs)
		shinfo->gso_segs = 1;

	tail = sk->sk_backlog.tail;
	if (!tail)
		goto no_coalesce;
	thtail = (struct tcphdr *)tail->data;

	if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
	    TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield ||
	    ((TCP_SKB_CB(tail)->tcp_flags |
	      TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_SYN | TCPHDR_RST | TCPHDR_URG)) ||
	    !((TCP_SKB_CB(tail)->tcp_flags &
	      TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
	    ((TCP_SKB_CB(tail)->tcp_flags ^
	      TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
#ifdef CONFIG_TLS_DEVICE
	    tail->decrypted != skb->decrypted ||
#endif
	    thtail->doff != th->doff ||
	    memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
		goto no_coalesce;

	__skb_pull(skb, hdrlen);
	if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
		thtail->window = th->window;

		TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;

		if (after(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))
			TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq;

		/* We have to update both TCP_SKB_CB(tail)->tcp_flags and
		 * thtail->fin, so that the fast path in tcp_rcv_established()
		 * is not entered if we append a packet with a FIN.
		 * SYN, RST, URG are not present.
		 * ACK is set on both packets.
		 * PSH : we do not really care in TCP stack,
		 *       at least for 'GRO' packets.
		 */
		thtail->fin |= th->fin;
		TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;

		if (TCP_SKB_CB(skb)->has_rxtstamp) {
			TCP_SKB_CB(tail)->has_rxtstamp = true;
			tail->tstamp = skb->tstamp;
			skb_hwtstamps(tail)->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
		}

		/* Not as strict as GRO. We only need to carry mss max value */
		skb_shinfo(tail)->gso_size = max(shinfo->gso_size,
						 skb_shinfo(tail)->gso_size);

		gso_segs = skb_shinfo(tail)->gso_segs + shinfo->gso_segs;
		skb_shinfo(tail)->gso_segs = min_t(u32, gso_segs, 0xFFFF);

		sk->sk_backlog.len += delta;
		__NET_INC_STATS(sock_net(sk),
				LINUX_MIB_TCPBACKLOGCOALESCE);
		kfree_skb_partial(skb, fragstolen);
		return false;
	}
	__skb_push(skb, hdrlen);

no_coalesce:
	/* Only socket owner can try to collapse/prune rx queues
	 * to reduce memory overhead, so add a little headroom here.
	 * Only a few socket backlogs are likely to be non-empty concurrently.
	 */
	limit += 64*1024;

	if (unlikely(sk_add_backlog(sk, skb, limit))) {
		bh_unlock_sock(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
		return true;
	}
	return false;
}
EXPORT_SYMBOL(tcp_add_backlog);

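/* Run the socket filter on an incoming segment.  The filter may trim the
 * skb, but never below the TCP header.  A non-zero return value tells the
 * caller to drop the segment.
 */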
int tcp_filter(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = (struct tcphdr *)skb->data;

	return sk_filter_trim_cap(sk, skb, th->doff * 4);
}
EXPORT_SYMBOL(tcp_filter);

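/* tcp_v4_fill_cb() below overlays TCP private state on the IP control
 * block stored in the skb.  tcp_v4_restore_cb() undoes this, so the
 * original inet_skb_parm is back in place before the skb is fed to a
 * second socket lookup or handed to a freshly created child socket.
 */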
static void tcp_v4_restore_cb(struct sk_buff *skb)
{
	memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
		sizeof(struct inet_skb_parm));
}

static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IPCB to its correct location inside TCP_SKB_CB().
	 * barrier() makes sure the compiler won't play fool^Waliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
		sizeof(struct inet_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;
	TCP_SKB_CB(skb)->has_rxtstamp =
			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
}

/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	struct sk_buff *skb_to_free;
	int sdif = inet_sdif(skb);
	int dif = inet_iif(skb);
	const struct iphdr *iph;
	const struct tcphdr *th;
	bool refcounted;
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided the case of th->doff == 0 is eliminated.
	 * So, we defer the checks. */

	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
		goto csum_error;

	th = (const struct tcphdr *)skb->data;
	iph = ip_hdr(skb);
lookup:
	sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
			       th->dest, sdif, &refcounted);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		bool req_stolen = false;
		struct sock *nsk;

		sk = req->rsk_listener;
		if (unlikely(tcp_v4_inbound_md5_hash(sk, skb, dif, sdif))) {
			sk_drops_add(sk, skb);
			reqsk_put(req);
			goto discard_it;
		}
		if (tcp_checksum_complete(skb)) {
			reqsk_put(req);
			goto csum_error;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		/* We own a reference on the listener, increase it again
		 * as we might lose it too soon.
		 */
		sock_hold(sk);
		refcounted = true;
		nsk = NULL;
		if (!tcp_filter(sk, skb)) {
			th = (const struct tcphdr *)skb->data;
			iph = ip_hdr(skb);
			tcp_v4_fill_cb(skb, iph, th);
			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
		}
		if (!nsk) {
			reqsk_put(req);
			if (req_stolen) {
				/* Another cpu got exclusive access to req
				 * and created a full blown socket.
				 * Try to feed this packet to this socket
				 * instead of discarding it.
				 */
				tcp_v4_restore_cb(skb);
				sock_put(sk);
				goto lookup;
			}
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v4_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v4_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (tcp_v4_inbound_md5_hash(sk, skb, dif, sdif))
		goto discard_and_relse;

	nf_reset_ct(skb);

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	th = (const struct tcphdr *)skb->data;
	iph = ip_hdr(skb);
	tcp_v4_fill_cb(skb, iph, th);

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v4_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		skb_to_free = sk->sk_rx_skb_cache;
		sk->sk_rx_skb_cache = NULL;
		ret = tcp_v4_do_rcv(sk, skb);
	} else {
		if (tcp_add_backlog(sk, skb))
			goto discard_and_relse;
		skb_to_free = NULL;
	}
	bh_unlock_sock(sk);
	if (skb_to_free)
		__kfree_skb(skb_to_free);

put_and_return:
	if (refcounted)
		sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v4_fill_cb(skb, iph, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v4_fill_cb(skb, iph, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo, skb,
							__tcp_hdrlen(th),
							iph->saddr, th->source,
							iph->daddr, th->dest,
							inet_iif(skb),
							sdif);
		if (sk2) {
			inet_twsk_deschedule_put(inet_twsk(sk));
			sk = sk2;
			tcp_v4_restore_cb(skb);
			refcounted = false;
			goto process;
		}
	}
		/* to ACK */
		fallthrough;
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v4_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};

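/* Cache the validated input route of @skb on the socket, along with the
 * interface it arrived on, so the receive fast paths can reuse it instead
 * of doing a route lookup for every segment.
 */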
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	}
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);

const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
EXPORT_SYMBOL(ipv4_specific);

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup		= tcp_v4_md5_lookup,
	.calc_md5_hash		= tcp_v4_md5_hash_skb,
	.md5_parse		= tcp_v4_parse_md5_keys,
};
#endif

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}

void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	trace_tcp_destroy_sock(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	tcp_cleanup_ulp(sk);

	/* Cleanup up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Check if we want to disable active TFO */
	tcp_fastopen_active_disable_ofo_check(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	skb_rbtree_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(rcu_dereference_protected(tp->md5sig_info, 1), rcu);
		tp->md5sig_info = NULL;
	}
#endif

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	BUG_ON(rcu_access_pointer(tp->fastopen_rsk));

	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);
	tcp_fastopen_destroy_cipher(sk);
	tcp_saved_syn_free(tp);

	sk_sockets_allocated_dec(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

/*
 * Get the next listener socket following cur.  If cur is NULL, get the first socket
 * starting from bucket given in st->bucket; when st->bucket is zero the
 * very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	struct inet_listen_hashbucket *ilb;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;

	if (!sk) {
get_head:
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock(&ilb->lock);
		sk = sk_nulls_head(&ilb->nulls_head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == afinfo->family)
			return sk;
	}
	spin_unlock(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE)
		goto get_head;
	return NULL;
}

static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline bool empty_bucket(const struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
}

/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != afinfo->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		spin_unlock_bh(lock);
	}
out:
	return rc;
}

static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
	struct sock *sk = cur;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == afinfo->family &&
		    net_eq(sock_net(sk), net))
			return sk;
	}

	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
	++st->bucket;
	return established_get_first(seq);
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
J
L
	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}

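/* Resume a /proc/net/tcp walk at the bucket and in-bucket offset remembered
 * from the previous read, so a large dump does not have to restart from the
 * first hash bucket on every read() call.
 */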
static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		fallthrough;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}

void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}
EXPORT_SYMBOL(tcp_seq_start);

void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}
EXPORT_SYMBOL(tcp_seq_next);

void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
EXPORT_SYMBOL(tcp_seq_stop);

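/* Format one /proc/net/tcp line for a request sock that is still in
 * SYN_RECV state; columns that only make sense for full sockets are
 * printed as zero.
 */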
static void get_openreq4(const struct request_sock *req,
			 struct seq_file *f, int i)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->rsk_timer.expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ireq->ir_num,
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f),
				 sock_i_uid(req->rsk_listener)),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		0,
		req);
}

static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;
	int state;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	state = inet_sk_state_load(sk);
	if (state == TCP_LISTEN)
		rx_queue = READ_ONCE(sk->sk_ack_backlog);
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
				      READ_ONCE(tp->copied_seq), 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, state,
		READ_ONCE(tp->write_seq) - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		refcount_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sk),
		tp->snd_cwnd,
		state == TCP_LISTEN ?
		    fastopenq->max_qlen :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}

static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	__be32 dest, src;
	__u16 destp, srcp;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		refcount_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait4_sock(v, seq, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq4(v, seq, st->num);
	else
		get_tcp4_sock(v, seq, st->num);
out:
	seq_pad(seq, '\n');
	return 0;
}

static const struct seq_operations tcp4_seq_ops = {
	.show		= tcp4_seq_show,
	.start		= tcp_seq_start,
	.next		= tcp_seq_next,
	.stop		= tcp_seq_stop,
};

L
	.family		= AF_INET,
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	if (!proc_create_net_data("tcp", 0444, net->proc_net, &tcp4_seq_ops,
			sizeof(struct tcp_iter_state), &tcp4_seq_afinfo))
		return -ENOMEM;
	return 0;
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	remove_proc_entry("tcp", net->proc_net);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

L
{
2621
	return register_pernet_subsys(&tcp4_net_ops);
L

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.pre_connect		= tcp_v4_pre_connect,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.keepalive		= tcp_set_keepalive,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.leave_memory_pressure	= tcp_leave_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};
EXPORT_SYMBOL(tcp_prot);

static void __net_exit tcp_sk_exit(struct net *net)
{
	int cpu;

	if (net->ipv4.tcp_congestion_control)
		bpf_module_put(net->ipv4.tcp_congestion_control,
			       net->ipv4.tcp_congestion_control->owner);

	for_each_possible_cpu(cpu)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
	free_percpu(net->ipv4.tcp_sk);
}

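/* Per-netns initialisation: create one raw control socket per possible CPU
 * (used to send RSTs and ACKs on behalf of sockets we do not own), then
 * fill in the namespace's TCP sysctl defaults.  The congestion control
 * module is inherited from init_net when a reference on it can be taken,
 * otherwise the built-in Reno is used.
 */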
static int __net_init tcp_sk_init(struct net *net)
{
	int res, cpu, cnt;

	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
	if (!net->ipv4.tcp_sk)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct sock *sk;

		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
					   IPPROTO_TCP, net);
		if (res)
			goto fail;
		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

		/* Please enforce IP_DF and IPID==0 for RST and
		 * ACK sent in SYN-RECV and TIME-WAIT state.
		 */
		inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;

		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
	}

	net->ipv4.sysctl_tcp_ecn = 2;
	net->ipv4.sysctl_tcp_ecn_fallback = 1;

	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
	net->ipv4.sysctl_tcp_min_snd_mss = TCP_MIN_SND_MSS;
	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
	net->ipv4.sysctl_tcp_mtu_probe_floor = TCP_MIN_SND_MSS;

	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;

	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
	net->ipv4.sysctl_tcp_syncookies = 1;
	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
	net->ipv4.sysctl_tcp_orphan_retries = 0;
	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
	net->ipv4.sysctl_tcp_tw_reuse = 2;
	net->ipv4.sysctl_tcp_no_ssthresh_metrics_save = 1;

	cnt = tcp_hashinfo.ehash_mask + 1;
	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
	net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;

	net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 128);
	net->ipv4.sysctl_tcp_sack = 1;
	net->ipv4.sysctl_tcp_window_scaling = 1;
	net->ipv4.sysctl_tcp_timestamps = 1;
	net->ipv4.sysctl_tcp_early_retrans = 3;
	net->ipv4.sysctl_tcp_recovery = TCP_RACK_LOSS_DETECTION;
	net->ipv4.sysctl_tcp_slow_start_after_idle = 1; /* By default, RFC2861 behavior.  */
	net->ipv4.sysctl_tcp_retrans_collapse = 1;
	net->ipv4.sysctl_tcp_max_reordering = 300;
	net->ipv4.sysctl_tcp_dsack = 1;
	net->ipv4.sysctl_tcp_app_win = 31;
	net->ipv4.sysctl_tcp_adv_win_scale = 1;
	net->ipv4.sysctl_tcp_frto = 2;
	net->ipv4.sysctl_tcp_moderate_rcvbuf = 1;
	/* This limits the percentage of the congestion window which we
	 * will allow a single TSO frame to consume.  Building TSO frames
	 * which are too large can cause TCP streams to be bursty.
	 */
	net->ipv4.sysctl_tcp_tso_win_divisor = 3;
	/* Default TSQ limit of 16 TSO segments */
	net->ipv4.sysctl_tcp_limit_output_bytes = 16 * 65536;
	/* rfc5961 challenge ack rate limiting */
	net->ipv4.sysctl_tcp_challenge_ack_limit = 1000;
	net->ipv4.sysctl_tcp_min_tso_segs = 2;
	net->ipv4.sysctl_tcp_min_rtt_wlen = 300;
	net->ipv4.sysctl_tcp_autocorking = 1;
	net->ipv4.sysctl_tcp_invalid_ratelimit = HZ/2;
	net->ipv4.sysctl_tcp_pacing_ss_ratio = 200;
	net->ipv4.sysctl_tcp_pacing_ca_ratio = 120;
	if (net != &init_net) {
		memcpy(net->ipv4.sysctl_tcp_rmem,
		       init_net.ipv4.sysctl_tcp_rmem,
		       sizeof(init_net.ipv4.sysctl_tcp_rmem));
		memcpy(net->ipv4.sysctl_tcp_wmem,
		       init_net.ipv4.sysctl_tcp_wmem,
		       sizeof(init_net.ipv4.sysctl_tcp_wmem));
	}
	net->ipv4.sysctl_tcp_comp_sack_delay_ns = NSEC_PER_MSEC;
	net->ipv4.sysctl_tcp_comp_sack_slack_ns = 100 * NSEC_PER_USEC;
	net->ipv4.sysctl_tcp_comp_sack_nr = 44;
	net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
	spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
	net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60;
	atomic_set(&net->ipv4.tfo_active_disable_times, 0);

	/* Reno is always built in */
	if (!net_eq(net, &init_net) &&
	    bpf_try_module_get(init_net.ipv4.tcp_congestion_control,
			       init_net.ipv4.tcp_congestion_control->owner))
		net->ipv4.tcp_congestion_control = init_net.ipv4.tcp_congestion_control;
	else
		net->ipv4.tcp_congestion_control = &tcp_reno;

	return 0;
fail:
	tcp_sk_exit(net);

	return res;
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	struct net *net;

	inet_twsk_purge(&tcp_hashinfo, AF_INET);

	list_for_each_entry(net, net_exit_list, exit_list)
		tcp_fastopen_ctx_destroy(net);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
       .init	   = tcp_sk_init,
       .exit	   = tcp_sk_exit,
       .exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}