// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *	     				Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/inetdevice.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

#include <trace/events/tcp.h>

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

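/* Generate the initial sequence number for an outgoing connection.
 * secure_tcp_seq() keys a hash over the address/port 4-tuple, keeping
 * ISNs unpredictable to off-path attackers (RFC 6528 style).
 */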
static u32 tcp_v4_init_seq(const struct sk_buff *skb)
{
	return secure_tcp_seq(ip_hdr(skb)->daddr,
			      ip_hdr(skb)->saddr,
			      tcp_hdr(skb)->dest,
			      tcp_hdr(skb)->source);
}

static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
	return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
}

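/* Decide whether a connect() may reuse a 4-tuple that is still held by a
 * TIME-WAIT socket. Returns 1 (after taking a reference on sktw) when the
 * reuse is considered safe, 0 otherwise.
 */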
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct inet_timewait_sock *tw = inet_twsk(sktw);
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);
	int reuse = sock_net(sk)->ipv4.sysctl_tcp_tw_reuse;

	if (reuse == 2) {
		/* Still does not detect *everything* that goes through
		 * lo, since we require a loopback src or dst address
		 * or direct binding to 'lo' interface.
		 */
		bool loopback = false;

		if (tw->tw_bound_dev_if == LOOPBACK_IFINDEX)
			loopback = true;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == AF_INET6) {
			if (ipv6_addr_loopback(&tw->tw_v6_daddr) ||
			    ipv6_addr_v4mapped_loopback(&tw->tw_v6_daddr) ||
			    ipv6_addr_loopback(&tw->tw_v6_rcv_saddr) ||
			    ipv6_addr_v4mapped_loopback(&tw->tw_v6_rcv_saddr))
				loopback = true;
		} else
#endif
		{
			if (ipv4_is_loopback(tw->tw_daddr) ||
			    ipv4_is_loopback(tw->tw_rcv_saddr))
				loopback = true;
		}
		if (!loopback)
			reuse = 0;
	}

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (!twp || (reuse && time_after32(ktime_get_seconds(),
					    tcptw->tw_ts_recent_stamp)))) {
		/* In case of repair and re-using TIME-WAIT sockets we still
		 * want to be sure that it is safe as above but honor the
		 * sequence numbers and time stamps set as part of the repair
		 * process.
		 *
		 * Without this check re-using a TIME-WAIT socket with TCP
		 * repair would accumulate a -1 on the repair assigned
		 * sequence number. The first time it is reused the sequence
		 * is -1, the second time -2, etc. This fixes that issue
		 * without appearing to create any others.
		 */
		if (likely(!tp->repair)) {
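			/* Step well past the old instance's snd_nxt (one full
			 * 64K window + 2) so that stray segments from the
			 * previous incarnation cannot land in our sequence
			 * space.
			 */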
			u32 seq = tcptw->tw_snd_nxt + 65535 + 2;

			if (!seq)
				seq = 1;
			WRITE_ONCE(tp->write_seq, seq);
			tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
			tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		}
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);

static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			      int addr_len)
{
	/* This check is replicated from tcp_v4_connect() and intended to
	 * prevent BPF program called below from accessing bytes that are out
	 * of the bound specified by user in addr_len.
	 */
	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	sock_owned_by_me(sk);

	return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr);
}

/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     lockdep_sock_is_held(sk));
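	/* With a source-route IP option, route towards the first hop (faddr)
	 * rather than the final destination.
	 */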
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	sk_rcv_saddr_set(sk, inet->inet_saddr);

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			WRITE_ONCE(tp->write_seq, 0);
	}

	inet->inet_dport = usin->sin_port;
	sk_daddr_set(sk, daddr);

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(tcp_death_row, sk);
	if (err)
		goto failure;

	sk_set_txhash(sk);

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);
	rt = NULL;

	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			WRITE_ONCE(tp->write_seq,
				   secure_tcp_seq(inet->inet_saddr,
						  inet->inet_daddr,
						  inet->inet_sport,
						  usin->sin_port));
		tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
						 inet->inet_saddr,
						 inet->inet_daddr);
	}

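	/* Use a random IP ID base: deriving it from connection state would
	 * make the ID sequence predictable to off-path observers.
	 */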
	inet->inet_id = prandom_u32();

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto failure;

	err = tcp_connect(sk);

	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);

/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct dst_entry *dst;
	u32 mtu;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;
	mtu = tcp_sk(sk)->mtu_info;
	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to go wrong... Remember the soft error
	 * for the case this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);

static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}


/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq, bool abort)
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/* ICMPs are not backlogged, hence we cannot get
	 * an established socket here.
	 */
	if (seq != tcp_rsk(req)->snt_isn) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
	} else if (abort) {
		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		tcp_listendrop(req->rsk_listener);
	}
	reqsk_put(req);
}
EXPORT_SYMBOL(tcp_req_err);

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

int tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	u32 seq, snd_una;
	s32 remaining;
	u32 delta_us;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
				       th->dest, iph->saddr, ntohs(th->source),
				       inet_iif(icmp_skb), 0);
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
		return -ENOENT;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return 0;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		tcp_req_err(sk, seq, type == ICMP_PARAMETERPROB ||
				     type == ICMP_TIME_EXCEEDED ||
				     (type == ICMP_DEST_UNREACH &&
				      (code == ICMP_NET_UNREACH ||
				       code == ICMP_HOST_UNREACH)));
		return 0;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
	fastopen = rcu_dereference(tp->fastopen_rsk);
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		if (!sock_owned_by_user(sk))
			do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;

		skb = tcp_rtx_queue_head(sk);
		if (WARN_ON_ONCE(!skb))
			break;

		icsk->icsk_backoff--;
		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
					       TCP_TIMEOUT_INIT;
		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

		tcp_mstamp_refresh(tp);
		delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
		remaining = icsk->icsk_rto -
			    usecs_to_jiffies(delta_us);

		if (remaining > 0) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		ip_icmp_error(sk, skb, err, th->dest, info, (u8 *)th);

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows considering as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note that in the modern internet, where routing is unreliable
	 * and broken firewalls sit in every dark corner sending random
	 * errors ordered by their masters, even these two messages finally
	 * lose their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 *							--ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
	return 0;
}

void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

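	/* Prime the pseudo-header checksum and record where the TCP checksum
	 * lives, so hardware (or skb_checksum_help()) can fold in the payload
	 * later.
	 */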
	th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);

/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system; if it is matched to a socket,
 *		it is just a duplicate segment or a bug in the other side's TCP.
 *		So we build the reply based only on the parameters
 *		that arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key = NULL;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	u64 transmit_time = 0;
	struct sock *ctl_sk;
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

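	/* Per RFC 793: if the incoming segment carried an ACK, the RST takes
	 * its sequence number from that ACK; otherwise SEG.SEQ is 0 and we
	 * ACK everything the offending segment occupied.
	 */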
	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		const union tcp_md5_addr *addr;
		int l3index;

		/* sdif set, means packet ingressed via a device
		 * in an L3 domain and inet_iif is set to it.
		 */
		l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
		addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
		key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
	} else if (hash_location) {
		const union tcp_md5_addr *addr;
		int sdif = tcp_v4_sdif(skb);
		int dif = inet_iif(skb);
		int l3index;

		/*
		 * The active side is lost. Try to find the listening socket through
		 * the source port, and then find the md5 key through the listening socket.
		 * We are not losing security here:
		 * the incoming packet is checked with the md5 hash of the found key;
		 * no RST is generated if the md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
					     ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), dif, sdif);
		/* don't send rst if it can't find key */
		if (!sk1)
			goto out;

		/* sdif set, means packet ingressed via a device
		 * in an L3 domain and dif is set to it.
		 */
		l3index = sdif ? dif : 0;
		addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
		key = tcp_md5_do_lookup(sk1, l3index, addr, AF_INET);
		if (!key)
			goto out;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;

	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				     key, ip_hdr(skb)->saddr,
				     ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	/* When socket is gone, all binding information is lost.
	 * Routing might fail in this case. No choice here: if we choose to force
	 * the input interface, we will misroute in case of an asymmetric route.
	 */
	if (sk) {
		arg.bound_dev_if = sk->sk_bound_dev_if;
		if (sk_fullsock(sk))
			trace_tcp_send_reset(sk, skb);
	}

	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));

	arg.tos = ip_hdr(skb)->tos;
	arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	local_bh_disable();
	ctl_sk = this_cpu_read(*net->ipv4.tcp_sk);
	if (sk) {
		ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
				   inet_twsk(sk)->tw_mark : sk->sk_mark;
		ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
				   inet_twsk(sk)->tw_priority : sk->sk_priority;
		transmit_time = tcp_transmit_time(sk);
	}
	ip_send_unicast_reply(ctl_sk,
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len,
			      transmit_time);

	ctl_sk->sk_mark = 0;
	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
	local_bh_enable();

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}

/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
   outside socket context, is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(const struct sock *sk,
			    struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct net *net = sock_net(sk);
	struct ip_reply_arg arg;
	struct sock *ctl_sk;
	u64 transmit_time;

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
	local_bh_disable();
	ctl_sk = this_cpu_read(*net->ipv4.tcp_sk);
	ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
			   inet_twsk(sk)->tw_mark : sk->sk_mark;
	ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
			   inet_twsk(sk)->tw_priority : sk->sk_priority;
	transmit_time = tcp_transmit_time(sk);
	ip_send_unicast_reply(ctl_sk,
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len,
			      transmit_time);

	ctl_sk->sk_mark = 0;
	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	local_bh_enable();
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(sk, skb,
			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	const union tcp_md5_addr *addr;
	int l3index;

	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
					     tcp_sk(sk)->snd_nxt;

	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
	l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
	tcp_v4_send_ack(sk, skb, seq,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, l3index, addr, AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		rcu_read_lock();
		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    rcu_dereference(ireq->ireq_opt));
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

	return err;
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

DEFINE_STATIC_KEY_FALSE(tcp_md5_needed);
EXPORT_SYMBOL(tcp_md5_needed);

/* Find the Key structure for an address; if several prefix-based keys match, the longest prefix wins. */
struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
					   const union tcp_md5_addr *addr,
					   int family)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	const struct tcp_md5sig_info *md5sig;
	__be32 mask;
	struct tcp_md5sig_key *best_match = NULL;
	bool match;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;

	hlist_for_each_entry_rcu(key, &md5sig->head, node,
				 lockdep_sock_is_held(sk)) {
		if (key->family != family)
			continue;
		if (key->l3index && key->l3index != l3index)
			continue;
		if (family == AF_INET) {
			mask = inet_make_mask(key->prefixlen);
			match = (key->addr.a4.s_addr & mask) ==
				(addr->a4.s_addr & mask);
#if IS_ENABLED(CONFIG_IPV6)
		} else if (family == AF_INET6) {
			match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
						  key->prefixlen);
#endif
		} else {
			match = false;
		}

		if (match && (!best_match ||
			      key->prefixlen > best_match->prefixlen))
			best_match = key;
	}
	return best_match;
}
EXPORT_SYMBOL(__tcp_md5_do_lookup);

static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
						      const union tcp_md5_addr *addr,
						      int family, u8 prefixlen,
						      int l3index)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	const struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node,
				 lockdep_sock_is_held(sk)) {
		if (key->family != family)
			continue;
		if (key->l3index && key->l3index != l3index)
			continue;
		if (!memcmp(&key->addr, addr, size) &&
		    key->prefixlen == prefixlen)
			return key;
	}
	return NULL;
}

struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk)
{
	const union tcp_md5_addr *addr;
	int l3index;

	l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
						 addr_sk->sk_bound_dev_if);
	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
	return tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, u8 prefixlen, int l3index,
		   const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   lockdep_sock_is_held(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	key->prefixlen = prefixlen;
	key->l3index = l3index;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
		   u8 prefixlen, int l3index)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);

static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}

static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
				 char __user *optval, int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
	const union tcp_md5_addr *addr;
	u8 prefixlen = 32;
	int l3index = 0;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
		prefixlen = cmd.tcpm_prefixlen;
		if (prefixlen > 32)
			return -EINVAL;
	}

	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_index_rcu(sock_net(sk), cmd.tcpm_ifindex);
		if (dev && netif_is_l3_master(dev))
			l3index = dev->ifindex;

		rcu_read_unlock();

		/* ok to reference set/not set outside of rcu;
		 * right now device MUST be an L3 master
		 */
		if (!dev || !l3index)
			return -EINVAL;
	}

	addr = (union tcp_md5_addr *)&sin->sin_addr.s_addr;

	if (!cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, addr, AF_INET, prefixlen, l3index);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, addr, AF_INET, prefixlen, l3index,
			      cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}

static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   __be32 daddr, __be32 saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}

static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) { /* valid for establish/request sockets */
		saddr = sk->sk_rcv_saddr;
		daddr = sk->sk_daddr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

#endif

/* Called with rcu_read_lock() */
static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb,
				    int dif, int sdif)
{
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	const union tcp_md5_addr *addr;
	unsigned char newhash[16];
	int genhash, l3index;

	/* sdif set, means packet ingressed via a device
	 * in an L3 domain and dif is set to the l3mdev
	 */
	l3index = sdif ? dif : 0;

	addr = (union tcp_md5_addr *)&iph->saddr;
	hash_expected = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s L3 index %d\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
				     : "", l3index);
		return true;
	}
	return false;
#endif
	return false;
}

static void tcp_v4_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = sock_net(sk_listener);

	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
	RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
}

static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req)
{
	return inet_csk_route_req(sk, &fl->u.ip4, req);
}

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.mss_clamp	=	TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
#endif
	.init_req	=	tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v4_init_sequence,
#endif
	.route_req	=	tcp_v4_route_req,
	.init_seq	=	tcp_v4_init_seq,
	.init_ts_off	=	tcp_v4_init_ts_off,
	.send_synack	=	tcp_v4_send_synack,
};

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer SYNs sent to broadcast or multicast addresses */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);


/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	const union tcp_md5_addr *addr;
	struct tcp_md5sig_key *key;
	int l3index;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	sk_daddr_set(newsk, ireq->ir_rmt_addr);
	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
	newsk->sk_bound_dev_if = ireq->ir_iif;
	newinet->inet_saddr   = ireq->ir_loc_addr;
	inet_opt	      = rcu_dereference(ireq->ireq_opt);
	RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = prandom_u32();

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif);
	/* Copy over the MD5 key from the original socket */
	addr = (union tcp_md5_addr *)&newinet->inet_daddr;
	key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
	if (key) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, addr, AF_INET, 32, l3index,
			       key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (likely(*own_req)) {
		tcp_move_syn(newtp, req);
		ireq->ireq_opt = NULL;
	} else {
		newinet->inet_opt = NULL;
	}
	return newsk;

exit_overflow:
	NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	tcp_listendrop(sk);
	return NULL;
put_and_exit:
	newinet->inet_opt = NULL;
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

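/* On a listener, a non-SYN segment may complete a connection whose state
 * was encoded in a SYN cookie; cookie_v4_check() validates it and, on
 * success, returns the freshly created child socket.
 */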
static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v4_check(sk, skb);
#endif
	return sk;
}

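/* Compute a SYN cookie and the MSS it encodes on behalf of a listener,
 * without creating any state; used e.g. by the BPF syncookie helper.
 */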
u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
			 struct tcphdr *th, u32 *cookie)
{
	u16 mss = 0;
#ifdef CONFIG_SYN_COOKIES
	mss = tcp_get_syncookie_mss(&tcp_request_sock_ops,
				    &tcp_request_sock_ipv4_ops, sk, th);
	if (mss) {
		*cookie = __cookie_v4_init_sequence(iph, th, &mss);
		tcp_synq_overflow(sk);
	}
#endif
	return mss;
}

/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
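		/* Validate the input route cached by early demux; toss it if
		 * the ingress device changed or the route is no longer valid.
		 */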
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    !dst->ops->check(dst, 0)) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		tcp_rcv_established(sk, skb);
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_cookie_check(sk, skb);

		if (!nsk)
			goto discard;
		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);

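/* Early demux: look up an established socket while the packet is still
 * in IP input, before the routing decision.  On a hit, attach the socket
 * to the skb and, if the cached input route is still valid for this
 * interface, reuse it and skip the routing lookup.
 */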
int tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return 0;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return 0;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return 0;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif, inet_sdif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
	return 0;
}

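/* Queue a segment on the backlog of a socket owned by user context,
 * coalescing it with the backlog tail when possible so that backlog
 * processing touches fewer skbs.  Returns true when the segment was
 * dropped (the socket spinlock is released on those paths), false when
 * it was queued or coalesced.
 */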
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf);
	struct skb_shared_info *shinfo;
	const struct tcphdr *th;
	struct tcphdr *thtail;
	struct sk_buff *tail;
	unsigned int hdrlen;
	bool fragstolen;
	u32 gso_segs;
	int delta;

	/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
	 * we can fix skb->truesize to its real value to avoid future drops.
	 * This is valid because skb is not yet charged to the socket.
	 * It has been noticed pure SACK packets were sometimes dropped
	 * (if cooked by drivers without copybreak feature).
	 */
	skb_condense(skb);

	skb_dst_drop(skb);

	if (unlikely(tcp_checksum_complete(skb))) {
		bh_unlock_sock(sk);
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
		return true;
	}

	/* Attempt coalescing to last skb in backlog, even if we are
	 * above the limits.
	 * This is okay because skb capacity is limited to MAX_SKB_FRAGS.
	 */
	th = (const struct tcphdr *)skb->data;
	hdrlen = th->doff * 4;
	shinfo = skb_shinfo(skb);

	if (!shinfo->gso_size)
		shinfo->gso_size = skb->len - hdrlen;

	if (!shinfo->gso_segs)
		shinfo->gso_segs = 1;

	tail = sk->sk_backlog.tail;
	if (!tail)
		goto no_coalesce;
	thtail = (struct tcphdr *)tail->data;

	if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
	    TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield ||
	    ((TCP_SKB_CB(tail)->tcp_flags |
	      TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_SYN | TCPHDR_RST | TCPHDR_URG)) ||
	    !((TCP_SKB_CB(tail)->tcp_flags &
	      TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
	    ((TCP_SKB_CB(tail)->tcp_flags ^
	      TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
#ifdef CONFIG_TLS_DEVICE
	    tail->decrypted != skb->decrypted ||
#endif
	    thtail->doff != th->doff ||
	    memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
		goto no_coalesce;

	__skb_pull(skb, hdrlen);
	if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
		thtail->window = th->window;

		TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;

		if (after(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))
			TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq;

		/* We have to update both TCP_SKB_CB(tail)->tcp_flags and
		 * thtail->fin, so that the fast path in tcp_rcv_established()
		 * is not entered if we append a packet with a FIN.
		 * SYN, RST, URG are not present.
		 * ACK is set on both packets.
		 * PSH : we do not really care in TCP stack,
		 *       at least for 'GRO' packets.
		 */
		thtail->fin |= th->fin;
		TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;

		if (TCP_SKB_CB(skb)->has_rxtstamp) {
			TCP_SKB_CB(tail)->has_rxtstamp = true;
			tail->tstamp = skb->tstamp;
			skb_hwtstamps(tail)->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
		}

		/* Not as strict as GRO. We only need to carry mss max value */
		skb_shinfo(tail)->gso_size = max(shinfo->gso_size,
						 skb_shinfo(tail)->gso_size);

		gso_segs = skb_shinfo(tail)->gso_segs + shinfo->gso_segs;
		skb_shinfo(tail)->gso_segs = min_t(u32, gso_segs, 0xFFFF);

		sk->sk_backlog.len += delta;
		__NET_INC_STATS(sock_net(sk),
				LINUX_MIB_TCPBACKLOGCOALESCE);
		kfree_skb_partial(skb, fragstolen);
		return false;
	}
	__skb_push(skb, hdrlen);

no_coalesce:
	/* Only socket owner can try to collapse/prune rx queues
	 * to reduce memory overhead, so add a little headroom here.
	 * Few sockets backlog are possibly concurrently non empty.
	 */
	limit += 64*1024;

	if (unlikely(sk_add_backlog(sk, skb, limit))) {
		bh_unlock_sock(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
		return true;
	}
	return false;
}
EXPORT_SYMBOL(tcp_add_backlog);

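/* Run the attached socket filter; it may trim the skb, but never below
 * the TCP header (th->doff * 4 bytes).
 */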
int tcp_filter(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = (struct tcphdr *)skb->data;

	return sk_filter_trim_cap(sk, skb, th->doff * 4);
}
EXPORT_SYMBOL(tcp_filter);

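/* tcp_v4_fill_cb() below overwrites IPCB(skb) with TCP-private control
 * block state; tcp_v4_restore_cb() undoes that move before the skb is
 * handed to a path that still expects the IP control block.
 */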
static void tcp_v4_restore_cb(struct sk_buff *skb)
{
	memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
		sizeof(struct inet_skb_parm));
}

static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
			   const struct tcphdr *th)
{
	/* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
	 * barrier() makes sure compiler won't play fool^Waliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
		sizeof(struct inet_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;
	TCP_SKB_CB(skb)->has_rxtstamp =
			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
}

/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	struct sk_buff *skb_to_free;
	int sdif = inet_sdif(skb);
	int dif = inet_iif(skb);
	const struct iphdr *iph;
	const struct tcphdr *th;
	bool refcounted;
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */

	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
		goto csum_error;

	th = (const struct tcphdr *)skb->data;
	iph = ip_hdr(skb);
lookup:
	sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
			       th->dest, sdif, &refcounted);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		bool req_stolen = false;
		struct sock *nsk;

		sk = req->rsk_listener;
		if (unlikely(tcp_v4_inbound_md5_hash(sk, skb, dif, sdif))) {
			sk_drops_add(sk, skb);
			reqsk_put(req);
			goto discard_it;
		}
		if (tcp_checksum_complete(skb)) {
			reqsk_put(req);
			goto csum_error;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		/* We own a reference on the listener, increase it again
		 * as we might lose it too soon.
		 */
		sock_hold(sk);
		refcounted = true;
		nsk = NULL;
		if (!tcp_filter(sk, skb)) {
			th = (const struct tcphdr *)skb->data;
			iph = ip_hdr(skb);
			tcp_v4_fill_cb(skb, iph, th);
			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
		}
		if (!nsk) {
			reqsk_put(req);
			if (req_stolen) {
				/* Another cpu got exclusive access to req
				 * and created a full blown socket.
				 * Try to feed this packet to this socket
				 * instead of discarding it.
				 */
				tcp_v4_restore_cb(skb);
				sock_put(sk);
				goto lookup;
			}
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v4_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v4_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (tcp_v4_inbound_md5_hash(sk, skb, dif, sdif))
		goto discard_and_relse;

	nf_reset_ct(skb);

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	th = (const struct tcphdr *)skb->data;
	iph = ip_hdr(skb);
	tcp_v4_fill_cb(skb, iph, th);

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v4_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		skb_to_free = sk->sk_rx_skb_cache;
		sk->sk_rx_skb_cache = NULL;
		ret = tcp_v4_do_rcv(sk, skb);
	} else {
		if (tcp_add_backlog(sk, skb))
			goto discard_and_relse;
		skb_to_free = NULL;
	}
	bh_unlock_sock(sk);
	if (skb_to_free)
		__kfree_skb(skb_to_free);

put_and_return:
	if (refcounted)
		sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v4_fill_cb(skb, iph, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v4_fill_cb(skb, iph, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo, skb,
							__tcp_hdrlen(th),
							iph->saddr, th->source,
							iph->daddr, th->dest,
							inet_iif(skb),
							sdif);
		if (sk2) {
			inet_twsk_deschedule_put(inet_twsk(sk));
			sk = sk2;
			tcp_v4_restore_cb(skb);
			refcounted = false;
			goto process;
		}
	}
		/* to ACK */
		fallthrough;
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v4_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};
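/* Cache the input route of a received skb on the socket, so that the
 * early demux fast path can reuse it for subsequent packets.
 */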
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	}
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);

const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
EXPORT_SYMBOL(ipv4_specific);

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup		= tcp_v4_md5_lookup,
	.calc_md5_hash		= tcp_v4_md5_hash_skb,
	.md5_parse		= tcp_v4_parse_md5_keys,
};
#endif

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}

void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	trace_tcp_destroy_sock(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	tcp_cleanup_ulp(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Check if we want to disable active TFO */
	tcp_fastopen_active_disable_ofo_check(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	skb_rbtree_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(rcu_dereference_protected(tp->md5sig_info, 1), rcu);
		tp->md5sig_info = NULL;
	}
#endif

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	BUG_ON(rcu_access_pointer(tp->fastopen_rsk));

	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);
	tcp_fastopen_destroy_cipher(sk);
	tcp_saved_syn_free(tp);

	sk_sockets_allocated_dec(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

/*
 * Get next listener socket follow cur.  If cur is NULL, get first socket
 * starting from bucket given in st->bucket; when st->bucket is zero the
 * very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	struct inet_listen_hashbucket *ilb;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;

	if (!sk) {
get_head:
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock(&ilb->lock);
		sk = sk_nulls_head(&ilb->nulls_head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == afinfo->family)
			return sk;
	}
	spin_unlock(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE)
		goto get_head;
	return NULL;
}

static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline bool empty_bucket(const struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
}

/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != afinfo->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		spin_unlock_bh(lock);
	}
out:
	return rc;
}

static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
	struct sock *sk = cur;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == afinfo->family &&
		    net_eq(sock_net(sk), net))
			return sk;
	}

	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
	++st->bucket;
	return established_get_first(seq);
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}

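/* Resume iteration at the bucket and in-bucket offset remembered from
 * the previous read instead of rescanning the hash tables from the
 * beginning.
 */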
static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		fallthrough;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}

void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}
EXPORT_SYMBOL(tcp_seq_start);

void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}
EXPORT_SYMBOL(tcp_seq_next);

void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
EXPORT_SYMBOL(tcp_seq_stop);

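/* Format one SYN_RECV request socket as a /proc/net/tcp line. */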
static void get_openreq4(const struct request_sock *req,
			 struct seq_file *f, int i)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->rsk_timer.expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ireq->ir_num,
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f),
				 sock_i_uid(req->rsk_listener)),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		0,
		req);
}

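/* Format one full socket as a /proc/net/tcp line.  timer_active codes:
 * 1 = retransmit-class timer, 2 = keepalive (sk_timer), 4 = zero-window
 * probe timer, 0 = no timer pending.
 */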
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;
	int state;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	state = inet_sk_state_load(sk);
	if (state == TCP_LISTEN)
		rx_queue = READ_ONCE(sk->sk_ack_backlog);
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
				      READ_ONCE(tp->copied_seq), 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, state,
		READ_ONCE(tp->write_seq) - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		refcount_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sk),
		tp->snd_cwnd,
		state == TCP_LISTEN ?
		    fastopenq->max_qlen :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}

static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	__be32 dest, src;
	__u16 destp, srcp;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		refcount_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait4_sock(v, seq, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq4(v, seq, st->num);
	else
		get_tcp4_sock(v, seq, st->num);
out:
	seq_pad(seq, '\n');
	return 0;
}

static const struct seq_operations tcp4_seq_ops = {
	.show		= tcp4_seq_show,
	.start		= tcp_seq_start,
	.next		= tcp_seq_next,
	.stop		= tcp_seq_stop,
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.family		= AF_INET,
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	if (!proc_create_net_data("tcp", 0444, net->proc_net, &tcp4_seq_ops,
			sizeof(struct tcp_iter_state), &tcp4_seq_afinfo))
		return -ENOMEM;
	return 0;
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	remove_proc_entry("tcp", net->proc_net);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.pre_connect		= tcp_v4_pre_connect,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.keepalive		= tcp_set_keepalive,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.leave_memory_pressure	= tcp_leave_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};
EXPORT_SYMBOL(tcp_prot);

static void __net_exit tcp_sk_exit(struct net *net)
{
	int cpu;

	if (net->ipv4.tcp_congestion_control)
		bpf_module_put(net->ipv4.tcp_congestion_control,
			       net->ipv4.tcp_congestion_control->owner);

	for_each_possible_cpu(cpu)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
	free_percpu(net->ipv4.tcp_sk);
}

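/* Per-netns initialization: create the per-cpu control sockets used to
 * send RSTs and ACKs on behalf of packets without a full socket, then
 * set this namespace's TCP sysctl defaults.
 */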
static int __net_init tcp_sk_init(struct net *net)
{
	int res, cpu, cnt;

	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
	if (!net->ipv4.tcp_sk)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct sock *sk;

		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
					   IPPROTO_TCP, net);
		if (res)
			goto fail;
		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

		/* Please enforce IP_DF and IPID==0 for RST and
		 * ACK sent in SYN-RECV and TIME-WAIT state.
		 */
		inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;

		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
	}

	net->ipv4.sysctl_tcp_ecn = 2;
	net->ipv4.sysctl_tcp_ecn_fallback = 1;

	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
	net->ipv4.sysctl_tcp_min_snd_mss = TCP_MIN_SND_MSS;
	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
	net->ipv4.sysctl_tcp_mtu_probe_floor = TCP_MIN_SND_MSS;

	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;

	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
	net->ipv4.sysctl_tcp_syncookies = 1;
	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
	net->ipv4.sysctl_tcp_orphan_retries = 0;
	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
	net->ipv4.sysctl_tcp_tw_reuse = 2;
	net->ipv4.sysctl_tcp_no_ssthresh_metrics_save = 1;

	cnt = tcp_hashinfo.ehash_mask + 1;
	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
	net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;

	net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 128);
	net->ipv4.sysctl_tcp_sack = 1;
	net->ipv4.sysctl_tcp_window_scaling = 1;
	net->ipv4.sysctl_tcp_timestamps = 1;
	net->ipv4.sysctl_tcp_early_retrans = 3;
	net->ipv4.sysctl_tcp_recovery = TCP_RACK_LOSS_DETECTION;
	net->ipv4.sysctl_tcp_slow_start_after_idle = 1; /* By default, RFC2861 behavior.  */
	net->ipv4.sysctl_tcp_retrans_collapse = 1;
	net->ipv4.sysctl_tcp_max_reordering = 300;
	net->ipv4.sysctl_tcp_dsack = 1;
	net->ipv4.sysctl_tcp_app_win = 31;
	net->ipv4.sysctl_tcp_adv_win_scale = 1;
	net->ipv4.sysctl_tcp_frto = 2;
	net->ipv4.sysctl_tcp_moderate_rcvbuf = 1;
	/* This limits the percentage of the congestion window which we
	 * will allow a single TSO frame to consume.  Building TSO frames
	 * which are too large can cause TCP streams to be bursty.
	 */
	net->ipv4.sysctl_tcp_tso_win_divisor = 3;
	/* Default TSQ limit of 16 TSO segments */
	net->ipv4.sysctl_tcp_limit_output_bytes = 16 * 65536;
	/* rfc5961 challenge ack rate limiting */
	net->ipv4.sysctl_tcp_challenge_ack_limit = 1000;
	net->ipv4.sysctl_tcp_min_tso_segs = 2;
	net->ipv4.sysctl_tcp_min_rtt_wlen = 300;
	net->ipv4.sysctl_tcp_autocorking = 1;
	net->ipv4.sysctl_tcp_invalid_ratelimit = HZ/2;
	net->ipv4.sysctl_tcp_pacing_ss_ratio = 200;
	net->ipv4.sysctl_tcp_pacing_ca_ratio = 120;
	if (net != &init_net) {
		memcpy(net->ipv4.sysctl_tcp_rmem,
		       init_net.ipv4.sysctl_tcp_rmem,
		       sizeof(init_net.ipv4.sysctl_tcp_rmem));
		memcpy(net->ipv4.sysctl_tcp_wmem,
		       init_net.ipv4.sysctl_tcp_wmem,
		       sizeof(init_net.ipv4.sysctl_tcp_wmem));
	}
	net->ipv4.sysctl_tcp_comp_sack_delay_ns = NSEC_PER_MSEC;
	net->ipv4.sysctl_tcp_comp_sack_slack_ns = 100 * NSEC_PER_USEC;
	net->ipv4.sysctl_tcp_comp_sack_nr = 44;
	net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
	spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
	net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60;
	atomic_set(&net->ipv4.tfo_active_disable_times, 0);

	/* Reno is always built in */
	if (!net_eq(net, &init_net) &&
	    bpf_try_module_get(init_net.ipv4.tcp_congestion_control,
			       init_net.ipv4.tcp_congestion_control->owner))
		net->ipv4.tcp_congestion_control = init_net.ipv4.tcp_congestion_control;
	else
		net->ipv4.tcp_congestion_control = &tcp_reno;

	return 0;
fail:
	tcp_sk_exit(net);

	return res;
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	struct net *net;

	inet_twsk_purge(&tcp_hashinfo, AF_INET);

	list_for_each_entry(net, net_exit_list, exit_list)
		tcp_fastopen_ctx_destroy(net);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
       .init	   = tcp_sk_init,
       .exit	   = tcp_sk_exit,
       .exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}