tcp_ipv4.c 74.1 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0-or-later
L
Linus Torvalds 已提交
2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *	     				Fixed many serious bugs in the
32
 *					request_sock handling and moved
L
Linus Torvalds 已提交
33 34
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
S
Stephen Hemminger 已提交
35
 *					Added new listen semantics.
L
Linus Torvalds 已提交
36 37 38 39 40 41 42 43 44 45 46 47
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */

48
#define pr_fmt(fmt) "TCP: " fmt
L
Linus Torvalds 已提交
49

H
Herbert Xu 已提交
50
#include <linux/bottom_half.h>
L
Linus Torvalds 已提交
51 52 53 54 55 56 57 58
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
59
#include <linux/slab.h>
L
Linus Torvalds 已提交
60

61
#include <net/net_namespace.h>
L
Linus Torvalds 已提交
62
#include <net/icmp.h>
63
#include <net/inet_hashtables.h>
L
Linus Torvalds 已提交
64
#include <net/tcp.h>
65
#include <net/transp_v6.h>
L
Linus Torvalds 已提交
66 67
#include <net/ipv6.h>
#include <net/inet_common.h>
68
#include <net/timewait_sock.h>
L
Linus Torvalds 已提交
69
#include <net/xfrm.h>
70
#include <net/secure_seq.h>
71
#include <net/busy_poll.h>
L
Linus Torvalds 已提交
72 73 74 75 76 77

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
78
#include <linux/inetdevice.h>
L
Linus Torvalds 已提交
79

H
Herbert Xu 已提交
80
#include <crypto/hash.h>
81 82
#include <linux/scatterlist.h>

83 84
#include <trace/events/tcp.h>

85
#ifdef CONFIG_TCP_MD5SIG
E
Eric Dumazet 已提交
86
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
E
Eric Dumazet 已提交
87
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
88 89
#endif

90
struct inet_hashinfo tcp_hashinfo;
E
Eric Dumazet 已提交
91
EXPORT_SYMBOL(tcp_hashinfo);
L
Linus Torvalds 已提交
92

93
static u32 tcp_v4_init_seq(const struct sk_buff *skb)
L
Linus Torvalds 已提交
94
{
95 96 97 98 99 100
	return secure_tcp_seq(ip_hdr(skb)->daddr,
			      ip_hdr(skb)->saddr,
			      tcp_hdr(skb)->dest,
			      tcp_hdr(skb)->source);
}

101
static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
102
{
103
	return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
L
Linus Torvalds 已提交
104 105
}

106 107
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
108
	const struct inet_timewait_sock *tw = inet_twsk(sktw);
109 110
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);
111 112 113 114 115 116 117 118 119 120 121 122 123
	int reuse = sock_net(sk)->ipv4.sysctl_tcp_tw_reuse;

	if (reuse == 2) {
		/* Still does not detect *everything* that goes through
		 * lo, since we require a loopback src or dst address
		 * or direct binding to 'lo' interface.
		 */
		bool loopback = false;
		if (tw->tw_bound_dev_if == LOOPBACK_IFINDEX)
			loopback = true;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == AF_INET6) {
			if (ipv6_addr_loopback(&tw->tw_v6_daddr) ||
124
			    ipv6_addr_v4mapped_loopback(&tw->tw_v6_daddr) ||
125
			    ipv6_addr_loopback(&tw->tw_v6_rcv_saddr) ||
126
			    ipv6_addr_v4mapped_loopback(&tw->tw_v6_rcv_saddr))
127 128 129 130 131 132 133 134 135 136 137
				loopback = true;
		} else
#endif
		{
			if (ipv4_is_loopback(tw->tw_daddr) ||
			    ipv4_is_loopback(tw->tw_rcv_saddr))
				loopback = true;
		}
		if (!loopback)
			reuse = 0;
	}
138 139 140 141 142 143 144 145 146 147 148 149 150

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
151 152
	    (!twp || (reuse && time_after32(ktime_get_seconds(),
					    tcptw->tw_ts_recent_stamp)))) {
153 154 155 156 157 158 159 160 161 162 163 164
		/* In case of repair and re-using TIME-WAIT sockets we still
		 * want to be sure that it is safe as above but honor the
		 * sequence numbers and time stamps set as part of the repair
		 * process.
		 *
		 * Without this check re-using a TIME-WAIT socket with TCP
		 * repair would accumulate a -1 on the repair assigned
		 * sequence number. The first time it is reused the sequence
		 * is -1, the second time -2, etc. This fixes that issue
		 * without appearing to create any others.
		 */
		if (likely(!tp->repair)) {
165 166 167 168 169
			u32 seq = tcptw->tw_snd_nxt + 65535 + 2;

			if (!seq)
				seq = 1;
			WRITE_ONCE(tp->write_seq, seq);
170 171 172
			tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
			tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		}
173 174 175 176 177 178 179 180
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);

A
Andrey Ignatov 已提交
181 182 183 184 185 186 187 188 189 190 191 192 193 194 195
static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			      int addr_len)
{
	/* This check is replicated from tcp_v4_connect() and intended to
	 * prevent BPF program called below from accessing bytes that are out
	 * of the bound specified by user in addr_len.
	 */
	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	sock_owned_by_me(sk);

	return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr);
}

L
Linus Torvalds 已提交
196 197 198
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
199
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
L
Linus Torvalds 已提交
200 201
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
202
	__be16 orig_sport, orig_dport;
203
	__be32 daddr, nexthop;
204
	struct flowi4 *fl4;
205
	struct rtable *rt;
L
Linus Torvalds 已提交
206
	int err;
207
	struct ip_options_rcu *inet_opt;
208
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
L
Linus Torvalds 已提交
209 210 211 212 213 214 215 216

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
217
	inet_opt = rcu_dereference_protected(inet->inet_opt,
218
					     lockdep_sock_is_held(sk));
219
	if (inet_opt && inet_opt->opt.srr) {
L
Linus Torvalds 已提交
220 221
		if (!daddr)
			return -EINVAL;
222
		nexthop = inet_opt->opt.faddr;
L
Linus Torvalds 已提交
223 224
	}

225 226
	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
227 228
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
229 230
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
231
			      orig_sport, orig_dport, sk);
232 233 234
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
235
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
236
		return err;
237
	}
L
Linus Torvalds 已提交
238 239 240 241 242 243

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

244
	if (!inet_opt || !inet_opt->opt.srr)
245
		daddr = fl4->daddr;
L
Linus Torvalds 已提交
246

E
Eric Dumazet 已提交
247
	if (!inet->inet_saddr)
248
		inet->inet_saddr = fl4->saddr;
249
	sk_rcv_saddr_set(sk, inet->inet_saddr);
L
Linus Torvalds 已提交
250

E
Eric Dumazet 已提交
251
	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
L
Linus Torvalds 已提交
252 253 254
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
P
Pavel Emelyanov 已提交
255
		if (likely(!tp->repair))
256
			WRITE_ONCE(tp->write_seq, 0);
L
Linus Torvalds 已提交
257 258
	}

E
Eric Dumazet 已提交
259
	inet->inet_dport = usin->sin_port;
260
	sk_daddr_set(sk, daddr);
L
Linus Torvalds 已提交
261

262
	inet_csk(sk)->icsk_ext_hdr_len = 0;
263 264
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
L
Linus Torvalds 已提交
265

266
	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
L
Linus Torvalds 已提交
267 268 269 270 271 272 273

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and not releasing socket
	 * lock select source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
274
	err = inet_hash_connect(tcp_death_row, sk);
L
Linus Torvalds 已提交
275 276 277
	if (err)
		goto failure;

278
	sk_set_txhash(sk);
279

280
	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
281 282 283 284
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
L
Linus Torvalds 已提交
285
		goto failure;
286
	}
L
Linus Torvalds 已提交
287
	/* OK, now commit destination to socket.  */
288
	sk->sk_gso_type = SKB_GSO_TCPV4;
289
	sk_setup_caps(sk, &rt->dst);
W
Wei Wang 已提交
290
	rt = NULL;
L
Linus Torvalds 已提交
291

292 293
	if (likely(!tp->repair)) {
		if (!tp->write_seq)
294 295 296 297 298
			WRITE_ONCE(tp->write_seq,
				   secure_tcp_seq(inet->inet_saddr,
						  inet->inet_daddr,
						  inet->inet_sport,
						  usin->sin_port));
299 300
		tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
						 inet->inet_saddr,
301
						 inet->inet_daddr);
302
	}
L
Linus Torvalds 已提交
303

304
	inet->inet_id = prandom_u32();
L
Linus Torvalds 已提交
305

W
Wei Wang 已提交
306 307 308 309 310
	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto failure;

A
Andrey Vagin 已提交
311
	err = tcp_connect(sk);
P
Pavel Emelyanov 已提交
312

L
Linus Torvalds 已提交
313 314 315 316 317 318
	if (err)
		goto failure;

	return 0;

failure:
319 320 321 322
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
L
Linus Torvalds 已提交
323 324 325
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
E
Eric Dumazet 已提交
326
	inet->inet_dport = 0;
L
Linus Torvalds 已提交
327 328
	return err;
}
E
Eric Dumazet 已提交
329
EXPORT_SYMBOL(tcp_v4_connect);
L
Linus Torvalds 已提交
330 331

/*
332 333 334
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
L
Linus Torvalds 已提交
335
 */
336
void tcp_v4_mtu_reduced(struct sock *sk)
L
Linus Torvalds 已提交
337 338
{
	struct inet_sock *inet = inet_sk(sk);
339 340
	struct dst_entry *dst;
	u32 mtu;
L
Linus Torvalds 已提交
341

342 343 344
	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;
	mtu = tcp_sk(sk)->mtu_info;
345 346
	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
L
Linus Torvalds 已提交
347 348 349 350 351 352 353 354 355 356 357
		return;

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
358
	    ip_sk_accept_pmtu(sk) &&
359
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
L
Linus Torvalds 已提交
360 361 362 363 364 365 366 367 368 369
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
370
EXPORT_SYMBOL(tcp_v4_mtu_reduced);
L
Linus Torvalds 已提交
371

372 373 374 375
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

376
	if (dst)
377
		dst->ops->redirect(dst, sk, skb);
378 379
}

380 381

/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
382
void tcp_req_err(struct sock *sk, u32 seq, bool abort)
383 384 385 386 387 388 389 390
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/* ICMPs are not backlogged, hence we cannot get
	 * an established socket here.
	 */
	if (seq != tcp_rsk(req)->snt_isn) {
391
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
392
	} else if (abort) {
393 394 395 396 397 398
		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
F
Fan Du 已提交
399
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
400
		tcp_listendrop(req->rsk_listener);
401
	}
402
	reqsk_put(req);
403 404 405
}
EXPORT_SYMBOL(tcp_req_err);

406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444
/* TCP-LD (RFC 6069) logic */
static void tcp_ld_RTO_revert(struct sock *sk, u32 seq)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	s32 remaining;
	u32 delta_us;

	if (sock_owned_by_user(sk))
		return;

	if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
	    !icsk->icsk_backoff)
		return;

	skb = tcp_rtx_queue_head(sk);
	if (WARN_ON_ONCE(!skb))
		return;

	icsk->icsk_backoff--;
	icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) : TCP_TIMEOUT_INIT;
	icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

	tcp_mstamp_refresh(tp);
	delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
	remaining = icsk->icsk_rto - usecs_to_jiffies(delta_us);

	if (remaining > 0) {
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  remaining, TCP_RTO_MAX);
	} else {
		/* RTO revert clocked out retransmission.
		 * Will retransmit now.
		 */
		tcp_retransmit_timer(sk);
	}
}

L
Linus Torvalds 已提交
445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

461
int tcp_v4_err(struct sk_buff *skb, u32 info)
L
Linus Torvalds 已提交
462
{
463 464
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
L
Linus Torvalds 已提交
465 466
	struct tcp_sock *tp;
	struct inet_sock *inet;
467 468
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
L
Linus Torvalds 已提交
469
	struct sock *sk;
470
	struct request_sock *fastopen;
471
	u32 seq, snd_una;
L
Linus Torvalds 已提交
472
	int err;
473
	struct net *net = dev_net(skb->dev);
L
Linus Torvalds 已提交
474

475 476
	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
				       th->dest, iph->saddr, ntohs(th->source),
477
				       inet_iif(skb), 0);
L
Linus Torvalds 已提交
478
	if (!sk) {
E
Eric Dumazet 已提交
479
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
480
		return -ENOENT;
L
Linus Torvalds 已提交
481 482
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
483
		inet_twsk_put(inet_twsk(sk));
484
		return 0;
L
Linus Torvalds 已提交
485
	}
486
	seq = ntohl(th->seq);
487 488 489 490 491 492 493 494
	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		tcp_req_err(sk, seq, type == ICMP_PARAMETERPROB ||
				     type == ICMP_TIME_EXCEEDED ||
				     (type == ICMP_DEST_UNREACH &&
				      (code == ICMP_NET_UNREACH ||
				       code == ICMP_HOST_UNREACH)));
		return 0;
	}
L
Linus Torvalds 已提交
495 496 497 498

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
499 500
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
L
Linus Torvalds 已提交
501
	 */
502 503
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
504
			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
505
	}
L
Linus Torvalds 已提交
506 507 508
	if (sk->sk_state == TCP_CLOSE)
		goto out;

509
	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
510
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
511 512 513
		goto out;
	}

L
Linus Torvalds 已提交
514
	tp = tcp_sk(sk);
515
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
516
	fastopen = rcu_dereference(tp->fastopen_rsk);
517
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
L
Linus Torvalds 已提交
518
	if (sk->sk_state != TCP_LISTEN &&
519
	    !between(seq, snd_una, tp->snd_nxt)) {
520
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
L
Linus Torvalds 已提交
521 522 523 524
		goto out;
	}

	switch (type) {
525
	case ICMP_REDIRECT:
526
		if (!sock_owned_by_user(sk))
527
			do_redirect(skb, sk);
528
		goto out;
L
Linus Torvalds 已提交
529 530 531 532 533 534 535 536 537 538 539
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
540 541 542 543 544 545 546
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs send out by Linux are always <576bytes so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

547
			tp->mtu_info = info;
548
			if (!sock_owned_by_user(sk)) {
549
				tcp_v4_mtu_reduced(sk);
550
			} else {
551
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
552 553
					sock_hold(sk);
			}
L
Linus Torvalds 已提交
554 555 556 557
			goto out;
		}

		err = icmp_err_convert[code].errno;
558 559 560 561 562 563
		/* check if this ICMP message allows revert of backoff.
		 * (see RFC 6069)
		 */
		if (!fastopen &&
		    (code == ICMP_NET_UNREACH || code == ICMP_HOST_UNREACH))
			tcp_ld_RTO_revert(sk, seq);
L
Linus Torvalds 已提交
564 565 566 567 568 569 570 571 572 573
		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
	case TCP_SYN_SENT:
574 575 576 577
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * is already accepted it is treated as a connected one below.
		 */
578
		if (fastopen && !fastopen->sk)
579 580
			break;

581
		ip_icmp_error(sk, skb, err, th->dest, info, (u8 *)th);
582

L
Linus Torvalds 已提交
583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even this two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 *							--ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
622
	return 0;
L
Linus Torvalds 已提交
623 624
}

625
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
L
Linus Torvalds 已提交
626
{
627
	struct tcphdr *th = tcp_hdr(skb);
L
Linus Torvalds 已提交
628

629 630 631
	th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
L
Linus Torvalds 已提交
632 633
}

634
/* This routine computes an IPv4 TCP checksum. */
635
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
636
{
637
	const struct inet_sock *inet = inet_sk(sk);
638 639 640

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
E
Eric Dumazet 已提交
641
EXPORT_SYMBOL(tcp_v4_send_check);
642

L
Linus Torvalds 已提交
643 644 645 646 647 648 649 650 651 652 653 654 655
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So that we build reply only basing on parameters
 *		arrived with segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

656
static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
L
Linus Torvalds 已提交
657
{
658
	const struct tcphdr *th = tcp_hdr(skb);
659 660 661
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
662
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
663 664
#endif
	} rep;
L
Linus Torvalds 已提交
665
	struct ip_reply_arg arg;
666
#ifdef CONFIG_TCP_MD5SIG
667
	struct tcp_md5sig_key *key = NULL;
668 669 670 671
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
672
#endif
673
	u64 transmit_time = 0;
J
Jon Maxwell 已提交
674
	struct sock *ctl_sk;
675
	struct net *net;
L
Linus Torvalds 已提交
676 677 678 679 680

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

681 682 683 684
	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
L
Linus Torvalds 已提交
685 686 687
		return;

	/* Swap the send and the receive. */
688 689 690 691 692
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;
L
Linus Torvalds 已提交
693 694

	if (th->ack) {
695
		rep.th.seq = th->ack_seq;
L
Linus Torvalds 已提交
696
	} else {
697 698 699
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
L
Linus Torvalds 已提交
700 701
	}

702
	memset(&arg, 0, sizeof(arg));
703 704 705
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

706
	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
707
#ifdef CONFIG_TCP_MD5SIG
708
	rcu_read_lock();
709
	hash_location = tcp_parse_md5sig_option(th);
710
	if (sk && sk_fullsock(sk)) {
711
		const union tcp_md5_addr *addr;
712
		int l3index;
713

714 715 716 717
		/* sdif set, means packet ingressed via a device
		 * in an L3 domain and inet_iif is set to it.
		 */
		l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
718
		addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
719
		key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
720
	} else if (hash_location) {
721
		const union tcp_md5_addr *addr;
722 723
		int sdif = tcp_v4_sdif(skb);
		int dif = inet_iif(skb);
724
		int l3index;
725

726 727 728 729 730 731 732
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we are not loose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
733 734
		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
					     ip_hdr(skb)->saddr,
735
					     th->source, ip_hdr(skb)->daddr,
736
					     ntohs(th->source), dif, sdif);
737 738
		/* don't send rst if it can't find key */
		if (!sk1)
739 740
			goto out;

741 742 743 744
		/* sdif set, means packet ingressed via a device
		 * in an L3 domain and dif is set to it.
		 */
		l3index = sdif ? dif : 0;
745
		addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
746
		key = tcp_md5_do_lookup(sk1, l3index, addr, AF_INET);
747
		if (!key)
748 749
			goto out;

750

751
		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
752
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
753 754
			goto out;

755 756
	}

757 758 759 760 761 762 763 764 765
	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

766
		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
767 768
				     key, ip_hdr(skb)->saddr,
				     ip_hdr(skb)->daddr, &rep.th);
769 770
	}
#endif
771 772
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
773
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
L
Linus Torvalds 已提交
774
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
775 776
	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;

777
	/* When socket is gone, all binding information is lost.
A
Alexey Kuznetsov 已提交
778 779
	 * routing might fail in this case. No choice here, if we choose to force
	 * input interface, we will misroute in case of asymmetric route.
780
	 */
781
	if (sk) {
A
Alexey Kuznetsov 已提交
782
		arg.bound_dev_if = sk->sk_bound_dev_if;
783 784
		if (sk_fullsock(sk))
			trace_tcp_send_reset(sk, skb);
785
	}
L
Linus Torvalds 已提交
786

787 788 789
	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));

790
	arg.tos = ip_hdr(skb)->tos;
791
	arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
792
	local_bh_disable();
793
	ctl_sk = this_cpu_read(*net->ipv4.tcp_sk);
794
	if (sk) {
J
Jon Maxwell 已提交
795 796
		ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
				   inet_twsk(sk)->tw_mark : sk->sk_mark;
797 798
		ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
				   inet_twsk(sk)->tw_priority : sk->sk_priority;
799
		transmit_time = tcp_transmit_time(sk);
800
	}
J
Jon Maxwell 已提交
801
	ip_send_unicast_reply(ctl_sk,
802
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
803
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
804 805
			      &arg, arg.iov[0].iov_len,
			      transmit_time);
L
Linus Torvalds 已提交
806

J
Jon Maxwell 已提交
807
	ctl_sk->sk_mark = 0;
E
Eric Dumazet 已提交
808 809
	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
810
	local_bh_enable();
811 812

#ifdef CONFIG_TCP_MD5SIG
813 814
out:
	rcu_read_unlock();
815
#endif
L
Linus Torvalds 已提交
816 817 818 819 820 821
}

/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

822
static void tcp_v4_send_ack(const struct sock *sk,
823
			    struct sk_buff *skb, u32 seq, u32 ack,
824
			    u32 win, u32 tsval, u32 tsecr, int oif,
825
			    struct tcp_md5sig_key *key,
826
			    int reply_flags, u8 tos)
L
Linus Torvalds 已提交
827
{
828
	const struct tcphdr *th = tcp_hdr(skb);
L
Linus Torvalds 已提交
829 830
	struct {
		struct tcphdr th;
831
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
832
#ifdef CONFIG_TCP_MD5SIG
833
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
834 835
#endif
			];
L
Linus Torvalds 已提交
836
	} rep;
837
	struct net *net = sock_net(sk);
L
Linus Torvalds 已提交
838
	struct ip_reply_arg arg;
J
Jon Maxwell 已提交
839
	struct sock *ctl_sk;
840
	u64 transmit_time;
L
Linus Torvalds 已提交
841 842

	memset(&rep.th, 0, sizeof(struct tcphdr));
843
	memset(&arg, 0, sizeof(arg));
L
Linus Torvalds 已提交
844 845 846

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
847
	if (tsecr) {
848 849 850
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
851 852
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
853
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
L
Linus Torvalds 已提交
854 855 856 857 858 859 860 861 862 863 864
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

865 866
#ifdef CONFIG_TCP_MD5SIG
	if (key) {
867
		int offset = (tsecr) ? 3 : 0;
868 869 870 871 872 873 874 875

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

876
		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
877 878
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
879 880
	}
#endif
881
	arg.flags = reply_flags;
882 883
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
L
Linus Torvalds 已提交
884 885
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
886 887
	if (oif)
		arg.bound_dev_if = oif;
888
	arg.tos = tos;
889
	arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
890
	local_bh_disable();
891
	ctl_sk = this_cpu_read(*net->ipv4.tcp_sk);
892 893
	ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
			   inet_twsk(sk)->tw_mark : sk->sk_mark;
894 895
	ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
			   inet_twsk(sk)->tw_priority : sk->sk_priority;
896
	transmit_time = tcp_transmit_time(sk);
J
Jon Maxwell 已提交
897
	ip_send_unicast_reply(ctl_sk,
898
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
899
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
900 901
			      &arg, arg.iov[0].iov_len,
			      transmit_time);
L
Linus Torvalds 已提交
902

J
Jon Maxwell 已提交
903
	ctl_sk->sk_mark = 0;
E
Eric Dumazet 已提交
904
	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
905
	local_bh_enable();
L
Linus Torvalds 已提交
906 907 908 909
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
910
	struct inet_timewait_sock *tw = inet_twsk(sk);
911
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
L
Linus Torvalds 已提交
912

913
	tcp_v4_send_ack(sk, skb,
914
			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
915
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
916
			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
917 918
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
919
			tcp_twsk_md5_key(tcptw),
920 921
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
922
			);
L
Linus Torvalds 已提交
923

924
	inet_twsk_put(tw);
L
Linus Torvalds 已提交
925 926
}

927
static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
928
				  struct request_sock *req)
L
Linus Torvalds 已提交
929
{
930
	const union tcp_md5_addr *addr;
931
	int l3index;
932

933 934 935
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
936 937 938
	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
					     tcp_sk(sk)->snd_nxt;

939 940 941 942 943
	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
944
	addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
945
	l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
946
	tcp_v4_send_ack(sk, skb, seq,
947 948
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
949
			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
950 951
			req->ts_recent,
			0,
952
			tcp_md5_do_lookup(sk, l3index, addr, AF_INET),
953 954
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
L
Linus Torvalds 已提交
955 956 957
}

/*
958
 *	Send a SYN-ACK after having received a SYN.
959
 *	This still operates on a request_sock only, not on a big
L
Linus Torvalds 已提交
960 961
 *	socket.
 */
962
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
963
			      struct flowi *fl,
964
			      struct request_sock *req,
965
			      struct tcp_fastopen_cookie *foc,
966
			      enum tcp_synack_type synack_type)
L
Linus Torvalds 已提交
967
{
968
	const struct inet_request_sock *ireq = inet_rsk(req);
969
	struct flowi4 fl4;
L
Linus Torvalds 已提交
970
	int err = -1;
971
	struct sk_buff *skb;
L
Linus Torvalds 已提交
972 973

	/* First, grab a route. */
974
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
975
		return -1;
L
Linus Torvalds 已提交
976

977
	skb = tcp_make_synack(sk, dst, req, foc, synack_type);
L
Linus Torvalds 已提交
978 979

	if (skb) {
980
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
L
Linus Torvalds 已提交
981

982
		rcu_read_lock();
983 984
		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
985 986
					    rcu_dereference(ireq->ireq_opt));
		rcu_read_unlock();
987
		err = net_xmit_eval(err);
L
Linus Torvalds 已提交
988 989 990 991 992 993
	}

	return err;
}

/*
994
 *	IPv4 request_sock destructor.
L
Linus Torvalds 已提交
995
 */
996
static void tcp_v4_reqsk_destructor(struct request_sock *req)
L
Linus Torvalds 已提交
997
{
E
Eric Dumazet 已提交
998
	kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
L
Linus Torvalds 已提交
999 1000
}

1001 1002 1003 1004 1005 1006 1007
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

1008
DEFINE_STATIC_KEY_FALSE(tcp_md5_needed);
1009 1010
EXPORT_SYMBOL(tcp_md5_needed);

1011
/* Find the Key structure for an address.  */
1012
struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
1013 1014
					   const union tcp_md5_addr *addr,
					   int family)
1015
{
1016
	const struct tcp_sock *tp = tcp_sk(sk);
E
Eric Dumazet 已提交
1017
	struct tcp_md5sig_key *key;
1018
	const struct tcp_md5sig_info *md5sig;
1019 1020 1021
	__be32 mask;
	struct tcp_md5sig_key *best_match = NULL;
	bool match;
1022

1023 1024
	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
1025
				       lockdep_sock_is_held(sk));
1026
	if (!md5sig)
1027
		return NULL;
A
Arnd Bergmann 已提交
1028

1029 1030
	hlist_for_each_entry_rcu(key, &md5sig->head, node,
				 lockdep_sock_is_held(sk)) {
E
Eric Dumazet 已提交
1031 1032
		if (key->family != family)
			continue;
1033 1034
		if (key->l3index && key->l3index != l3index)
			continue;
1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053
		if (family == AF_INET) {
			mask = inet_make_mask(key->prefixlen);
			match = (key->addr.a4.s_addr & mask) ==
				(addr->a4.s_addr & mask);
#if IS_ENABLED(CONFIG_IPV6)
		} else if (family == AF_INET6) {
			match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
						  key->prefixlen);
#endif
		} else {
			match = false;
		}

		if (match && (!best_match ||
			      key->prefixlen > best_match->prefixlen))
			best_match = key;
	}
	return best_match;
}
1054
EXPORT_SYMBOL(__tcp_md5_do_lookup);
1055

1056 1057
static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
						      const union tcp_md5_addr *addr,
1058 1059
						      int family, u8 prefixlen,
						      int l3index)
1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	const struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
1075 1076
	hlist_for_each_entry_rcu(key, &md5sig->head, node,
				 lockdep_sock_is_held(sk)) {
1077 1078
		if (key->family != family)
			continue;
1079 1080
		if (key->l3index && key->l3index != l3index)
			continue;
1081 1082
		if (!memcmp(&key->addr, addr, size) &&
		    key->prefixlen == prefixlen)
E
Eric Dumazet 已提交
1083
			return key;
1084 1085 1086 1087
	}
	return NULL;
}

1088
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1089
					 const struct sock *addr_sk)
1090
{
1091
	const union tcp_md5_addr *addr;
1092
	int l3index;
E
Eric Dumazet 已提交
1093

1094 1095
	l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
						 addr_sk->sk_bound_dev_if);
1096
	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
1097
	return tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
1098 1099 1100 1101
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

/* This can be called on a newly created socket, from other files */
E
Eric Dumazet 已提交
1102
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1103 1104
		   int family, u8 prefixlen, int l3index,
		   const u8 *newkey, u8 newkeylen, gfp_t gfp)
1105 1106
{
	/* Add Key to the list */
1107
	struct tcp_md5sig_key *key;
1108
	struct tcp_sock *tp = tcp_sk(sk);
E
Eric Dumazet 已提交
1109
	struct tcp_md5sig_info *md5sig;
1110

1111
	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index);
1112 1113
	if (key) {
		/* Pre-existing entry - just update that one. */
E
Eric Dumazet 已提交
1114
		memcpy(key->key, newkey, newkeylen);
1115
		key->keylen = newkeylen;
E
Eric Dumazet 已提交
1116 1117
		return 0;
	}
1118

1119
	md5sig = rcu_dereference_protected(tp->md5sig_info,
1120
					   lockdep_sock_is_held(sk));
E
Eric Dumazet 已提交
1121 1122 1123
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
1124 1125
			return -ENOMEM;

E
Eric Dumazet 已提交
1126 1127
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
1128
		rcu_assign_pointer(tp->md5sig_info, md5sig);
E
Eric Dumazet 已提交
1129
	}
1130

1131
	key = sock_kmalloc(sk, sizeof(*key), gfp);
E
Eric Dumazet 已提交
1132 1133
	if (!key)
		return -ENOMEM;
1134
	if (!tcp_alloc_md5sig_pool()) {
1135
		sock_kfree_s(sk, key, sizeof(*key));
E
Eric Dumazet 已提交
1136
		return -ENOMEM;
1137
	}
E
Eric Dumazet 已提交
1138 1139 1140 1141

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
1142
	key->prefixlen = prefixlen;
1143
	key->l3index = l3index;
E
Eric Dumazet 已提交
1144 1145 1146 1147
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
1148 1149
	return 0;
}
E
Eric Dumazet 已提交
1150
EXPORT_SYMBOL(tcp_md5_do_add);
1151

1152
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
1153
		   u8 prefixlen, int l3index)
1154
{
E
Eric Dumazet 已提交
1155 1156
	struct tcp_md5sig_key *key;

1157
	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index);
E
Eric Dumazet 已提交
1158 1159 1160
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
1161
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
E
Eric Dumazet 已提交
1162 1163
	kfree_rcu(key, rcu);
	return 0;
1164
}
E
Eric Dumazet 已提交
1165
EXPORT_SYMBOL(tcp_md5_do_del);
1166

1167
static void tcp_clear_md5_list(struct sock *sk)
1168 1169
{
	struct tcp_sock *tp = tcp_sk(sk);
E
Eric Dumazet 已提交
1170
	struct tcp_md5sig_key *key;
1171
	struct hlist_node *n;
1172
	struct tcp_md5sig_info *md5sig;
1173

1174 1175
	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

1176
	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
E
Eric Dumazet 已提交
1177
		hlist_del_rcu(&key->node);
1178
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
E
Eric Dumazet 已提交
1179
		kfree_rcu(key, rcu);
1180 1181 1182
	}
}

1183 1184
static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
				 char __user *optval, int optlen)
1185 1186 1187
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1188
	const union tcp_md5_addr *addr;
1189
	u8 prefixlen = 32;
1190
	int l3index = 0;
1191 1192 1193 1194

	if (optlen < sizeof(cmd))
		return -EINVAL;

1195
	if (copy_from_user(&cmd, optval, sizeof(cmd)))
1196 1197 1198 1199 1200
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

1201 1202 1203 1204 1205 1206 1207
	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
		prefixlen = cmd.tcpm_prefixlen;
		if (prefixlen > 32)
			return -EINVAL;
	}

1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225
	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_index_rcu(sock_net(sk), cmd.tcpm_ifindex);
		if (dev && netif_is_l3_master(dev))
			l3index = dev->ifindex;

		rcu_read_unlock();

		/* ok to reference set/not set outside of rcu;
		 * right now device MUST be an L3 master
		 */
		if (!dev || !l3index)
			return -EINVAL;
	}

1226 1227
	addr = (union tcp_md5_addr *)&sin->sin_addr.s_addr;

1228
	if (!cmd.tcpm_keylen)
1229
		return tcp_md5_do_del(sk, addr, AF_INET, prefixlen, l3index);
1230 1231 1232 1233

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

1234
	return tcp_md5_do_add(sk, addr, AF_INET, prefixlen, l3index,
1235
			      cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
1236 1237
}

1238 1239 1240
static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   __be32 daddr, __be32 saddr,
				   const struct tcphdr *th, int nbytes)
1241 1242
{
	struct tcp4_pseudohdr *bp;
1243
	struct scatterlist sg;
1244
	struct tcphdr *_th;
1245

1246
	bp = hp->scratch;
1247 1248 1249
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
1250
	bp->protocol = IPPROTO_TCP;
1251
	bp->len = cpu_to_be16(nbytes);
1252

1253 1254 1255 1256 1257 1258 1259
	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
H
Herbert Xu 已提交
1260
	return crypto_ahash_update(hp->md5_req);
1261 1262
}

E
Eric Dumazet 已提交
1263
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
E
Eric Dumazet 已提交
1264
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
1265 1266
{
	struct tcp_md5sig_pool *hp;
H
Herbert Xu 已提交
1267
	struct ahash_request *req;
1268 1269 1270 1271

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
H
Herbert Xu 已提交
1272
	req = hp->md5_req;
1273

H
Herbert Xu 已提交
1274
	if (crypto_ahash_init(req))
1275
		goto clear_hash;
1276
	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
1277 1278 1279
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
H
Herbert Xu 已提交
1280 1281
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
1282 1283 1284 1285
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;
1286

1287 1288 1289 1290
clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
1291
	return 1;
1292 1293
}

1294 1295
int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk,
E
Eric Dumazet 已提交
1296
			const struct sk_buff *skb)
1297
{
1298
	struct tcp_md5sig_pool *hp;
H
Herbert Xu 已提交
1299
	struct ahash_request *req;
E
Eric Dumazet 已提交
1300
	const struct tcphdr *th = tcp_hdr(skb);
1301 1302
	__be32 saddr, daddr;

1303 1304 1305
	if (sk) { /* valid for establish/request sockets */
		saddr = sk->sk_rcv_saddr;
		daddr = sk->sk_daddr;
1306
	} else {
1307 1308 1309
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
1310
	}
1311 1312 1313 1314

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
H
Herbert Xu 已提交
1315
	req = hp->md5_req;
1316

H
Herbert Xu 已提交
1317
	if (crypto_ahash_init(req))
1318 1319
		goto clear_hash;

1320
	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
1321 1322 1323 1324 1325
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
H
Herbert Xu 已提交
1326 1327
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
1328 1329 1330 1331 1332 1333 1334 1335 1336 1337
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
1338
}
1339
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1340

1341 1342
#endif

1343
/* Called with rcu_read_lock() */
1344
static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
1345 1346
				    const struct sk_buff *skb,
				    int dif, int sdif)
1347
{
1348
#ifdef CONFIG_TCP_MD5SIG
1349 1350 1351 1352 1353 1354 1355 1356
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and its wrong.
	 */
1357
	const __u8 *hash_location = NULL;
1358
	struct tcp_md5sig_key *hash_expected;
1359
	const struct iphdr *iph = ip_hdr(skb);
1360
	const struct tcphdr *th = tcp_hdr(skb);
1361
	const union tcp_md5_addr *addr;
1362
	unsigned char newhash[16];
1363 1364 1365 1366 1367 1368
	int genhash, l3index;

	/* sdif set, means packet ingressed via a device
	 * in an L3 domain and dif is set to the l3mdev
	 */
	l3index = sdif ? dif : 0;
1369

1370
	addr = (union tcp_md5_addr *)&iph->saddr;
1371
	hash_expected = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
1372
	hash_location = tcp_parse_md5sig_option(th);
1373 1374 1375

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
E
Eric Dumazet 已提交
1376
		return false;
1377 1378

	if (hash_expected && !hash_location) {
1379
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
E
Eric Dumazet 已提交
1380
		return true;
1381 1382 1383
	}

	if (!hash_expected && hash_location) {
1384
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
E
Eric Dumazet 已提交
1385
		return true;
1386 1387 1388 1389 1390
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
1391 1392
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
1393
				      NULL, skb);
1394 1395

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1396
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
1397
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s L3 index %d\n",
1398 1399 1400
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
1401
				     : "", l3index);
E
Eric Dumazet 已提交
1402
		return true;
1403
	}
E
Eric Dumazet 已提交
1404
	return false;
1405
#endif
1406 1407
	return false;
}
1408

1409 1410
static void tcp_v4_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
1411 1412 1413
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
E
Eric Dumazet 已提交
1414
	struct net *net = sock_net(sk_listener);
1415

1416 1417
	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
E
Eric Dumazet 已提交
1418
	RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
1419 1420
}

1421 1422
static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
					  struct flowi *fl,
1423
					  const struct request_sock *req)
1424
{
1425
	return inet_csk_route_req(sk, &fl->u.ip4, req);
1426 1427
}

1428
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
L
Linus Torvalds 已提交
1429
	.family		=	PF_INET,
1430
	.obj_size	=	sizeof(struct tcp_request_sock),
1431
	.rtx_syn_ack	=	tcp_rtx_synack,
1432 1433
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
L
Linus Torvalds 已提交
1434
	.send_reset	=	tcp_v4_send_reset,
S
stephen hemminger 已提交
1435
	.syn_ack_timeout =	tcp_syn_ack_timeout,
L
Linus Torvalds 已提交
1436 1437
};

1438
const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1439
	.mss_clamp	=	TCP_MSS_DEFAULT,
1440
#ifdef CONFIG_TCP_MD5SIG
1441
	.req_md5_lookup	=	tcp_v4_md5_lookup,
1442
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1443
#endif
1444
	.init_req	=	tcp_v4_init_req,
1445 1446 1447
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v4_init_sequence,
#endif
1448
	.route_req	=	tcp_v4_route_req,
1449 1450
	.init_seq	=	tcp_v4_init_seq,
	.init_ts_off	=	tcp_v4_init_ts_off,
1451
	.send_synack	=	tcp_v4_send_synack,
1452
};
1453

L
Linus Torvalds 已提交
1454 1455 1456
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer to SYNs send to broadcast or multicast */
E
Eric Dumazet 已提交
1457
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
L
Linus Torvalds 已提交
1458 1459
		goto drop;

O
Octavian Purdila 已提交
1460 1461
	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);
L
Linus Torvalds 已提交
1462 1463

drop:
1464
	tcp_listendrop(sk);
L
Linus Torvalds 已提交
1465 1466
	return 0;
}
E
Eric Dumazet 已提交
1467
EXPORT_SYMBOL(tcp_v4_conn_request);
L
Linus Torvalds 已提交
1468 1469 1470 1471 1472 1473


/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
1474
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1475
				  struct request_sock *req,
1476 1477 1478
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req)
L
Linus Torvalds 已提交
1479
{
1480
	struct inet_request_sock *ireq;
L
Linus Torvalds 已提交
1481 1482 1483
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
1484
#ifdef CONFIG_TCP_MD5SIG
1485
	const union tcp_md5_addr *addr;
1486
	struct tcp_md5sig_key *key;
1487
	int l3index;
1488
#endif
1489
	struct ip_options_rcu *inet_opt;
L
Linus Torvalds 已提交
1490 1491 1492 1493 1494 1495

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
1496
		goto exit_nonewsk;
L
Linus Torvalds 已提交
1497

1498
	newsk->sk_gso_type = SKB_GSO_TCPV4;
1499
	inet_sk_rx_dst_set(newsk, skb);
L
Linus Torvalds 已提交
1500 1501 1502

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
1503
	ireq		      = inet_rsk(req);
1504 1505
	sk_daddr_set(newsk, ireq->ir_rmt_addr);
	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1506
	newsk->sk_bound_dev_if = ireq->ir_iif;
E
Eric Dumazet 已提交
1507 1508 1509
	newinet->inet_saddr   = ireq->ir_loc_addr;
	inet_opt	      = rcu_dereference(ireq->ireq_opt);
	RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
1510
	newinet->mc_index     = inet_iif(skb);
1511
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
1512
	newinet->rcv_tos      = ip_hdr(skb)->tos;
1513
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1514 1515
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1516
	newinet->inet_id = prandom_u32();
L
Linus Torvalds 已提交
1517

E
Eric Dumazet 已提交
1518 1519 1520 1521 1522 1523 1524
	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
1525 1526
	sk_setup_caps(newsk, dst);

1527 1528
	tcp_ca_openreq_child(newsk, dst);

L
Linus Torvalds 已提交
1529
	tcp_sync_mss(newsk, dst_mtu(dst));
E
Eric Dumazet 已提交
1530
	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
1531

L
Linus Torvalds 已提交
1532 1533
	tcp_initialize_rcv_mss(newsk);

1534
#ifdef CONFIG_TCP_MD5SIG
1535
	l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif);
1536
	/* Copy over the MD5 key from the original socket */
1537
	addr = (union tcp_md5_addr *)&newinet->inet_daddr;
1538
	key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
1539
	if (key) {
1540 1541 1542 1543 1544 1545
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
1546
		tcp_md5_do_add(newsk, addr, AF_INET, 32, l3index,
1547
			       key->key, key->keylen, GFP_ATOMIC);
E
Eric Dumazet 已提交
1548
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1549 1550 1551
	}
#endif

1552 1553
	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
1554
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
E
Eric Dumazet 已提交
1555
	if (likely(*own_req)) {
1556
		tcp_move_syn(newtp, req);
E
Eric Dumazet 已提交
1557 1558 1559 1560
		ireq->ireq_opt = NULL;
	} else {
		newinet->inet_opt = NULL;
	}
L
Linus Torvalds 已提交
1561 1562 1563
	return newsk;

exit_overflow:
1564
	NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1565 1566
exit_nonewsk:
	dst_release(dst);
L
Linus Torvalds 已提交
1567
exit:
1568
	tcp_listendrop(sk);
L
Linus Torvalds 已提交
1569
	return NULL;
1570
put_and_exit:
E
Eric Dumazet 已提交
1571
	newinet->inet_opt = NULL;
1572 1573
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
1574
	goto exit;
L
Linus Torvalds 已提交
1575
}
E
Eric Dumazet 已提交
1576
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
L
Linus Torvalds 已提交
1577

1578
static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
L
Linus Torvalds 已提交
1579
{
1580
#ifdef CONFIG_SYN_COOKIES
1581
	const struct tcphdr *th = tcp_hdr(skb);
L
Linus Torvalds 已提交
1582

1583
	if (!th->syn)
C
Cong Wang 已提交
1584
		sk = cookie_v4_check(sk, skb);
L
Linus Torvalds 已提交
1585 1586 1587 1588
#endif
	return sk;
}

1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603
u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
			 struct tcphdr *th, u32 *cookie)
{
	u16 mss = 0;
#ifdef CONFIG_SYN_COOKIES
	mss = tcp_get_syncookie_mss(&tcp_request_sock_ops,
				    &tcp_request_sock_ipv4_ops, sk, th);
	if (mss) {
		*cookie = __cookie_v4_init_sequence(iph, th, &mss);
		tcp_synq_overflow(sk);
	}
#endif
	return mss;
}

L
Linus Torvalds 已提交
1604
/* The socket must have it's spinlock held when we get
1605
 * here, unless it is a TCP_LISTEN socket.
L
Linus Torvalds 已提交
1606 1607 1608 1609 1610 1611 1612 1613
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
1614 1615
	struct sock *rsk;

L
Linus Torvalds 已提交
1616
	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1617 1618
		struct dst_entry *dst = sk->sk_rx_dst;

1619
		sock_rps_save_rxhash(sk, skb);
1620
		sk_mark_napi_id(sk, skb);
1621
		if (dst) {
E
Eric Dumazet 已提交
1622
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1623
			    !dst->ops->check(dst, 0)) {
1624 1625 1626 1627
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
1628
		tcp_rcv_established(sk, skb);
L
Linus Torvalds 已提交
1629 1630 1631
		return 0;
	}

E
Eric Dumazet 已提交
1632
	if (tcp_checksum_complete(skb))
L
Linus Torvalds 已提交
1633 1634 1635
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
1636 1637
		struct sock *nsk = tcp_v4_cookie_check(sk, skb);

L
Linus Torvalds 已提交
1638 1639 1640
		if (!nsk)
			goto discard;
		if (nsk != sk) {
1641 1642
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
L
Linus Torvalds 已提交
1643
				goto reset;
1644
			}
L
Linus Torvalds 已提交
1645 1646
			return 0;
		}
1647
	} else
1648
		sock_rps_save_rxhash(sk, skb);
1649

1650
	if (tcp_rcv_state_process(sk, skb)) {
1651
		rsk = sk;
L
Linus Torvalds 已提交
1652
		goto reset;
1653
	}
L
Linus Torvalds 已提交
1654 1655 1656
	return 0;

reset:
1657
	tcp_v4_send_reset(rsk, skb);
L
Linus Torvalds 已提交
1658 1659 1660 1661 1662 1663 1664 1665 1666 1667
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
1668 1669
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
L
Linus Torvalds 已提交
1670 1671
	goto discard;
}
E
Eric Dumazet 已提交
1672
EXPORT_SYMBOL(tcp_v4_do_rcv);
L
Linus Torvalds 已提交
1673

1674
int tcp_v4_early_demux(struct sk_buff *skb)
D
David S. Miller 已提交
1675 1676 1677 1678 1679 1680
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
1681
		return 0;
D
David S. Miller 已提交
1682

1683
	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1684
		return 0;
D
David S. Miller 已提交
1685 1686

	iph = ip_hdr(skb);
1687
	th = tcp_hdr(skb);
D
David S. Miller 已提交
1688 1689

	if (th->doff < sizeof(struct tcphdr) / 4)
1690
		return 0;
D
David S. Miller 已提交
1691

1692
	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
D
David S. Miller 已提交
1693
				       iph->saddr, th->source,
1694
				       iph->daddr, ntohs(th->dest),
1695
				       skb->skb_iif, inet_sdif(skb));
D
David S. Miller 已提交
1696 1697 1698
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
1699
		if (sk_fullsock(sk)) {
1700
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
E
Eric Dumazet 已提交
1701

D
David S. Miller 已提交
1702 1703
			if (dst)
				dst = dst_check(dst, 0);
1704
			if (dst &&
E
Eric Dumazet 已提交
1705
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1706
				skb_dst_set_noref(skb, dst);
D
David S. Miller 已提交
1707 1708
		}
	}
1709
	return 0;
D
David S. Miller 已提交
1710 1711
}

E
Eric Dumazet 已提交
1712 1713
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
{
1714
	u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf);
1715 1716 1717 1718 1719 1720 1721 1722
	struct skb_shared_info *shinfo;
	const struct tcphdr *th;
	struct tcphdr *thtail;
	struct sk_buff *tail;
	unsigned int hdrlen;
	bool fragstolen;
	u32 gso_segs;
	int delta;
E
Eric Dumazet 已提交
1723 1724 1725 1726 1727 1728 1729

	/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
	 * we can fix skb->truesize to its real value to avoid future drops.
	 * This is valid because skb is not yet charged to the socket.
	 * It has been noticed pure SACK packets were sometimes dropped
	 * (if cooked by drivers without copybreak feature).
	 */
1730
	skb_condense(skb);
E
Eric Dumazet 已提交
1731

E
Eric Dumazet 已提交
1732 1733
	skb_dst_drop(skb);

1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762
	if (unlikely(tcp_checksum_complete(skb))) {
		bh_unlock_sock(sk);
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
		return true;
	}

	/* Attempt coalescing to last skb in backlog, even if we are
	 * above the limits.
	 * This is okay because skb capacity is limited to MAX_SKB_FRAGS.
	 */
	th = (const struct tcphdr *)skb->data;
	hdrlen = th->doff * 4;
	shinfo = skb_shinfo(skb);

	if (!shinfo->gso_size)
		shinfo->gso_size = skb->len - hdrlen;

	if (!shinfo->gso_segs)
		shinfo->gso_segs = 1;

	tail = sk->sk_backlog.tail;
	if (!tail)
		goto no_coalesce;
	thtail = (struct tcphdr *)tail->data;

	if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
	    TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield ||
	    ((TCP_SKB_CB(tail)->tcp_flags |
1763 1764 1765
	      TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_SYN | TCPHDR_RST | TCPHDR_URG)) ||
	    !((TCP_SKB_CB(tail)->tcp_flags &
	      TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783
	    ((TCP_SKB_CB(tail)->tcp_flags ^
	      TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
#ifdef CONFIG_TLS_DEVICE
	    tail->decrypted != skb->decrypted ||
#endif
	    thtail->doff != th->doff ||
	    memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
		goto no_coalesce;

	__skb_pull(skb, hdrlen);
	if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
		thtail->window = th->window;

		TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;

		if (after(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))
			TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq;

1784 1785 1786 1787 1788 1789 1790 1791 1792
		/* We have to update both TCP_SKB_CB(tail)->tcp_flags and
		 * thtail->fin, so that the fast path in tcp_rcv_established()
		 * is not entered if we append a packet with a FIN.
		 * SYN, RST, URG are not present.
		 * ACK is set on both packets.
		 * PSH : we do not really care in TCP stack,
		 *       at least for 'GRO' packets.
		 */
		thtail->fin |= th->fin;
1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822
		TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;

		if (TCP_SKB_CB(skb)->has_rxtstamp) {
			TCP_SKB_CB(tail)->has_rxtstamp = true;
			tail->tstamp = skb->tstamp;
			skb_hwtstamps(tail)->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
		}

		/* Not as strict as GRO. We only need to carry mss max value */
		skb_shinfo(tail)->gso_size = max(shinfo->gso_size,
						 skb_shinfo(tail)->gso_size);

		gso_segs = skb_shinfo(tail)->gso_segs + shinfo->gso_segs;
		skb_shinfo(tail)->gso_segs = min_t(u32, gso_segs, 0xFFFF);

		sk->sk_backlog.len += delta;
		__NET_INC_STATS(sock_net(sk),
				LINUX_MIB_TCPBACKLOGCOALESCE);
		kfree_skb_partial(skb, fragstolen);
		return false;
	}
	__skb_push(skb, hdrlen);

no_coalesce:
	/* Only socket owner can try to collapse/prune rx queues
	 * to reduce memory overhead, so add a little headroom here.
	 * Few sockets backlog are possibly concurrently non empty.
	 */
	limit += 64*1024;

E
Eric Dumazet 已提交
1823 1824 1825 1826 1827 1828 1829 1830 1831
	if (unlikely(sk_add_backlog(sk, skb, limit))) {
		bh_unlock_sock(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
		return true;
	}
	return false;
}
EXPORT_SYMBOL(tcp_add_backlog);

1832 1833 1834 1835
int tcp_filter(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = (struct tcphdr *)skb->data;

1836
	return sk_filter_trim_cap(sk, skb, th->doff * 4);
1837 1838 1839
}
EXPORT_SYMBOL(tcp_filter);

1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867
static void tcp_v4_restore_cb(struct sk_buff *skb)
{
	memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
		sizeof(struct inet_skb_parm));
}

static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
			   const struct tcphdr *th)
{
	/* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
	 * barrier() makes sure compiler wont play fool^Waliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
		sizeof(struct inet_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;
	TCP_SKB_CB(skb)->has_rxtstamp =
			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
}

L
Linus Torvalds 已提交
1868 1869 1870 1871 1872 1873
/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
1874
	struct net *net = dev_net(skb->dev);
E
Eric Dumazet 已提交
1875
	struct sk_buff *skb_to_free;
1876
	int sdif = inet_sdif(skb);
1877
	int dif = inet_iif(skb);
1878
	const struct iphdr *iph;
1879
	const struct tcphdr *th;
1880
	bool refcounted;
L
Linus Torvalds 已提交
1881 1882 1883 1884 1885 1886 1887
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
E
Eric Dumazet 已提交
1888
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);
L
Linus Torvalds 已提交
1889 1890 1891 1892

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

1893
	th = (const struct tcphdr *)skb->data;
L
Linus Torvalds 已提交
1894

1895
	if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
L
Linus Torvalds 已提交
1896 1897 1898 1899 1900 1901
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
S
Stephen Hemminger 已提交
1902
	 * provided case of th->doff==0 is eliminated.
L
Linus Torvalds 已提交
1903
	 * So, we defer the checks. */
1904 1905

	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1906
		goto csum_error;
L
Linus Torvalds 已提交
1907

1908
	th = (const struct tcphdr *)skb->data;
1909
	iph = ip_hdr(skb);
1910
lookup:
1911
	sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
1912
			       th->dest, sdif, &refcounted);
L
Linus Torvalds 已提交
1913 1914 1915
	if (!sk)
		goto no_tcp_socket;

E
Eric Dumazet 已提交
1916 1917 1918 1919
process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

1920 1921
	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
1922
		bool req_stolen = false;
1923
		struct sock *nsk;
1924 1925

		sk = req->rsk_listener;
1926
		if (unlikely(tcp_v4_inbound_md5_hash(sk, skb, dif, sdif))) {
1927
			sk_drops_add(sk, skb);
1928 1929 1930
			reqsk_put(req);
			goto discard_it;
		}
1931 1932 1933 1934
		if (tcp_checksum_complete(skb)) {
			reqsk_put(req);
			goto csum_error;
		}
1935
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
1936
			inet_csk_reqsk_queue_drop_and_put(sk, req);
1937 1938
			goto lookup;
		}
1939 1940 1941
		/* We own a reference on the listener, increase it again
		 * as we might lose it too soon.
		 */
1942
		sock_hold(sk);
1943
		refcounted = true;
E
Eric Dumazet 已提交
1944
		nsk = NULL;
1945 1946 1947 1948
		if (!tcp_filter(sk, skb)) {
			th = (const struct tcphdr *)skb->data;
			iph = ip_hdr(skb);
			tcp_v4_fill_cb(skb, iph, th);
1949
			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
1950
		}
1951 1952
		if (!nsk) {
			reqsk_put(req);
1953 1954 1955 1956 1957 1958 1959 1960 1961 1962
			if (req_stolen) {
				/* Another cpu got exclusive access to req
				 * and created a full blown socket.
				 * Try to feed this packet to this socket
				 * instead of discarding it.
				 */
				tcp_v4_restore_cb(skb);
				sock_put(sk);
				goto lookup;
			}
1963
			goto discard_and_relse;
1964 1965 1966
		}
		if (nsk == sk) {
			reqsk_put(req);
1967
			tcp_v4_restore_cb(skb);
1968 1969
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v4_send_reset(nsk, skb);
1970
			goto discard_and_relse;
1971
		} else {
1972
			sock_put(sk);
1973 1974 1975
			return 0;
		}
	}
1976
	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1977
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1978
		goto discard_and_relse;
1979
	}
1980

L
Linus Torvalds 已提交
1981 1982
	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
1983

1984
	if (tcp_v4_inbound_md5_hash(sk, skb, dif, sdif))
1985 1986
		goto discard_and_relse;

1987
	nf_reset_ct(skb);
L
Linus Torvalds 已提交
1988

1989
	if (tcp_filter(sk, skb))
L
Linus Torvalds 已提交
1990
		goto discard_and_relse;
1991 1992
	th = (const struct tcphdr *)skb->data;
	iph = ip_hdr(skb);
1993
	tcp_v4_fill_cb(skb, iph, th);
L
Linus Torvalds 已提交
1994 1995 1996

	skb->dev = NULL;

1997 1998 1999 2000 2001 2002 2003
	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v4_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

2004
	bh_lock_sock_nested(sk);
2005
	tcp_segs_in(tcp_sk(sk), skb);
L
Linus Torvalds 已提交
2006 2007
	ret = 0;
	if (!sock_owned_by_user(sk)) {
E
Eric Dumazet 已提交
2008 2009
		skb_to_free = sk->sk_rx_skb_cache;
		sk->sk_rx_skb_cache = NULL;
F
Florian Westphal 已提交
2010
		ret = tcp_v4_do_rcv(sk, skb);
E
Eric Dumazet 已提交
2011 2012 2013 2014
	} else {
		if (tcp_add_backlog(sk, skb))
			goto discard_and_relse;
		skb_to_free = NULL;
Z
Zhu Yi 已提交
2015
	}
L
Linus Torvalds 已提交
2016
	bh_unlock_sock(sk);
E
Eric Dumazet 已提交
2017 2018
	if (skb_to_free)
		__kfree_skb(skb_to_free);
L
Linus Torvalds 已提交
2019

2020
put_and_return:
2021 2022
	if (refcounted)
		sock_put(sk);
L
Linus Torvalds 已提交
2023 2024 2025 2026 2027 2028 2029

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

2030 2031
	tcp_v4_fill_cb(skb, iph, th);

E
Eric Dumazet 已提交
2032
	if (tcp_checksum_complete(skb)) {
2033
csum_error:
E
Eric Dumazet 已提交
2034
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
L
Linus Torvalds 已提交
2035
bad_packet:
E
Eric Dumazet 已提交
2036
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
L
Linus Torvalds 已提交
2037
	} else {
2038
		tcp_v4_send_reset(NULL, skb);
L
Linus Torvalds 已提交
2039 2040 2041 2042 2043
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
2044
	return 0;
L
Linus Torvalds 已提交
2045 2046

discard_and_relse:
2047
	sk_drops_add(sk, skb);
2048 2049
	if (refcounted)
		sock_put(sk);
L
Linus Torvalds 已提交
2050 2051 2052 2053
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
2054
		inet_twsk_put(inet_twsk(sk));
L
Linus Torvalds 已提交
2055 2056 2057
		goto discard_it;
	}

2058 2059
	tcp_v4_fill_cb(skb, iph, th);

2060 2061 2062
	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
L
Linus Torvalds 已提交
2063
	}
2064
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
L
Linus Torvalds 已提交
2065
	case TCP_TW_SYN: {
2066
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
2067 2068
							&tcp_hashinfo, skb,
							__tcp_hdrlen(th),
2069
							iph->saddr, th->source,
2070
							iph->daddr, th->dest,
2071 2072
							inet_iif(skb),
							sdif);
L
Linus Torvalds 已提交
2073
		if (sk2) {
2074
			inet_twsk_deschedule_put(inet_twsk(sk));
L
Linus Torvalds 已提交
2075
			sk = sk2;
2076
			tcp_v4_restore_cb(skb);
2077
			refcounted = false;
L
Linus Torvalds 已提交
2078 2079 2080
			goto process;
		}
	}
2081
		/* to ACK */
J
Joe Perches 已提交
2082
		fallthrough;
L
Linus Torvalds 已提交
2083 2084 2085 2086
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
2087 2088 2089
		tcp_v4_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
L
Linus Torvalds 已提交
2090 2091 2092 2093 2094
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

2095 2096 2097 2098 2099
static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};
L
Linus Torvalds 已提交
2100

2101
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
E
Eric Dumazet 已提交
2102 2103 2104
{
	struct dst_entry *dst = skb_dst(skb);

E
Eric Dumazet 已提交
2105
	if (dst && dst_hold_safe(dst)) {
2106 2107 2108
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	}
E
Eric Dumazet 已提交
2109
}
2110
EXPORT_SYMBOL(inet_sk_rx_dst_set);
E
Eric Dumazet 已提交
2111

2112
const struct inet_connection_sock_af_ops ipv4_specific = {
2113 2114 2115
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
E
Eric Dumazet 已提交
2116
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
2117 2118 2119 2120 2121 2122 2123
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
2124
#ifdef CONFIG_COMPAT
2125 2126
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
2127
#endif
2128
	.mtu_reduced	   = tcp_v4_mtu_reduced,
L
Linus Torvalds 已提交
2129
};
E
Eric Dumazet 已提交
2130
EXPORT_SYMBOL(ipv4_specific);
L
Linus Torvalds 已提交
2131

2132
#ifdef CONFIG_TCP_MD5SIG
2133
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
2134
	.md5_lookup		= tcp_v4_md5_lookup,
2135
	.calc_md5_hash		= tcp_v4_md5_hash_skb,
2136 2137
	.md5_parse		= tcp_v4_parse_md5_keys,
};
2138
#endif
2139

L
Linus Torvalds 已提交
2140 2141 2142 2143 2144
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
2145
	struct inet_connection_sock *icsk = inet_csk(sk);
L
Linus Torvalds 已提交
2146

2147
	tcp_init_sock(sk);
L
Linus Torvalds 已提交
2148

2149
	icsk->icsk_af_ops = &ipv4_specific;
2150

2151
#ifdef CONFIG_TCP_MD5SIG
2152
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
2153
#endif
L
Linus Torvalds 已提交
2154 2155 2156 2157

	return 0;
}

2158
void tcp_v4_destroy_sock(struct sock *sk)
L
Linus Torvalds 已提交
2159 2160 2161
{
	struct tcp_sock *tp = tcp_sk(sk);

2162 2163
	trace_tcp_destroy_sock(sk);

L
Linus Torvalds 已提交
2164 2165
	tcp_clear_xmit_timers(sk);

2166
	tcp_cleanup_congestion_control(sk);
2167

D
Dave Watson 已提交
2168 2169
	tcp_cleanup_ulp(sk);

L
Linus Torvalds 已提交
2170
	/* Cleanup up the write buffer. */
2171
	tcp_write_queue_purge(sk);
L
Linus Torvalds 已提交
2172

2173 2174 2175
	/* Check if we want to disable active TFO */
	tcp_fastopen_active_disable_ofo_check(sk);

L
Linus Torvalds 已提交
2176
	/* Cleans up our, hopefully empty, out_of_order_queue. */
2177
	skb_rbtree_purge(&tp->out_of_order_queue);
L
Linus Torvalds 已提交
2178

2179 2180 2181
#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
E
Eric Dumazet 已提交
2182
		tcp_clear_md5_list(sk);
2183
		kfree_rcu(rcu_dereference_protected(tp->md5sig_info, 1), rcu);
2184 2185 2186
		tp->md5sig_info = NULL;
	}
#endif
C
Chris Leech 已提交
2187

L
Linus Torvalds 已提交
2188
	/* Clean up a referenced TCP bind bucket. */
2189
	if (inet_csk(sk)->icsk_bind_hash)
2190
		inet_put_port(sk);
L
Linus Torvalds 已提交
2191

2192
	BUG_ON(rcu_access_pointer(tp->fastopen_rsk));
2193

2194 2195
	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);
2196
	tcp_fastopen_destroy_cipher(sk);
2197
	tcp_saved_syn_free(tp);
2198

2199
	sk_sockets_allocated_dec(sk);
L
Linus Torvalds 已提交
2200 2201 2202 2203 2204 2205
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

2206 2207 2208 2209 2210
/*
 * Get next listener socket follow cur.  If cur is NULL, get first socket
 * starting from bucket given in st->bucket; when st->bucket is zero the
 * very first socket in the hash table is returned.
 */
L
Linus Torvalds 已提交
2211 2212
static void *listening_get_next(struct seq_file *seq, void *cur)
{
2213
	struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
J
Jianjun Kong 已提交
2214
	struct tcp_iter_state *st = seq->private;
2215
	struct net *net = seq_file_net(seq);
2216
	struct inet_listen_hashbucket *ilb;
2217
	struct hlist_nulls_node *node;
2218
	struct sock *sk = cur;
L
Linus Torvalds 已提交
2219 2220

	if (!sk) {
2221
get_head:
2222
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
2223
		spin_lock(&ilb->lock);
2224
		sk = sk_nulls_head(&ilb->nulls_head);
2225
		st->offset = 0;
L
Linus Torvalds 已提交
2226 2227
		goto get_sk;
	}
2228
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
L
Linus Torvalds 已提交
2229
	++st->num;
2230
	++st->offset;
L
Linus Torvalds 已提交
2231

2232
	sk = sk_nulls_next(sk);
L
Linus Torvalds 已提交
2233
get_sk:
2234
	sk_nulls_for_each_from(sk, node) {
2235 2236
		if (!net_eq(sock_net(sk), net))
			continue;
2237
		if (sk->sk_family == afinfo->family)
2238
			return sk;
L
Linus Torvalds 已提交
2239
	}
2240
	spin_unlock(&ilb->lock);
2241
	st->offset = 0;
2242 2243 2244
	if (++st->bucket < INET_LHTABLE_SIZE)
		goto get_head;
	return NULL;
L
Linus Torvalds 已提交
2245 2246 2247 2248
}

static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
2249 2250 2251 2252 2253 2254
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);
L
Linus Torvalds 已提交
2255 2256 2257 2258 2259 2260 2261 2262

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

E
Eric Dumazet 已提交
2263
static inline bool empty_bucket(const struct tcp_iter_state *st)
2264
{
E
Eric Dumazet 已提交
2265
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
2266 2267
}

2268 2269 2270 2271
/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
L
Linus Torvalds 已提交
2272 2273
static void *established_get_first(struct seq_file *seq)
{
2274
	struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
J
Jianjun Kong 已提交
2275
	struct tcp_iter_state *st = seq->private;
2276
	struct net *net = seq_file_net(seq);
L
Linus Torvalds 已提交
2277 2278
	void *rc = NULL;

2279 2280
	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
L
Linus Torvalds 已提交
2281
		struct sock *sk;
2282
		struct hlist_nulls_node *node;
2283
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
L
Linus Torvalds 已提交
2284

2285 2286 2287 2288
		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

2289
		spin_lock_bh(lock);
2290
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2291
			if (sk->sk_family != afinfo->family ||
2292
			    !net_eq(sock_net(sk), net)) {
L
Linus Torvalds 已提交
2293 2294 2295 2296 2297
				continue;
			}
			rc = sk;
			goto out;
		}
2298
		spin_unlock_bh(lock);
L
Linus Torvalds 已提交
2299 2300 2301 2302 2303 2304 2305
	}
out:
	return rc;
}

static void *established_get_next(struct seq_file *seq, void *cur)
{
2306
	struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
L
Linus Torvalds 已提交
2307
	struct sock *sk = cur;
2308
	struct hlist_nulls_node *node;
J
Jianjun Kong 已提交
2309
	struct tcp_iter_state *st = seq->private;
2310
	struct net *net = seq_file_net(seq);
L
Linus Torvalds 已提交
2311 2312

	++st->num;
2313
	++st->offset;
L
Linus Torvalds 已提交
2314

E
Eric Dumazet 已提交
2315
	sk = sk_nulls_next(sk);
L
Linus Torvalds 已提交
2316

2317
	sk_nulls_for_each_from(sk, node) {
2318 2319
		if (sk->sk_family == afinfo->family &&
		    net_eq(sock_net(sk), net))
E
Eric Dumazet 已提交
2320
			return sk;
L
Linus Torvalds 已提交
2321 2322
	}

E
Eric Dumazet 已提交
2323 2324 2325
	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
	++st->bucket;
	return established_get_first(seq);
L
Linus Torvalds 已提交
2326 2327 2328 2329
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
2330 2331 2332 2333 2334
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);
L
Linus Torvalds 已提交
2335 2336 2337 2338

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
2339
	}
L
Linus Torvalds 已提交
2340 2341 2342 2343 2344 2345
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
J
Jianjun Kong 已提交
2346
	struct tcp_iter_state *st = seq->private;
L
Linus Torvalds 已提交
2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}

2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376
static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
E
Eric Dumazet 已提交
2377
		st->state = TCP_SEQ_STATE_ESTABLISHED;
J
Joe Perches 已提交
2378
		fallthrough;
2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}

2392
void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
L
Linus Torvalds 已提交
2393
{
J
Jianjun Kong 已提交
2394
	struct tcp_iter_state *st = seq->private;
2395 2396 2397 2398 2399 2400 2401 2402
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

L
Linus Torvalds 已提交
2403 2404
	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
2405 2406 2407 2408 2409 2410 2411
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
L
Linus Torvalds 已提交
2412
}
2413
EXPORT_SYMBOL(tcp_seq_start);
L
Linus Torvalds 已提交
2414

2415
void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
L
Linus Torvalds 已提交
2416
{
2417
	struct tcp_iter_state *st = seq->private;
L
Linus Torvalds 已提交
2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
2430 2431
			st->bucket = 0;
			st->offset = 0;
L
Linus Torvalds 已提交
2432 2433 2434 2435 2436 2437 2438 2439 2440
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
2441
	st->last_pos = *pos;
L
Linus Torvalds 已提交
2442 2443
	return rc;
}
2444
EXPORT_SYMBOL(tcp_seq_next);
L
Linus Torvalds 已提交
2445

2446
void tcp_seq_stop(struct seq_file *seq, void *v)
L
Linus Torvalds 已提交
2447
{
J
Jianjun Kong 已提交
2448
	struct tcp_iter_state *st = seq->private;
L
Linus Torvalds 已提交
2449 2450 2451 2452

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
2453
			spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
L
Linus Torvalds 已提交
2454 2455 2456
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
2457
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
L
Linus Torvalds 已提交
2458 2459 2460
		break;
	}
}
2461
EXPORT_SYMBOL(tcp_seq_stop);
L
Linus Torvalds 已提交
2462

2463
static void get_openreq4(const struct request_sock *req,
E
Eric Dumazet 已提交
2464
			 struct seq_file *f, int i)
L
Linus Torvalds 已提交
2465
{
2466
	const struct inet_request_sock *ireq = inet_rsk(req);
2467
	long delta = req->rsk_timer.expires - jiffies;
L
Linus Torvalds 已提交
2468

2469
	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2470
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
L
Linus Torvalds 已提交
2471
		i,
2472
		ireq->ir_loc_addr,
2473
		ireq->ir_num,
2474 2475
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
L
Linus Torvalds 已提交
2476 2477 2478
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
2479
		jiffies_delta_to_clock_t(delta),
2480
		req->num_timeout,
E
Eric Dumazet 已提交
2481 2482
		from_kuid_munged(seq_user_ns(f),
				 sock_i_uid(req->rsk_listener)),
L
Linus Torvalds 已提交
2483 2484
		0,  /* non standard timer */
		0, /* open_requests have no inode */
2485
		0,
2486
		req);
L
Linus Torvalds 已提交
2487 2488
}

2489
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
L
Linus Torvalds 已提交
2490 2491 2492
{
	int timer_active;
	unsigned long timer_expires;
2493
	const struct tcp_sock *tp = tcp_sk(sk);
2494
	const struct inet_connection_sock *icsk = inet_csk(sk);
2495
	const struct inet_sock *inet = inet_sk(sk);
2496
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
E
Eric Dumazet 已提交
2497 2498 2499 2500
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
2501
	int rx_queue;
2502
	int state;
L
Linus Torvalds 已提交
2503

N
Nandita Dukkipati 已提交
2504
	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2505
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
N
Nandita Dukkipati 已提交
2506
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
L
Linus Torvalds 已提交
2507
		timer_active	= 1;
2508 2509
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
L
Linus Torvalds 已提交
2510
		timer_active	= 4;
2511
		timer_expires	= icsk->icsk_timeout;
2512
	} else if (timer_pending(&sk->sk_timer)) {
L
Linus Torvalds 已提交
2513
		timer_active	= 2;
2514
		timer_expires	= sk->sk_timer.expires;
L
Linus Torvalds 已提交
2515 2516 2517 2518 2519
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

2520
	state = inet_sk_state_load(sk);
2521
	if (state == TCP_LISTEN)
2522
		rx_queue = READ_ONCE(sk->sk_ack_backlog);
2523
	else
2524 2525
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
2526
		 */
2527
		rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
2528
				      READ_ONCE(tp->copied_seq), 0);
2529

2530
	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2531
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2532
		i, src, srcp, dest, destp, state,
2533
		READ_ONCE(tp->write_seq) - tp->snd_una,
2534
		rx_queue,
L
Linus Torvalds 已提交
2535
		timer_active,
2536
		jiffies_delta_to_clock_t(timer_expires - jiffies),
2537
		icsk->icsk_retransmits,
2538
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2539
		icsk->icsk_probes_out,
2540
		sock_i_ino(sk),
2541
		refcount_read(&sk->sk_refcnt), sk,
2542 2543
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
W
Wei Wang 已提交
2544
		(icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sk),
L
Linus Torvalds 已提交
2545
		tp->snd_cwnd,
2546 2547
		state == TCP_LISTEN ?
		    fastopenq->max_qlen :
2548
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
L
Linus Torvalds 已提交
2549 2550
}

2551
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2552
			       struct seq_file *f, int i)
L
Linus Torvalds 已提交
2553
{
2554
	long delta = tw->tw_timer.expires - jiffies;
2555
	__be32 dest, src;
L
Linus Torvalds 已提交
2556 2557 2558 2559 2560 2561 2562
	__u16 destp, srcp;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

2563
	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2564
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
L
Linus Torvalds 已提交
2565
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2566
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2567
		refcount_read(&tw->tw_refcnt), tw);
L
Linus Torvalds 已提交
2568 2569 2570 2571 2572 2573
}

#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
J
Jianjun Kong 已提交
2574
	struct tcp_iter_state *st;
E
Eric Dumazet 已提交
2575
	struct sock *sk = v;
L
Linus Torvalds 已提交
2576

2577
	seq_setwidth(seq, TMPSZ - 1);
L
Linus Torvalds 已提交
2578
	if (v == SEQ_START_TOKEN) {
2579
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
L
Linus Torvalds 已提交
2580 2581 2582 2583 2584 2585
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

2586 2587 2588
	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait4_sock(v, seq, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
E
Eric Dumazet 已提交
2589
		get_openreq4(v, seq, st->num);
2590 2591
	else
		get_tcp4_sock(v, seq, st->num);
L
Linus Torvalds 已提交
2592
out:
2593
	seq_pad(seq, '\n');
L
Linus Torvalds 已提交
2594 2595 2596
	return 0;
}

2597 2598 2599 2600 2601 2602 2603
static const struct seq_operations tcp4_seq_ops = {
	.show		= tcp4_seq_show,
	.start		= tcp_seq_start,
	.next		= tcp_seq_next,
	.stop		= tcp_seq_stop,
};

L
Linus Torvalds 已提交
2604 2605 2606 2607
static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.family		= AF_INET,
};

2608
static int __net_init tcp4_proc_init_net(struct net *net)
2609
{
2610 2611
	if (!proc_create_net_data("tcp", 0444, net->proc_net, &tcp4_seq_ops,
			sizeof(struct tcp_iter_state), &tcp4_seq_afinfo))
2612 2613
		return -ENOMEM;
	return 0;
2614 2615
}

2616
static void __net_exit tcp4_proc_exit_net(struct net *net)
2617
{
2618
	remove_proc_entry("tcp", net->proc_net);
2619 2620 2621 2622 2623 2624 2625
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

L
Linus Torvalds 已提交
2626 2627
int __init tcp4_proc_init(void)
{
2628
	return register_pernet_subsys(&tcp4_net_ops);
L
Linus Torvalds 已提交
2629 2630 2631 2632
}

void tcp4_proc_exit(void)
{
2633
	unregister_pernet_subsys(&tcp4_net_ops);
L
Linus Torvalds 已提交
2634 2635 2636 2637 2638 2639 2640
}
#endif /* CONFIG_PROC_FS */

struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
A
Andrey Ignatov 已提交
2641
	.pre_connect		= tcp_v4_pre_connect,
L
Linus Torvalds 已提交
2642 2643
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
2644
	.accept			= inet_csk_accept,
L
Linus Torvalds 已提交
2645 2646 2647 2648 2649 2650
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
2651
	.keepalive		= tcp_set_keepalive,
L
Linus Torvalds 已提交
2652
	.recvmsg		= tcp_recvmsg,
2653 2654
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
L
Linus Torvalds 已提交
2655
	.backlog_rcv		= tcp_v4_do_rcv,
E
Eric Dumazet 已提交
2656
	.release_cb		= tcp_release_cb,
2657 2658 2659
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
L
Linus Torvalds 已提交
2660
	.enter_memory_pressure	= tcp_enter_memory_pressure,
2661
	.leave_memory_pressure	= tcp_leave_memory_pressure,
2662
	.stream_memory_free	= tcp_stream_memory_free,
L
Linus Torvalds 已提交
2663
	.sockets_allocated	= &tcp_sockets_allocated,
2664
	.orphan_count		= &tcp_orphan_count,
L
Linus Torvalds 已提交
2665 2666
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
2667
	.sysctl_mem		= sysctl_tcp_mem,
2668 2669
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
L
Linus Torvalds 已提交
2670 2671
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
2672
	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
2673
	.twsk_prot		= &tcp_timewait_sock_ops,
2674
	.rsk_prot		= &tcp_request_sock_ops,
2675
	.h.hashinfo		= &tcp_hashinfo,
2676
	.no_autobind		= true,
2677 2678 2679
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
G
Glauber Costa 已提交
2680
#endif
2681
	.diag_destroy		= tcp_abort,
L
Linus Torvalds 已提交
2682
};
E
Eric Dumazet 已提交
2683
EXPORT_SYMBOL(tcp_prot);
L
Linus Torvalds 已提交
2684

2685 2686 2687 2688
static void __net_exit tcp_sk_exit(struct net *net)
{
	int cpu;

2689
	if (net->ipv4.tcp_congestion_control)
2690 2691
		bpf_module_put(net->ipv4.tcp_congestion_control,
			       net->ipv4.tcp_congestion_control->owner);
2692

2693 2694 2695 2696 2697
	for_each_possible_cpu(cpu)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
	free_percpu(net->ipv4.tcp_sk);
}

2698 2699
static int __net_init tcp_sk_init(struct net *net)
{
2700
	int res, cpu, cnt;
2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712

	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
	if (!net->ipv4.tcp_sk)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct sock *sk;

		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
					   IPPROTO_TCP, net);
		if (res)
			goto fail;
2713
		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
2714 2715 2716 2717 2718 2719

		/* Please enforce IP_DF and IPID==0 for RST and
		 * ACK sent in SYN-RECV and TIME-WAIT state.
		 */
		inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;

2720 2721
		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
	}
2722

2723
	net->ipv4.sysctl_tcp_ecn = 2;
2724 2725
	net->ipv4.sysctl_tcp_ecn_fallback = 1;

F
Fan Du 已提交
2726
	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
E
Eric Dumazet 已提交
2727
	net->ipv4.sysctl_tcp_min_snd_mss = TCP_MIN_SND_MSS;
2728
	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
2729
	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
2730
	net->ipv4.sysctl_tcp_mtu_probe_floor = TCP_MIN_SND_MSS;
2731

2732
	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
2733
	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
2734
	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;
2735

2736
	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
2737
	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
2738
	net->ipv4.sysctl_tcp_syncookies = 1;
2739
	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
2740
	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
2741
	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
2742
	net->ipv4.sysctl_tcp_orphan_retries = 0;
2743
	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
2744
	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
2745
	net->ipv4.sysctl_tcp_tw_reuse = 2;
2746
	net->ipv4.sysctl_tcp_no_ssthresh_metrics_save = 1;
2747

2748
	cnt = tcp_hashinfo.ehash_mask + 1;
2749
	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
2750 2751
	net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;

2752
	net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 128);
E
Eric Dumazet 已提交
2753
	net->ipv4.sysctl_tcp_sack = 1;
2754
	net->ipv4.sysctl_tcp_window_scaling = 1;
2755
	net->ipv4.sysctl_tcp_timestamps = 1;
2756
	net->ipv4.sysctl_tcp_early_retrans = 3;
2757
	net->ipv4.sysctl_tcp_recovery = TCP_RACK_LOSS_DETECTION;
2758
	net->ipv4.sysctl_tcp_slow_start_after_idle = 1; /* By default, RFC2861 behavior.  */
2759
	net->ipv4.sysctl_tcp_retrans_collapse = 1;
2760
	net->ipv4.sysctl_tcp_max_reordering = 300;
E
Eric Dumazet 已提交
2761
	net->ipv4.sysctl_tcp_dsack = 1;
2762
	net->ipv4.sysctl_tcp_app_win = 31;
2763
	net->ipv4.sysctl_tcp_adv_win_scale = 1;
E
Eric Dumazet 已提交
2764
	net->ipv4.sysctl_tcp_frto = 2;
2765
	net->ipv4.sysctl_tcp_moderate_rcvbuf = 1;
2766 2767 2768 2769 2770
	/* This limits the percentage of the congestion window which we
	 * will allow a single TSO frame to consume.  Building TSO frames
	 * which are too large can cause TCP streams to be bursty.
	 */
	net->ipv4.sysctl_tcp_tso_win_divisor = 3;
2771 2772
	/* Default TSQ limit of 16 TSO segments */
	net->ipv4.sysctl_tcp_limit_output_bytes = 16 * 65536;
2773 2774
	/* rfc5961 challenge ack rate limiting */
	net->ipv4.sysctl_tcp_challenge_ack_limit = 1000;
2775
	net->ipv4.sysctl_tcp_min_tso_segs = 2;
2776
	net->ipv4.sysctl_tcp_min_rtt_wlen = 300;
2777
	net->ipv4.sysctl_tcp_autocorking = 1;
2778
	net->ipv4.sysctl_tcp_invalid_ratelimit = HZ/2;
2779
	net->ipv4.sysctl_tcp_pacing_ss_ratio = 200;
2780
	net->ipv4.sysctl_tcp_pacing_ca_ratio = 120;
2781 2782 2783 2784 2785 2786 2787 2788
	if (net != &init_net) {
		memcpy(net->ipv4.sysctl_tcp_rmem,
		       init_net.ipv4.sysctl_tcp_rmem,
		       sizeof(init_net.ipv4.sysctl_tcp_rmem));
		memcpy(net->ipv4.sysctl_tcp_wmem,
		       init_net.ipv4.sysctl_tcp_wmem,
		       sizeof(init_net.ipv4.sysctl_tcp_wmem));
	}
2789
	net->ipv4.sysctl_tcp_comp_sack_delay_ns = NSEC_PER_MSEC;
2790
	net->ipv4.sysctl_tcp_comp_sack_slack_ns = 100 * NSEC_PER_USEC;
E
Eric Dumazet 已提交
2791
	net->ipv4.sysctl_tcp_comp_sack_nr = 44;
2792
	net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
2793
	spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
2794 2795
	net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60;
	atomic_set(&net->ipv4.tfo_active_disable_times, 0);
2796

2797 2798
	/* Reno is always built in */
	if (!net_eq(net, &init_net) &&
2799 2800
	    bpf_try_module_get(init_net.ipv4.tcp_congestion_control,
			       init_net.ipv4.tcp_congestion_control->owner))
2801 2802 2803 2804
		net->ipv4.tcp_congestion_control = init_net.ipv4.tcp_congestion_control;
	else
		net->ipv4.tcp_congestion_control = &tcp_reno;

2805
	return 0;
2806 2807 2808 2809
fail:
	tcp_sk_exit(net);

	return res;
E
Eric W. Biederman 已提交
2810 2811 2812 2813
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
2814 2815
	struct net *net;

2816
	inet_twsk_purge(&tcp_hashinfo, AF_INET);
2817 2818 2819

	list_for_each_entry(net, net_exit_list, exit_list)
		tcp_fastopen_ctx_destroy(net);
2820 2821 2822
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
E
Eric W. Biederman 已提交
2823 2824 2825
       .init	   = tcp_sk_init,
       .exit	   = tcp_sk_exit,
       .exit_batch = tcp_sk_exit_batch,
2826 2827
};

2828
void __init tcp_v4_init(void)
L
Linus Torvalds 已提交
2829
{
2830
	if (register_pernet_subsys(&tcp_sk_ops))
L
Linus Torvalds 已提交
2831 2832
		panic("Failed to create the TCP control socket.\n");
}