// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *	     				Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/inetdevice.h>
#include <linux/btf_ids.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

#include <trace/events/tcp.h>

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

static u32 tcp_v4_init_seq(const struct sk_buff *skb)
{
	return secure_tcp_seq(ip_hdr(skb)->daddr,
			      ip_hdr(skb)->saddr,
			      tcp_hdr(skb)->dest,
			      tcp_hdr(skb)->source);
}

static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
	return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
}

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct inet_timewait_sock *tw = inet_twsk(sktw);
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);
	int reuse = sock_net(sk)->ipv4.sysctl_tcp_tw_reuse;

	if (reuse == 2) {
		/* Still does not detect *everything* that goes through
		 * lo, since we require a loopback src or dst address
		 * or direct binding to 'lo' interface.
		 */
		bool loopback = false;
		if (tw->tw_bound_dev_if == LOOPBACK_IFINDEX)
			loopback = true;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == AF_INET6) {
			if (ipv6_addr_loopback(&tw->tw_v6_daddr) ||
			    ipv6_addr_v4mapped_loopback(&tw->tw_v6_daddr) ||
			    ipv6_addr_loopback(&tw->tw_v6_rcv_saddr) ||
			    ipv6_addr_v4mapped_loopback(&tw->tw_v6_rcv_saddr))
				loopback = true;
		} else
#endif
		{
			if (ipv4_is_loopback(tw->tw_daddr) ||
			    ipv4_is_loopback(tw->tw_rcv_saddr))
				loopback = true;
		}
		if (!loopback)
			reuse = 0;
	}

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (!twp || (reuse && time_after32(ktime_get_seconds(),
					    tcptw->tw_ts_recent_stamp)))) {
		/* In case of repair and re-using TIME-WAIT sockets we still
		 * want to be sure that it is safe as above but honor the
		 * sequence numbers and time stamps set as part of the repair
		 * process.
		 *
		 * Without this check re-using a TIME-WAIT socket with TCP
		 * repair would accumulate a -1 on the repair assigned
		 * sequence number. The first time it is reused the sequence
		 * is -1, the second time -2, etc. This fixes that issue
		 * without appearing to create any others.
		 */
		if (likely(!tp->repair)) {
			u32 seq = tcptw->tw_snd_nxt + 65535 + 2;

			if (!seq)
				seq = 1;
			WRITE_ONCE(tp->write_seq, seq);
			tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
			tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		}
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);

static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			      int addr_len)
{
	/* This check is replicated from tcp_v4_connect() and intended to
	 * prevent BPF program called below from accessing bytes that are out
	 * of the bound specified by user in addr_len.
	 */
	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	sock_owned_by_me(sk);

	return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr);
}

/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     lockdep_sock_is_held(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	sk_rcv_saddr_set(sk, inet->inet_saddr);

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			WRITE_ONCE(tp->write_seq, 0);
	}

	inet->inet_dport = usin->sin_port;
	sk_daddr_set(sk, daddr);

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and not releasing socket
	 * lock select source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(tcp_death_row, sk);
	if (err)
		goto failure;

	sk_set_txhash(sk);

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);
	rt = NULL;

	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			WRITE_ONCE(tp->write_seq,
				   secure_tcp_seq(inet->inet_saddr,
						  inet->inet_daddr,
						  inet->inet_sport,
						  usin->sin_port));
		tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
						 inet->inet_saddr,
						 inet->inet_daddr);
	}

	inet->inet_id = prandom_u32();

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto failure;

	err = tcp_connect(sk);

	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);

/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct dst_entry *dst;
	u32 mtu;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;
	mtu = tcp_sk(sk)->mtu_info;
	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);

static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}


/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq, bool abort)
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/* ICMPs are not backlogged, hence we cannot get
	 * an established socket here.
	 */
	if (seq != tcp_rsk(req)->snt_isn) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
	} else if (abort) {
		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		tcp_listendrop(req->rsk_listener);
	}
	reqsk_put(req);
}
EXPORT_SYMBOL(tcp_req_err);

/* TCP-LD (RFC 6069) logic */
void tcp_ld_RTO_revert(struct sock *sk, u32 seq)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	s32 remaining;
	u32 delta_us;

	if (sock_owned_by_user(sk))
		return;

	if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
	    !icsk->icsk_backoff)
		return;

	skb = tcp_rtx_queue_head(sk);
	if (WARN_ON_ONCE(!skb))
		return;

	icsk->icsk_backoff--;
	icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) : TCP_TIMEOUT_INIT;
	icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

	tcp_mstamp_refresh(tp);
	delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
	remaining = icsk->icsk_rto - usecs_to_jiffies(delta_us);

	if (remaining > 0) {
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  remaining, TCP_RTO_MAX);
	} else {
		/* RTO revert clocked out retransmission.
		 * Will retransmit now.
		 */
		tcp_retransmit_timer(sk);
	}
}
EXPORT_SYMBOL(tcp_ld_RTO_revert);

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

int tcp_v4_err(struct sk_buff *skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct sock *sk;
	struct request_sock *fastopen;
	u32 seq, snd_una;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
				       th->dest, iph->saddr, ntohs(th->source),
				       inet_iif(skb), 0);
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
		return -ENOENT;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return 0;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		tcp_req_err(sk, seq, type == ICMP_PARAMETERPROB ||
				     type == ICMP_TIME_EXCEEDED ||
				     (type == ICMP_DEST_UNREACH &&
				      (code == ICMP_NET_UNREACH ||
				       code == ICMP_HOST_UNREACH)));
		return 0;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
	fastopen = rcu_dereference(tp->fastopen_rsk);
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		if (!sock_owned_by_user(sk))
			do_redirect(skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if this ICMP message allows revert of backoff.
		 * (see RFC 6069)
		 */
		if (!fastopen &&
		    (code == ICMP_NET_UNREACH || code == ICMP_HOST_UNREACH))
			tcp_ld_RTO_revert(sk, seq);
		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		ip_icmp_error(sk, skb, err, th->dest, info, (u8 *)th);

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters, even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 *							--ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
	return 0;
}

627
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
L
Linus Torvalds 已提交
628
{
629
	struct tcphdr *th = tcp_hdr(skb);
L
Linus Torvalds 已提交
630

631 632 633
	th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
L
Linus Torvalds 已提交
634 635
}

636
/* This routine computes an IPv4 TCP checksum. */
637
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
638
{
639
	const struct inet_sock *inet = inet_sk(sk);
640 641 642

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
E
Eric Dumazet 已提交
643
EXPORT_SYMBOL(tcp_v4_send_check);
644

L
Linus Torvalds 已提交
645 646 647 648 649 650 651 652 653 654 655 656 657
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So that we build reply only basing on parameters
 *		arrived with segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

658
static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
L
Linus Torvalds 已提交
659
{
660
	const struct tcphdr *th = tcp_hdr(skb);
661 662 663
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
664
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
665 666
#endif
	} rep;
L
Linus Torvalds 已提交
667
	struct ip_reply_arg arg;
668
#ifdef CONFIG_TCP_MD5SIG
669
	struct tcp_md5sig_key *key = NULL;
670 671 672 673
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
674
#endif
675
	u64 transmit_time = 0;
J
Jon Maxwell 已提交
676
	struct sock *ctl_sk;
677
	struct net *net;
L
Linus Torvalds 已提交
678 679 680 681 682

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

683 684 685 686
	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
L
Linus Torvalds 已提交
687 688 689
		return;

	/* Swap the send and the receive. */
690 691 692 693 694
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;
L
Linus Torvalds 已提交
695 696

	if (th->ack) {
697
		rep.th.seq = th->ack_seq;
L
Linus Torvalds 已提交
698
	} else {
699 700 701
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
L
Linus Torvalds 已提交
702 703
	}

704
	memset(&arg, 0, sizeof(arg));
705 706 707
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

708
	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
709
#ifdef CONFIG_TCP_MD5SIG
710
	rcu_read_lock();
711
	hash_location = tcp_parse_md5sig_option(th);
712
	if (sk && sk_fullsock(sk)) {
713
		const union tcp_md5_addr *addr;
714
		int l3index;
715

716 717 718 719
		/* sdif set, means packet ingressed via a device
		 * in an L3 domain and inet_iif is set to it.
		 */
		l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
720
		addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
721
		key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
722
	} else if (hash_location) {
723
		const union tcp_md5_addr *addr;
724 725
		int sdif = tcp_v4_sdif(skb);
		int dif = inet_iif(skb);
726
		int l3index;
727

728 729 730 731 732 733 734
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we do not lose security here:
		 * the incoming packet is checked with the MD5 hash of the key
		 * we find, and no RST is generated if the hash doesn't match.
		 */
735 736
		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
					     ip_hdr(skb)->saddr,
737
					     th->source, ip_hdr(skb)->daddr,
738
					     ntohs(th->source), dif, sdif);
739 740
		/* don't send rst if it can't find key */
		if (!sk1)
741 742
			goto out;

743 744 745 746
		/* sdif set, means packet ingressed via a device
		 * in an L3 domain and dif is set to it.
		 */
		l3index = sdif ? dif : 0;
747
		addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
748
		key = tcp_md5_do_lookup(sk1, l3index, addr, AF_INET);
749
		if (!key)
750 751
			goto out;

752

753
		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
754
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
755 756
			goto out;

757 758
	}

759 760 761 762 763 764 765 766 767
	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

768
		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
769 770
				     key, ip_hdr(skb)->saddr,
				     ip_hdr(skb)->daddr, &rep.th);
771 772
	}
#endif
773 774
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
775
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
L
Linus Torvalds 已提交
776
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
777 778
	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;

779
	/* When socket is gone, all binding information is lost.
A
Alexey Kuznetsov 已提交
780 781
	 * routing might fail in this case. No choice here, if we choose to force
	 * input interface, we will misroute in case of asymmetric route.
782
	 */
783
	if (sk) {
A
Alexey Kuznetsov 已提交
784
		arg.bound_dev_if = sk->sk_bound_dev_if;
785 786
		if (sk_fullsock(sk))
			trace_tcp_send_reset(sk, skb);
787
	}
L
Linus Torvalds 已提交
788

789 790 791
	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));

792
	arg.tos = ip_hdr(skb)->tos;
793
	arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
794
	local_bh_disable();
795
	ctl_sk = this_cpu_read(*net->ipv4.tcp_sk);
796
	if (sk) {
J
Jon Maxwell 已提交
797 798
		ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
				   inet_twsk(sk)->tw_mark : sk->sk_mark;
799 800
		ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
				   inet_twsk(sk)->tw_priority : sk->sk_priority;
801
		transmit_time = tcp_transmit_time(sk);
802
	}
J
Jon Maxwell 已提交
803
	ip_send_unicast_reply(ctl_sk,
804
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
805
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
806 807
			      &arg, arg.iov[0].iov_len,
			      transmit_time);
L
Linus Torvalds 已提交
808

J
Jon Maxwell 已提交
809
	ctl_sk->sk_mark = 0;
E
Eric Dumazet 已提交
810 811
	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
812
	local_bh_enable();
813 814

#ifdef CONFIG_TCP_MD5SIG
815 816
out:
	rcu_read_unlock();
817
#endif
L
Linus Torvalds 已提交
818 819 820 821 822 823
}

/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

824
static void tcp_v4_send_ack(const struct sock *sk,
825
			    struct sk_buff *skb, u32 seq, u32 ack,
826
			    u32 win, u32 tsval, u32 tsecr, int oif,
827
			    struct tcp_md5sig_key *key,
828
			    int reply_flags, u8 tos)
L
Linus Torvalds 已提交
829
{
830
	const struct tcphdr *th = tcp_hdr(skb);
L
Linus Torvalds 已提交
831 832
	struct {
		struct tcphdr th;
833
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
834
#ifdef CONFIG_TCP_MD5SIG
835
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
836 837
#endif
			];
L
Linus Torvalds 已提交
838
	} rep;
839
	struct net *net = sock_net(sk);
L
Linus Torvalds 已提交
840
	struct ip_reply_arg arg;
J
Jon Maxwell 已提交
841
	struct sock *ctl_sk;
842
	u64 transmit_time;
L
Linus Torvalds 已提交
843 844

	memset(&rep.th, 0, sizeof(struct tcphdr));
845
	memset(&arg, 0, sizeof(arg));
L
Linus Torvalds 已提交
846 847 848

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
849
	if (tsecr) {
850 851 852
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
853 854
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
855
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
L
Linus Torvalds 已提交
856 857 858 859 860 861 862 863 864 865 866
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

867 868
#ifdef CONFIG_TCP_MD5SIG
	if (key) {
869
		int offset = (tsecr) ? 3 : 0;
870 871 872 873 874 875 876 877

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

878
		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
879 880
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
881 882
	}
#endif
883
	arg.flags = reply_flags;
884 885
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
L
Linus Torvalds 已提交
886 887
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
888 889
	if (oif)
		arg.bound_dev_if = oif;
890
	arg.tos = tos;
891
	arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
892
	local_bh_disable();
893
	ctl_sk = this_cpu_read(*net->ipv4.tcp_sk);
894 895
	ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
			   inet_twsk(sk)->tw_mark : sk->sk_mark;
896 897
	ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
			   inet_twsk(sk)->tw_priority : sk->sk_priority;
898
	transmit_time = tcp_transmit_time(sk);
J
Jon Maxwell 已提交
899
	ip_send_unicast_reply(ctl_sk,
900
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
901
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
902 903
			      &arg, arg.iov[0].iov_len,
			      transmit_time);
L
Linus Torvalds 已提交
904

J
Jon Maxwell 已提交
905
	ctl_sk->sk_mark = 0;
E
Eric Dumazet 已提交
906
	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
907
	local_bh_enable();
L
Linus Torvalds 已提交
908 909 910 911
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
912
	struct inet_timewait_sock *tw = inet_twsk(sk);
913
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
L
Linus Torvalds 已提交
914

915
	tcp_v4_send_ack(sk, skb,
916
			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
917
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
918
			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
919 920
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
921
			tcp_twsk_md5_key(tcptw),
922 923
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
924
			);
L
Linus Torvalds 已提交
925

926
	inet_twsk_put(tw);
L
Linus Torvalds 已提交
927 928
}

929
static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
930
				  struct request_sock *req)
L
Linus Torvalds 已提交
931
{
932
	const union tcp_md5_addr *addr;
933
	int l3index;
934

935 936 937
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
938 939 940
	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
					     tcp_sk(sk)->snd_nxt;

941 942 943 944 945
	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
946
	addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
947
	l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
948
	tcp_v4_send_ack(sk, skb, seq,
949 950
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
951
			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
952 953
			req->ts_recent,
			0,
954
			tcp_md5_do_lookup(sk, l3index, addr, AF_INET),
955 956
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
L
Linus Torvalds 已提交
957 958 959
}

/*
960
 *	Send a SYN-ACK after having received a SYN.
961
 *	This still operates on a request_sock only, not on a big
L
Linus Torvalds 已提交
962 963
 *	socket.
 */
964
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
965
			      struct flowi *fl,
966
			      struct request_sock *req,
967
			      struct tcp_fastopen_cookie *foc,
968
			      enum tcp_synack_type synack_type)
L
Linus Torvalds 已提交
969
{
970
	const struct inet_request_sock *ireq = inet_rsk(req);
971
	struct flowi4 fl4;
L
Linus Torvalds 已提交
972
	int err = -1;
973
	struct sk_buff *skb;
L
Linus Torvalds 已提交
974 975

	/* First, grab a route. */
976
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
977
		return -1;
L
Linus Torvalds 已提交
978

979
	skb = tcp_make_synack(sk, dst, req, foc, synack_type);
L
Linus Torvalds 已提交
980 981

	if (skb) {
982
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
L
Linus Torvalds 已提交
983

984
		rcu_read_lock();
985 986
		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
987 988
					    rcu_dereference(ireq->ireq_opt));
		rcu_read_unlock();
989
		err = net_xmit_eval(err);
L
Linus Torvalds 已提交
990 991 992 993 994 995
	}

	return err;
}

/*
996
 *	IPv4 request_sock destructor.
L
Linus Torvalds 已提交
997
 */
998
static void tcp_v4_reqsk_destructor(struct request_sock *req)
L
Linus Torvalds 已提交
999
{
E
Eric Dumazet 已提交
1000
	kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
L
Linus Torvalds 已提交
1001 1002
}

1003 1004 1005 1006 1007 1008 1009
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

1010
DEFINE_STATIC_KEY_FALSE(tcp_md5_needed);
1011 1012
EXPORT_SYMBOL(tcp_md5_needed);

1013
/* Find the Key structure for an address.  */
1014
struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
1015 1016
					   const union tcp_md5_addr *addr,
					   int family)
1017
{
1018
	const struct tcp_sock *tp = tcp_sk(sk);
E
Eric Dumazet 已提交
1019
	struct tcp_md5sig_key *key;
1020
	const struct tcp_md5sig_info *md5sig;
1021 1022 1023
	__be32 mask;
	struct tcp_md5sig_key *best_match = NULL;
	bool match;
1024

1025 1026
	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
1027
				       lockdep_sock_is_held(sk));
1028
	if (!md5sig)
1029
		return NULL;
A
Arnd Bergmann 已提交
1030

1031 1032
	hlist_for_each_entry_rcu(key, &md5sig->head, node,
				 lockdep_sock_is_held(sk)) {
E
Eric Dumazet 已提交
1033 1034
		if (key->family != family)
			continue;
1035 1036
		if (key->l3index && key->l3index != l3index)
			continue;
1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055
		if (family == AF_INET) {
			mask = inet_make_mask(key->prefixlen);
			match = (key->addr.a4.s_addr & mask) ==
				(addr->a4.s_addr & mask);
#if IS_ENABLED(CONFIG_IPV6)
		} else if (family == AF_INET6) {
			match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
						  key->prefixlen);
#endif
		} else {
			match = false;
		}

		if (match && (!best_match ||
			      key->prefixlen > best_match->prefixlen))
			best_match = key;
	}
	return best_match;
}
1056
EXPORT_SYMBOL(__tcp_md5_do_lookup);
1057

1058 1059
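/* Like __tcp_md5_do_lookup(), but require an exact address and prefix length
 * match instead of the longest-prefix match done above.
 */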
static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
						      const union tcp_md5_addr *addr,
1060 1061
						      int family, u8 prefixlen,
						      int l3index)
1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	const struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
1077 1078
	hlist_for_each_entry_rcu(key, &md5sig->head, node,
				 lockdep_sock_is_held(sk)) {
1079 1080
		if (key->family != family)
			continue;
1081 1082
		if (key->l3index && key->l3index != l3index)
			continue;
1083 1084
		if (!memcmp(&key->addr, addr, size) &&
		    key->prefixlen == prefixlen)
E
Eric Dumazet 已提交
1085
			return key;
1086 1087 1088 1089
	}
	return NULL;
}

1090
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1091
					 const struct sock *addr_sk)
1092
{
1093
	const union tcp_md5_addr *addr;
1094
	int l3index;
E
Eric Dumazet 已提交
1095

1096 1097
	l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
						 addr_sk->sk_bound_dev_if);
1098
	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
1099
	return tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
1100 1101 1102 1103
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

/* This can be called on a newly created socket, from other files */
E
Eric Dumazet 已提交
1104
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1105 1106
		   int family, u8 prefixlen, int l3index,
		   const u8 *newkey, u8 newkeylen, gfp_t gfp)
1107 1108
{
	/* Add Key to the list */
1109
	struct tcp_md5sig_key *key;
1110
	struct tcp_sock *tp = tcp_sk(sk);
E
Eric Dumazet 已提交
1111
	struct tcp_md5sig_info *md5sig;
1112

1113
	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index);
1114
	if (key) {
1115 1116 1117 1118 1119 1120 1121
		/* Pre-existing entry - just update that one.
		 * Note that the key might be used concurrently.
		 * data_race() is telling kcsan that we do not care of
		 * key mismatches, since changing MD5 key on live flows
		 * can lead to packet drops.
		 */
		data_race(memcpy(key->key, newkey, newkeylen));
1122

1123 1124 1125 1126 1127 1128
		/* Pairs with READ_ONCE() in tcp_md5_hash_key().
		 * Also note that a reader could catch new key->keylen value
		 * but old key->key[], this is the reason we use __GFP_ZERO
		 * at sock_kmalloc() time below these lines.
		 */
		WRITE_ONCE(key->keylen, newkeylen);
1129

E
Eric Dumazet 已提交
1130 1131
		return 0;
	}
1132

1133
	md5sig = rcu_dereference_protected(tp->md5sig_info,
1134
					   lockdep_sock_is_held(sk));
E
Eric Dumazet 已提交
1135 1136 1137
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
1138 1139
			return -ENOMEM;

E
Eric Dumazet 已提交
1140 1141
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
1142
		rcu_assign_pointer(tp->md5sig_info, md5sig);
E
Eric Dumazet 已提交
1143
	}
1144

1145
	key = sock_kmalloc(sk, sizeof(*key), gfp | __GFP_ZERO);
E
Eric Dumazet 已提交
1146 1147
	if (!key)
		return -ENOMEM;
1148
	if (!tcp_alloc_md5sig_pool()) {
1149
		sock_kfree_s(sk, key, sizeof(*key));
E
Eric Dumazet 已提交
1150
		return -ENOMEM;
1151
	}
E
Eric Dumazet 已提交
1152 1153 1154 1155

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
1156
	key->prefixlen = prefixlen;
1157
	key->l3index = l3index;
E
Eric Dumazet 已提交
1158 1159 1160 1161
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
1162 1163
	return 0;
}
E
Eric Dumazet 已提交
1164
EXPORT_SYMBOL(tcp_md5_do_add);
1165

1166
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
1167
		   u8 prefixlen, int l3index)
1168
{
E
Eric Dumazet 已提交
1169 1170
	struct tcp_md5sig_key *key;

1171
	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index);
E
Eric Dumazet 已提交
1172 1173 1174
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
1175
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
E
Eric Dumazet 已提交
1176 1177
	kfree_rcu(key, rcu);
	return 0;
1178
}
E
Eric Dumazet 已提交
1179
EXPORT_SYMBOL(tcp_md5_do_del);
1180

1181
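/* Release every MD5 key attached to this socket. */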
static void tcp_clear_md5_list(struct sock *sk)
1182 1183
{
	struct tcp_sock *tp = tcp_sk(sk);
E
Eric Dumazet 已提交
1184
	struct tcp_md5sig_key *key;
1185
	struct hlist_node *n;
1186
	struct tcp_md5sig_info *md5sig;
1187

1188 1189
	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

1190
	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
E
Eric Dumazet 已提交
1191
		hlist_del_rcu(&key->node);
1192
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
E
Eric Dumazet 已提交
1193
		kfree_rcu(key, rcu);
1194 1195 1196
	}
}

1197
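/* TCP_MD5SIG / TCP_MD5SIG_EXT setsockopt(): validate the request from
 * userspace, then add the key, or delete it when tcpm_keylen is zero.
 */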
static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
1198
				 sockptr_t optval, int optlen)
1199 1200 1201
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1202
	const union tcp_md5_addr *addr;
1203
	u8 prefixlen = 32;
1204
	int l3index = 0;
1205 1206 1207 1208

	if (optlen < sizeof(cmd))
		return -EINVAL;

1209
	if (copy_from_sockptr(&cmd, optval, sizeof(cmd)))
1210 1211 1212 1213 1214
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

1215 1216 1217 1218 1219 1220 1221
	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
		prefixlen = cmd.tcpm_prefixlen;
		if (prefixlen > 32)
			return -EINVAL;
	}

1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239
	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_index_rcu(sock_net(sk), cmd.tcpm_ifindex);
		if (dev && netif_is_l3_master(dev))
			l3index = dev->ifindex;

		rcu_read_unlock();

		/* ok to reference set/not set outside of rcu;
		 * right now device MUST be an L3 master
		 */
		if (!dev || !l3index)
			return -EINVAL;
	}

1240 1241
	addr = (union tcp_md5_addr *)&sin->sin_addr.s_addr;

1242
	if (!cmd.tcpm_keylen)
1243
		return tcp_md5_do_del(sk, addr, AF_INET, prefixlen, l3index);
1244 1245 1246 1247

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

1248
	return tcp_md5_do_add(sk, addr, AF_INET, prefixlen, l3index,
1249
			      cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
1250 1251
}

1252 1253 1254
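/* Feed the TCP pseudo-header plus a copy of the TCP header (with its
 * checksum field zeroed) into the MD5 hash; shared by the header-only
 * and full-skb signature helpers below.
 */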
static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   __be32 daddr, __be32 saddr,
				   const struct tcphdr *th, int nbytes)
1255 1256
{
	struct tcp4_pseudohdr *bp;
1257
	struct scatterlist sg;
1258
	struct tcphdr *_th;
1259

1260
	bp = hp->scratch;
1261 1262 1263
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
1264
	bp->protocol = IPPROTO_TCP;
1265
	bp->len = cpu_to_be16(nbytes);
1266

1267 1268 1269 1270 1271 1272 1273
	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
H
Herbert Xu 已提交
1274
	return crypto_ahash_update(hp->md5_req);
1275 1276
}

E
Eric Dumazet 已提交
1277
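/* Compute the MD5 signature over the pseudo-header and TCP header only;
 * used for the RST and ACK replies built in this file.
 */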
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
E
Eric Dumazet 已提交
1278
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
1279 1280
{
	struct tcp_md5sig_pool *hp;
H
Herbert Xu 已提交
1281
	struct ahash_request *req;
1282 1283 1284 1285

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
H
Herbert Xu 已提交
1286
	req = hp->md5_req;
1287

H
Herbert Xu 已提交
1288
	if (crypto_ahash_init(req))
1289
		goto clear_hash;
1290
	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
1291 1292 1293
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
H
Herbert Xu 已提交
1294 1295
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
1296 1297 1298 1299
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;
1300

1301 1302 1303 1304
clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
1305
	return 1;
1306 1307
}

1308 1309
int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk,
E
Eric Dumazet 已提交
1310
			const struct sk_buff *skb)
1311
{
1312
	struct tcp_md5sig_pool *hp;
H
Herbert Xu 已提交
1313
	struct ahash_request *req;
E
Eric Dumazet 已提交
1314
	const struct tcphdr *th = tcp_hdr(skb);
1315 1316
	__be32 saddr, daddr;

1317 1318 1319
	if (sk) { /* valid for establish/request sockets */
		saddr = sk->sk_rcv_saddr;
		daddr = sk->sk_daddr;
1320
	} else {
1321 1322 1323
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
1324
	}
1325 1326 1327 1328

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
H
Herbert Xu 已提交
1329
	req = hp->md5_req;
1330

H
Herbert Xu 已提交
1331
	if (crypto_ahash_init(req))
1332 1333
		goto clear_hash;

1334
	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
1335 1336 1337 1338 1339
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
H
Herbert Xu 已提交
1340 1341
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
1342 1343 1344 1345 1346 1347 1348 1349 1350 1351
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
1352
}
1353
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1354

1355 1356
#endif

1357
/* Called with rcu_read_lock() */
1358
static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
1359 1360
				    const struct sk_buff *skb,
				    int dif, int sdif)
1361
{
1362
#ifdef CONFIG_TCP_MD5SIG
1363 1364 1365 1366 1367 1368 1369 1370
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
1371
	const __u8 *hash_location = NULL;
1372
	struct tcp_md5sig_key *hash_expected;
1373
	const struct iphdr *iph = ip_hdr(skb);
1374
	const struct tcphdr *th = tcp_hdr(skb);
1375
	const union tcp_md5_addr *addr;
1376
	unsigned char newhash[16];
1377 1378 1379 1380 1381 1382
	int genhash, l3index;

	/* sdif set, means packet ingressed via a device
	 * in an L3 domain and dif is set to the l3mdev
	 */
	l3index = sdif ? dif : 0;
1383

1384
	addr = (union tcp_md5_addr *)&iph->saddr;
1385
	hash_expected = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
1386
	hash_location = tcp_parse_md5sig_option(th);
1387 1388 1389

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
E
Eric Dumazet 已提交
1390
		return false;
1391 1392

	if (hash_expected && !hash_location) {
1393
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
E
Eric Dumazet 已提交
1394
		return true;
1395 1396 1397
	}

	if (!hash_expected && hash_location) {
1398
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
E
Eric Dumazet 已提交
1399
		return true;
1400 1401 1402 1403 1404
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
1405 1406
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
1407
				      NULL, skb);
1408 1409

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1410
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
1411
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s L3 index %d\n",
1412 1413 1414
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
1415
				     : "", l3index);
E
Eric Dumazet 已提交
1416
		return true;
1417
	}
E
Eric Dumazet 已提交
1418
	return false;
1419
#endif
1420 1421
	return false;
}
1422

1423 1424
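/* Record the IPv4 addresses and any IP options of the incoming SYN in the
 * new request sock.
 */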
static void tcp_v4_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
1425 1426 1427
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
E
Eric Dumazet 已提交
1428
	struct net *net = sock_net(sk_listener);
1429

1430 1431
	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
E
Eric Dumazet 已提交
1432
	RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
1433 1434
}

1435 1436
static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
					  struct flowi *fl,
1437
					  const struct request_sock *req)
1438
{
1439
	return inet_csk_route_req(sk, &fl->u.ip4, req);
1440 1441
}

1442
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
L
Linus Torvalds 已提交
1443
	.family		=	PF_INET,
1444
	.obj_size	=	sizeof(struct tcp_request_sock),
1445
	.rtx_syn_ack	=	tcp_rtx_synack,
1446 1447
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
L
Linus Torvalds 已提交
1448
	.send_reset	=	tcp_v4_send_reset,
S
stephen hemminger 已提交
1449
	.syn_ack_timeout =	tcp_syn_ack_timeout,
L
Linus Torvalds 已提交
1450 1451
};

1452
const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1453
	.mss_clamp	=	TCP_MSS_DEFAULT,
1454
#ifdef CONFIG_TCP_MD5SIG
1455
	.req_md5_lookup	=	tcp_v4_md5_lookup,
1456
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1457
#endif
1458
	.init_req	=	tcp_v4_init_req,
1459 1460 1461
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v4_init_sequence,
#endif
1462
	.route_req	=	tcp_v4_route_req,
1463 1464
	.init_seq	=	tcp_v4_init_seq,
	.init_ts_off	=	tcp_v4_init_ts_off,
1465
	.send_synack	=	tcp_v4_send_synack,
1466
};
1467

L
Linus Torvalds 已提交
1468 1469 1470
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer to SYNs send to broadcast or multicast */
E
Eric Dumazet 已提交
1471
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
L
Linus Torvalds 已提交
1472 1473
		goto drop;

O
Octavian Purdila 已提交
1474 1475
	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);
L
Linus Torvalds 已提交
1476 1477

drop:
1478
	tcp_listendrop(sk);
L
Linus Torvalds 已提交
1479 1480
	return 0;
}
E
Eric Dumazet 已提交
1481
EXPORT_SYMBOL(tcp_v4_conn_request);
L
Linus Torvalds 已提交
1482 1483 1484 1485 1486 1487


/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
1488
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1489
				  struct request_sock *req,
1490 1491 1492
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req)
L
Linus Torvalds 已提交
1493
{
1494
	struct inet_request_sock *ireq;
L
Linus Torvalds 已提交
1495 1496 1497
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
1498
#ifdef CONFIG_TCP_MD5SIG
1499
	const union tcp_md5_addr *addr;
1500
	struct tcp_md5sig_key *key;
1501
	int l3index;
1502
#endif
1503
	struct ip_options_rcu *inet_opt;
L
Linus Torvalds 已提交
1504 1505 1506 1507 1508 1509

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
1510
		goto exit_nonewsk;
L
Linus Torvalds 已提交
1511

1512
	newsk->sk_gso_type = SKB_GSO_TCPV4;
1513
	inet_sk_rx_dst_set(newsk, skb);
L
Linus Torvalds 已提交
1514 1515 1516

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
1517
	ireq		      = inet_rsk(req);
1518 1519
	sk_daddr_set(newsk, ireq->ir_rmt_addr);
	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1520
	newsk->sk_bound_dev_if = ireq->ir_iif;
E
Eric Dumazet 已提交
1521 1522 1523
	newinet->inet_saddr   = ireq->ir_loc_addr;
	inet_opt	      = rcu_dereference(ireq->ireq_opt);
	RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
1524
	newinet->mc_index     = inet_iif(skb);
1525
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
1526
	newinet->rcv_tos      = ip_hdr(skb)->tos;
1527
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1528 1529
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1530
	newinet->inet_id = prandom_u32();
L
Linus Torvalds 已提交
1531

E
Eric Dumazet 已提交
1532 1533 1534 1535 1536 1537 1538
	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
1539 1540
	sk_setup_caps(newsk, dst);

1541 1542
	tcp_ca_openreq_child(newsk, dst);

L
Linus Torvalds 已提交
1543
	tcp_sync_mss(newsk, dst_mtu(dst));
E
Eric Dumazet 已提交
1544
	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
1545

L
Linus Torvalds 已提交
1546 1547
	tcp_initialize_rcv_mss(newsk);

1548
#ifdef CONFIG_TCP_MD5SIG
1549
	l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif);
1550
	/* Copy over the MD5 key from the original socket */
1551
	addr = (union tcp_md5_addr *)&newinet->inet_daddr;
1552
	key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
1553
	if (key) {
1554 1555 1556 1557 1558 1559
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
1560
		tcp_md5_do_add(newsk, addr, AF_INET, 32, l3index,
1561
			       key->key, key->keylen, GFP_ATOMIC);
E
Eric Dumazet 已提交
1562
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1563 1564 1565
	}
#endif

1566 1567
	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
1568
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
E
Eric Dumazet 已提交
1569
	if (likely(*own_req)) {
1570
		tcp_move_syn(newtp, req);
E
Eric Dumazet 已提交
1571 1572 1573 1574
		ireq->ireq_opt = NULL;
	} else {
		newinet->inet_opt = NULL;
	}
L
Linus Torvalds 已提交
1575 1576 1577
	return newsk;

exit_overflow:
1578
	NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1579 1580
exit_nonewsk:
	dst_release(dst);
L
Linus Torvalds 已提交
1581
exit:
1582
	tcp_listendrop(sk);
L
Linus Torvalds 已提交
1583
	return NULL;
1584
put_and_exit:
E
Eric Dumazet 已提交
1585
	newinet->inet_opt = NULL;
1586 1587
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
1588
	goto exit;
L
Linus Torvalds 已提交
1589
}
E
Eric Dumazet 已提交
1590
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
L
Linus Torvalds 已提交
1591

1592
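/* On a listener, a non-SYN segment may carry a SYN cookie: hand it to
 * cookie_v4_check(), which may return a freshly created child socket.
 */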
static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
L
Linus Torvalds 已提交
1593
{
1594
#ifdef CONFIG_SYN_COOKIES
1595
	const struct tcphdr *th = tcp_hdr(skb);
L
Linus Torvalds 已提交
1596

1597
	if (!th->syn)
C
Cong Wang 已提交
1598
		sk = cookie_v4_check(sk, skb);
L
Linus Torvalds 已提交
1599 1600 1601 1602
#endif
	return sk;
}

1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617
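/* Generate a SYN cookie (and the MSS value it encodes) for this listener
 * without allocating a request sock; returns 0 if syncookies are not
 * compiled in or no usable MSS is found.
 */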
u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
			 struct tcphdr *th, u32 *cookie)
{
	u16 mss = 0;
#ifdef CONFIG_SYN_COOKIES
	mss = tcp_get_syncookie_mss(&tcp_request_sock_ops,
				    &tcp_request_sock_ipv4_ops, sk, th);
	if (mss) {
		*cookie = __cookie_v4_init_sequence(iph, th, &mss);
		tcp_synq_overflow(sk);
	}
#endif
	return mss;
}

L
Linus Torvalds 已提交
1618
/* The socket must have its spinlock held when we get
1619
 * here, unless it is a TCP_LISTEN socket.
L
Linus Torvalds 已提交
1620 1621 1622 1623 1624 1625 1626 1627
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
1628 1629
	struct sock *rsk;

L
Linus Torvalds 已提交
1630
	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1631 1632
		struct dst_entry *dst = sk->sk_rx_dst;

1633
		sock_rps_save_rxhash(sk, skb);
1634
		sk_mark_napi_id(sk, skb);
1635
		if (dst) {
E
Eric Dumazet 已提交
1636
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1637
			    !dst->ops->check(dst, 0)) {
1638 1639 1640 1641
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
1642
		tcp_rcv_established(sk, skb);
L
Linus Torvalds 已提交
1643 1644 1645
		return 0;
	}

E
Eric Dumazet 已提交
1646
	if (tcp_checksum_complete(skb))
L
Linus Torvalds 已提交
1647 1648 1649
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_cookie_check(sk, skb);

		if (!nsk)
			goto discard;
		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);

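/* Early demux: called for incoming packets before the route lookup.  If an
 * established socket matches the 4-tuple, attach it to the skb and, when the
 * socket's cached rx dst is still valid for this interface, reuse it so the
 * normal routing decision can be skipped.
 */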
int tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return 0;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return 0;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return 0;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif, inet_sdif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
	return 0;
}

bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf);
	struct skb_shared_info *shinfo;
	const struct tcphdr *th;
	struct tcphdr *thtail;
	struct sk_buff *tail;
	unsigned int hdrlen;
	bool fragstolen;
	u32 gso_segs;
	int delta;

	/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
	 * we can fix skb->truesize to its real value to avoid future drops.
	 * This is valid because skb is not yet charged to the socket.
	 * It has been noticed pure SACK packets were sometimes dropped
	 * (if cooked by drivers without copybreak feature).
	 */
	skb_condense(skb);

	skb_dst_drop(skb);

	if (unlikely(tcp_checksum_complete(skb))) {
		bh_unlock_sock(sk);
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
		return true;
	}

	/* Attempt coalescing to last skb in backlog, even if we are
	 * above the limits.
	 * This is okay because skb capacity is limited to MAX_SKB_FRAGS.
	 */
	th = (const struct tcphdr *)skb->data;
	hdrlen = th->doff * 4;
	shinfo = skb_shinfo(skb);

	if (!shinfo->gso_size)
		shinfo->gso_size = skb->len - hdrlen;

	if (!shinfo->gso_segs)
		shinfo->gso_segs = 1;

	tail = sk->sk_backlog.tail;
	if (!tail)
		goto no_coalesce;
	thtail = (struct tcphdr *)tail->data;

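	/* Coalescing is refused unless both segments look like plain
	 * in-sequence data: contiguous sequence space, identical DSCP/ECN
	 * marking, no SYN/RST/URG, ACK set on both, matching ECE/CWR bits,
	 * the same data offset and an identical TCP option block (and, with
	 * TLS offload, the same decryption state).
	 */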
	if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
	    TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield ||
	    ((TCP_SKB_CB(tail)->tcp_flags |
	      TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_SYN | TCPHDR_RST | TCPHDR_URG)) ||
	    !((TCP_SKB_CB(tail)->tcp_flags &
	      TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
	    ((TCP_SKB_CB(tail)->tcp_flags ^
	      TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
#ifdef CONFIG_TLS_DEVICE
	    tail->decrypted != skb->decrypted ||
#endif
	    thtail->doff != th->doff ||
	    memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
		goto no_coalesce;

	__skb_pull(skb, hdrlen);
	if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
		thtail->window = th->window;

		TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;

		if (after(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))
			TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq;

		/* We have to update both TCP_SKB_CB(tail)->tcp_flags and
		 * thtail->fin, so that the fast path in tcp_rcv_established()
		 * is not entered if we append a packet with a FIN.
		 * SYN, RST, URG are not present.
		 * ACK is set on both packets.
		 * PSH : we do not really care in TCP stack,
		 *       at least for 'GRO' packets.
		 */
		thtail->fin |= th->fin;
		TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;

		if (TCP_SKB_CB(skb)->has_rxtstamp) {
			TCP_SKB_CB(tail)->has_rxtstamp = true;
			tail->tstamp = skb->tstamp;
			skb_hwtstamps(tail)->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
		}

		/* Not as strict as GRO. We only need to carry mss max value */
		skb_shinfo(tail)->gso_size = max(shinfo->gso_size,
						 skb_shinfo(tail)->gso_size);

		gso_segs = skb_shinfo(tail)->gso_segs + shinfo->gso_segs;
		skb_shinfo(tail)->gso_segs = min_t(u32, gso_segs, 0xFFFF);

		sk->sk_backlog.len += delta;
		__NET_INC_STATS(sock_net(sk),
				LINUX_MIB_TCPBACKLOGCOALESCE);
		kfree_skb_partial(skb, fragstolen);
		return false;
	}
	__skb_push(skb, hdrlen);

no_coalesce:
	/* Only socket owner can try to collapse/prune rx queues
	 * to reduce memory overhead, so add a little headroom here.
	 * Only a few socket backlogs are likely to be non-empty concurrently.
	 */
	limit += 64*1024;

	if (unlikely(sk_add_backlog(sk, skb, limit))) {
		bh_unlock_sock(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
		return true;
	}
	return false;
}
EXPORT_SYMBOL(tcp_add_backlog);

int tcp_filter(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = (struct tcphdr *)skb->data;

	return sk_filter_trim_cap(sk, skb, th->doff * 4);
}
EXPORT_SYMBOL(tcp_filter);

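/* TCP stores its control block in the same skb->cb[] area that IP uses for
 * IPCB(); tcp_v4_fill_cb() performs that overlay and tcp_v4_restore_cb()
 * undoes it when an skb has to be re-examined at the IP/listener level
 * (e.g. after a request socket was stolen or a TIME_WAIT reopen).
 */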
static void tcp_v4_restore_cb(struct sk_buff *skb)
{
	memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
		sizeof(struct inet_skb_parm));
}

static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
			   const struct tcphdr *th)
{
	/* This is tricky: We move IPCB at its correct location into TCP_SKB_CB()
	 * barrier() makes sure compiler won't play fool^Waliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
		sizeof(struct inet_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;
	TCP_SKB_CB(skb)->has_rxtstamp =
			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
}

/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	struct sk_buff *skb_to_free;
	int sdif = inet_sdif(skb);
	int dif = inet_iif(skb);
	const struct iphdr *iph;
	const struct tcphdr *th;
	bool refcounted;
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */

	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
		goto csum_error;

	th = (const struct tcphdr *)skb->data;
	iph = ip_hdr(skb);
lookup:
	sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
			       th->dest, sdif, &refcounted);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

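	/* A request socket (mini socket in NEW_SYN_RECV state) matched: the
	 * segment is most likely the final ACK of the 3WHS.  Process it on
	 * behalf of the listener; tcp_check_req() may promote the request
	 * into a full child socket.
	 */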
	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		bool req_stolen = false;
		struct sock *nsk;

		sk = req->rsk_listener;
		if (unlikely(tcp_v4_inbound_md5_hash(sk, skb, dif, sdif))) {
			sk_drops_add(sk, skb);
			reqsk_put(req);
			goto discard_it;
		}
		if (tcp_checksum_complete(skb)) {
			reqsk_put(req);
			goto csum_error;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		/* We own a reference on the listener, increase it again
		 * as we might lose it too soon.
		 */
		sock_hold(sk);
		refcounted = true;
		nsk = NULL;
		if (!tcp_filter(sk, skb)) {
			th = (const struct tcphdr *)skb->data;
			iph = ip_hdr(skb);
			tcp_v4_fill_cb(skb, iph, th);
			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
		}
		if (!nsk) {
			reqsk_put(req);
			if (req_stolen) {
				/* Another cpu got exclusive access to req
				 * and created a full blown socket.
				 * Try to feed this packet to this socket
				 * instead of discarding it.
				 */
				tcp_v4_restore_cb(skb);
				sock_put(sk);
				goto lookup;
			}
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v4_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v4_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (tcp_v4_inbound_md5_hash(sk, skb, dif, sdif))
		goto discard_and_relse;

	nf_reset_ct(skb);

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	th = (const struct tcphdr *)skb->data;
	iph = ip_hdr(skb);
	tcp_v4_fill_cb(skb, iph, th);

	skb->dev = NULL;

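	/* Deliver the segment: listeners are handled without the owner lock,
	 * while other sockets take the bottom-half lock and either process
	 * the packet directly or, if the owner currently holds the lock,
	 * queue it on the socket backlog.
	 */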
	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v4_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		skb_to_free = sk->sk_rx_skb_cache;
		sk->sk_rx_skb_cache = NULL;
		ret = tcp_v4_do_rcv(sk, skb);
	} else {
		if (tcp_add_backlog(sk, skb))
			goto discard_and_relse;
		skb_to_free = NULL;
	}
	bh_unlock_sock(sk);
	if (skb_to_free)
		__kfree_skb(skb_to_free);

put_and_return:
	if (refcounted)
		sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v4_fill_cb(skb, iph, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v4_fill_cb(skb, iph, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo, skb,
							__tcp_hdrlen(th),
							iph->saddr, th->source,
							iph->daddr, th->dest,
							inet_iif(skb),
							sdif);
		if (sk2) {
			inet_twsk_deschedule_put(inet_twsk(sk));
			sk = sk2;
			tcp_v4_restore_cb(skb);
			refcounted = false;
			goto process;
		}
	}
		/* to ACK */
		fallthrough;
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v4_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};

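/* Cache the input route of a received skb on the socket so that
 * tcp_v4_early_demux() can hand it back to later packets on the same
 * connection without a new route lookup.
 */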
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	}
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);

const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
EXPORT_SYMBOL(ipv4_specific);

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup		= tcp_v4_md5_lookup,
	.calc_md5_hash		= tcp_v4_md5_hash_skb,
	.md5_parse		= tcp_v4_parse_md5_keys,
};
#endif

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}

void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	trace_tcp_destroy_sock(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	tcp_cleanup_ulp(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Check if we want to disable active TFO */
	tcp_fastopen_active_disable_ofo_check(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	skb_rbtree_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(rcu_dereference_protected(tp->md5sig_info, 1), rcu);
		tp->md5sig_info = NULL;
	}
#endif

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	BUG_ON(rcu_access_pointer(tp->fastopen_rsk));

	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);
	tcp_fastopen_destroy_cipher(sk);
	tcp_saved_syn_free(tp);

	sk_sockets_allocated_dec(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

/*
 * Get next listener socket following cur.  If cur is NULL, get first socket
 * starting from bucket given in st->bucket; when st->bucket is zero the
 * very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct tcp_seq_afinfo *afinfo;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	struct inet_listen_hashbucket *ilb;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;

	if (st->bpf_seq_afinfo)
		afinfo = st->bpf_seq_afinfo;
	else
		afinfo = PDE_DATA(file_inode(seq->file));

	if (!sk) {
get_head:
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock(&ilb->lock);
		sk = sk_nulls_head(&ilb->nulls_head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (afinfo->family == AF_UNSPEC ||
		    sk->sk_family == afinfo->family)
			return sk;
	}
	spin_unlock(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE)
		goto get_head;
	return NULL;
}

static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline bool empty_bucket(const struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
}

/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_seq_afinfo *afinfo;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	if (st->bpf_seq_afinfo)
		afinfo = st->bpf_seq_afinfo;
	else
		afinfo = PDE_DATA(file_inode(seq->file));

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if ((afinfo->family != AF_UNSPEC &&
			     sk->sk_family != afinfo->family) ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		spin_unlock_bh(lock);
	}
out:
	return rc;
}

static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct tcp_seq_afinfo *afinfo;
	struct sock *sk = cur;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (st->bpf_seq_afinfo)
		afinfo = st->bpf_seq_afinfo;
	else
		afinfo = PDE_DATA(file_inode(seq->file));

	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if ((afinfo->family == AF_UNSPEC ||
		     sk->sk_family == afinfo->family) &&
		    net_eq(sock_net(sk), net))
			return sk;
	}

	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
	++st->bucket;
	return established_get_first(seq);
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}

static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		fallthrough;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}

void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}
EXPORT_SYMBOL(tcp_seq_start);

void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}
EXPORT_SYMBOL(tcp_seq_next);

void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
EXPORT_SYMBOL(tcp_seq_stop);

static void get_openreq4(const struct request_sock *req,
			 struct seq_file *f, int i)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->rsk_timer.expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ireq->ir_num,
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f),
				 sock_i_uid(req->rsk_listener)),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		0,
		req);
}

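/* Emit one /proc/net/tcp line for a full (non-TIME_WAIT, non-request) socket.
 * The timer field encodes which timer is pending: 1 retransmit/loss probe,
 * 2 keepalive, 4 zero window probe, 0 none.
 */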
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;
	int state;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	state = inet_sk_state_load(sk);
	if (state == TCP_LISTEN)
		rx_queue = READ_ONCE(sk->sk_ack_backlog);
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
				      READ_ONCE(tp->copied_seq), 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, state,
		READ_ONCE(tp->write_seq) - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		refcount_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sk),
		tp->snd_cwnd,
		state == TCP_LISTEN ?
		    fastopenq->max_qlen :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}

static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	__be32 dest, src;
	__u16 destp, srcp;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		refcount_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait4_sock(v, seq, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq4(v, seq, st->num);
	else
		get_tcp4_sock(v, seq, st->num);
out:
	seq_pad(seq, '\n');
	return 0;
}

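/* bpf_iter support: reuse the /proc seq walker above, but hand every socket
 * visited (listener, request, full or timewait) to an attached BPF program
 * together with the owning uid (0 for TIME_WAIT); SEQ_START_TOKEN is skipped.
 */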
#ifdef CONFIG_BPF_SYSCALL
struct bpf_iter__tcp {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct sock_common *, sk_common);
	uid_t uid __aligned(8);
};

static int tcp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
			     struct sock_common *sk_common, uid_t uid)
{
	struct bpf_iter__tcp ctx;

	meta->seq_num--;  /* skip SEQ_START_TOKEN */
	ctx.meta = meta;
	ctx.sk_common = sk_common;
	ctx.uid = uid;
	return bpf_iter_run_prog(prog, &ctx);
}

static int bpf_iter_tcp_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	struct sock *sk = v;
	uid_t uid;

	if (v == SEQ_START_TOKEN)
		return 0;

	if (sk->sk_state == TCP_TIME_WAIT) {
		uid = 0;
	} else if (sk->sk_state == TCP_NEW_SYN_RECV) {
		const struct request_sock *req = v;

		uid = from_kuid_munged(seq_user_ns(seq),
				       sock_i_uid(req->rsk_listener));
	} else {
		uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
	}

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, false);
	return tcp_prog_seq_show(prog, &meta, v, uid);
}

static void bpf_iter_tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	if (!v) {
		meta.seq = seq;
		prog = bpf_iter_get_info(&meta, true);
		if (prog)
			(void)tcp_prog_seq_show(prog, &meta, v, 0);
	}

	tcp_seq_stop(seq, v);
}

static const struct seq_operations bpf_iter_tcp_seq_ops = {
	.show		= bpf_iter_tcp_seq_show,
	.start		= tcp_seq_start,
	.next		= tcp_seq_next,
	.stop		= bpf_iter_tcp_seq_stop,
};
#endif

static const struct seq_operations tcp4_seq_ops = {
	.show		= tcp4_seq_show,
	.start		= tcp_seq_start,
	.next		= tcp_seq_next,
	.stop		= tcp_seq_stop,
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.family		= AF_INET,
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	if (!proc_create_net_data("tcp", 0444, net->proc_net, &tcp4_seq_ops,
			sizeof(struct tcp_iter_state), &tcp4_seq_afinfo))
		return -ENOMEM;
	return 0;
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	remove_proc_entry("tcp", net->proc_net);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.pre_connect		= tcp_v4_pre_connect,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.keepalive		= tcp_set_keepalive,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.leave_memory_pressure	= tcp_leave_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
	.diag_destroy		= tcp_abort,
};
EXPORT_SYMBOL(tcp_prot);

static void __net_exit tcp_sk_exit(struct net *net)
{
	int cpu;

	if (net->ipv4.tcp_congestion_control)
		bpf_module_put(net->ipv4.tcp_congestion_control,
			       net->ipv4.tcp_congestion_control->owner);

	for_each_possible_cpu(cpu)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
	free_percpu(net->ipv4.tcp_sk);
}

static int __net_init tcp_sk_init(struct net *net)
{
	int res, cpu, cnt;

	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
	if (!net->ipv4.tcp_sk)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct sock *sk;

		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
					   IPPROTO_TCP, net);
		if (res)
			goto fail;
		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

		/* Please enforce IP_DF and IPID==0 for RST and
		 * ACK sent in SYN-RECV and TIME-WAIT state.
		 */
		inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;

		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
	}

	net->ipv4.sysctl_tcp_ecn = 2;
	net->ipv4.sysctl_tcp_ecn_fallback = 1;

	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
	net->ipv4.sysctl_tcp_min_snd_mss = TCP_MIN_SND_MSS;
	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
	net->ipv4.sysctl_tcp_mtu_probe_floor = TCP_MIN_SND_MSS;

	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;

	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
	net->ipv4.sysctl_tcp_syncookies = 1;
	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
	net->ipv4.sysctl_tcp_orphan_retries = 0;
	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
	net->ipv4.sysctl_tcp_tw_reuse = 2;
	net->ipv4.sysctl_tcp_no_ssthresh_metrics_save = 1;

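	/* Scale per-netns limits from the size of the established hash: at
	 * most half as many TIME_WAIT buckets as hash slots, and a SYN
	 * backlog of at least 128 entries.
	 */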
	cnt = tcp_hashinfo.ehash_mask + 1;
	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
	net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;

	net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 128);
	net->ipv4.sysctl_tcp_sack = 1;
	net->ipv4.sysctl_tcp_window_scaling = 1;
	net->ipv4.sysctl_tcp_timestamps = 1;
	net->ipv4.sysctl_tcp_early_retrans = 3;
	net->ipv4.sysctl_tcp_recovery = TCP_RACK_LOSS_DETECTION;
	net->ipv4.sysctl_tcp_slow_start_after_idle = 1; /* By default, RFC2861 behavior.  */
	net->ipv4.sysctl_tcp_retrans_collapse = 1;
	net->ipv4.sysctl_tcp_max_reordering = 300;
	net->ipv4.sysctl_tcp_dsack = 1;
	net->ipv4.sysctl_tcp_app_win = 31;
	net->ipv4.sysctl_tcp_adv_win_scale = 1;
	net->ipv4.sysctl_tcp_frto = 2;
	net->ipv4.sysctl_tcp_moderate_rcvbuf = 1;
	/* This limits the percentage of the congestion window which we
	 * will allow a single TSO frame to consume.  Building TSO frames
	 * which are too large can cause TCP streams to be bursty.
	 */
	net->ipv4.sysctl_tcp_tso_win_divisor = 3;
	/* Default TSQ limit of 16 TSO segments */
	net->ipv4.sysctl_tcp_limit_output_bytes = 16 * 65536;
	/* rfc5961 challenge ack rate limiting */
	net->ipv4.sysctl_tcp_challenge_ack_limit = 1000;
	net->ipv4.sysctl_tcp_min_tso_segs = 2;
	net->ipv4.sysctl_tcp_min_rtt_wlen = 300;
	net->ipv4.sysctl_tcp_autocorking = 1;
	net->ipv4.sysctl_tcp_invalid_ratelimit = HZ/2;
	net->ipv4.sysctl_tcp_pacing_ss_ratio = 200;
	net->ipv4.sysctl_tcp_pacing_ca_ratio = 120;
	if (net != &init_net) {
		memcpy(net->ipv4.sysctl_tcp_rmem,
		       init_net.ipv4.sysctl_tcp_rmem,
		       sizeof(init_net.ipv4.sysctl_tcp_rmem));
		memcpy(net->ipv4.sysctl_tcp_wmem,
		       init_net.ipv4.sysctl_tcp_wmem,
		       sizeof(init_net.ipv4.sysctl_tcp_wmem));
	}
	net->ipv4.sysctl_tcp_comp_sack_delay_ns = NSEC_PER_MSEC;
	net->ipv4.sysctl_tcp_comp_sack_slack_ns = 100 * NSEC_PER_USEC;
	net->ipv4.sysctl_tcp_comp_sack_nr = 44;
	net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
	spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
	net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60;
	atomic_set(&net->ipv4.tfo_active_disable_times, 0);

	/* Reno is always built in */
	if (!net_eq(net, &init_net) &&
	    bpf_try_module_get(init_net.ipv4.tcp_congestion_control,
			       init_net.ipv4.tcp_congestion_control->owner))
		net->ipv4.tcp_congestion_control = init_net.ipv4.tcp_congestion_control;
	else
		net->ipv4.tcp_congestion_control = &tcp_reno;

	return 0;
fail:
	tcp_sk_exit(net);

	return res;
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	struct net *net;

	inet_twsk_purge(&tcp_hashinfo, AF_INET);

	list_for_each_entry(net, net_exit_list, exit_list)
		tcp_fastopen_ctx_destroy(net);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
       .init	   = tcp_sk_init,
       .exit	   = tcp_sk_exit,
       .exit_batch = tcp_sk_exit_batch,
};

#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
DEFINE_BPF_ITER_FUNC(tcp, struct bpf_iter_meta *meta,
		     struct sock_common *sk_common, uid_t uid)

static int bpf_iter_init_tcp(void *priv_data)
{
	struct tcp_iter_state *st = priv_data;
	struct tcp_seq_afinfo *afinfo;
	int ret;

	afinfo = kmalloc(sizeof(*afinfo), GFP_USER | __GFP_NOWARN);
	if (!afinfo)
		return -ENOMEM;

	afinfo->family = AF_UNSPEC;
	st->bpf_seq_afinfo = afinfo;
	ret = bpf_iter_init_seq_net(priv_data);
	if (ret)
		kfree(afinfo);
	return ret;
}

static void bpf_iter_fini_tcp(void *priv_data)
{
	struct tcp_iter_state *st = priv_data;

	kfree(st->bpf_seq_afinfo);
	bpf_iter_fini_seq_net(priv_data);
}

static struct bpf_iter_reg tcp_reg_info = {
	.target			= "tcp",
	.seq_ops		= &bpf_iter_tcp_seq_ops,
	.init_seq_private	= bpf_iter_init_tcp,
	.fini_seq_private	= bpf_iter_fini_tcp,
	.seq_priv_size		= sizeof(struct tcp_iter_state),
	.ctx_arg_info_size	= 1,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__tcp, sk_common),
		  PTR_TO_BTF_ID_OR_NULL },
	},
};

static void __init bpf_iter_register(void)
{
	tcp_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON];
	if (bpf_iter_reg_target(&tcp_reg_info))
		pr_warn("Warning: could not register bpf iterator tcp\n");
}

#endif

void __init tcp_v4_init(void)
{
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");

#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
	bpf_iter_register();
#endif
}