// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *	     				Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/inetdevice.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

#include <trace/events/tcp.h>

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);
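
/* secure_tcp_seq(), used just below, follows the RFC 6528 approach: the
 * initial sequence number is a keyed hash of the connection 4-tuple mixed
 * with a fine-grained clock, which keeps ISNs hard for an off-path
 * attacker to predict.
 */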
static u32 tcp_v4_init_seq(const struct sk_buff *skb)
{
	return secure_tcp_seq(ip_hdr(skb)->daddr,
			      ip_hdr(skb)->saddr,
			      tcp_hdr(skb)->dest,
			      tcp_hdr(skb)->source);
}

static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
	return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
}

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct inet_timewait_sock *tw = inet_twsk(sktw);
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);
	int reuse = sock_net(sk)->ipv4.sysctl_tcp_tw_reuse;

	if (reuse == 2) {
		/* Still does not detect *everything* that goes through
		 * lo, since we require a loopback src or dst address
		 * or direct binding to 'lo' interface.
		 */
		bool loopback = false;
		if (tw->tw_bound_dev_if == LOOPBACK_IFINDEX)
			loopback = true;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == AF_INET6) {
			if (ipv6_addr_loopback(&tw->tw_v6_daddr) ||
			    ipv6_addr_v4mapped_loopback(&tw->tw_v6_daddr) ||
			    ipv6_addr_loopback(&tw->tw_v6_rcv_saddr) ||
			    ipv6_addr_v4mapped_loopback(&tw->tw_v6_rcv_saddr))
				loopback = true;
		} else
#endif
		{
			if (ipv4_is_loopback(tw->tw_daddr) ||
			    ipv4_is_loopback(tw->tw_rcv_saddr))
				loopback = true;
		}
		if (!loopback)
			reuse = 0;
	}

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (!twp || (reuse && time_after32(ktime_get_seconds(),
					    tcptw->tw_ts_recent_stamp)))) {
		/* In case of repair and re-using TIME-WAIT sockets we still
		 * want to be sure that it is safe as above but honor the
		 * sequence numbers and time stamps set as part of the repair
		 * process.
		 *
		 * Without this check re-using a TIME-WAIT socket with TCP
		 * repair would accumulate a -1 on the repair assigned
		 * sequence number. The first time it is reused the sequence
		 * is -1, the second time -2, etc. This fixes that issue
		 * without appearing to create any others.
		 */
		if (likely(!tp->repair)) {
			u32 seq = tcptw->tw_snd_nxt + 65535 + 2;

			if (!seq)
				seq = 1;
			WRITE_ONCE(tp->write_seq, seq);
			tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
			tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		}
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
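
/* The mode checked above comes from the net.ipv4.tcp_tw_reuse sysctl
 * (0 = never reuse, 1 = reuse when the timestamp check makes it safe,
 * 2 = reuse for loopback traffic only). An illustrative invocation,
 * assuming a standard sysctl(8) setup:
 *
 *	sysctl -w net.ipv4.tcp_tw_reuse=2
 */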

static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			      int addr_len)
{
	/* This check is replicated from tcp_v4_connect() and intended to
	 * prevent BPF program called below from accessing bytes that are out
	 * of the bound specified by user in addr_len.
	 */
	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	sock_owned_by_me(sk);

	return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr);
}

/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     lockdep_sock_is_held(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	sk_rcv_saddr_set(sk, inet->inet_saddr);

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			WRITE_ONCE(tp->write_seq, 0);
	}

	inet->inet_dport = usin->sin_port;
	sk_daddr_set(sk, daddr);

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(tcp_death_row, sk);
	if (err)
		goto failure;

	sk_set_txhash(sk);

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);
	rt = NULL;

	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			WRITE_ONCE(tp->write_seq,
				   secure_tcp_seq(inet->inet_saddr,
						  inet->inet_daddr,
						  inet->inet_sport,
						  usin->sin_port));
		tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
						 inet->inet_saddr,
						 inet->inet_daddr);
	}

	inet->inet_id = prandom_u32();

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto failure;

	err = tcp_connect(sk);

	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
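
/* From userspace this path is reached by a plain connect() on a TCP
 * socket. An illustrative sketch (not kernel code; the address and port
 * are made up for the example):
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in dst = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(80),
 *	};
 *
 *	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 */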

/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct dst_entry *dst;
	u32 mtu;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;
	mtu = tcp_sk(sk)->mtu_info;
	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to go wrong... Remember soft error
	 * for the case, if this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);

static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}

/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq, bool abort)
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/* ICMPs are not backlogged, hence we cannot get
	 * an established socket here.
	 */
	if (seq != tcp_rsk(req)->snt_isn) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
	} else if (abort) {
		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		tcp_listendrop(req->rsk_listener);
	}
	reqsk_put(req);
}
EXPORT_SYMBOL(tcp_req_err);

/* TCP-LD (RFC 6069) logic */
void tcp_ld_RTO_revert(struct sock *sk, u32 seq)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	s32 remaining;
	u32 delta_us;

	if (sock_owned_by_user(sk))
		return;

	if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
	    !icsk->icsk_backoff)
		return;

	skb = tcp_rtx_queue_head(sk);
	if (WARN_ON_ONCE(!skb))
		return;

	icsk->icsk_backoff--;
	icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) : TCP_TIMEOUT_INIT;
	icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

	tcp_mstamp_refresh(tp);
	delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
	remaining = icsk->icsk_rto - usecs_to_jiffies(delta_us);

	if (remaining > 0) {
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  remaining, TCP_RTO_MAX);
	} else {
		/* RTO revert clocked out retransmission.
		 * Will retransmit now.
		 */
		tcp_retransmit_timer(sk);
	}
}
EXPORT_SYMBOL(tcp_ld_RTO_revert);

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

int tcp_v4_err(struct sk_buff *skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct sock *sk;
	struct request_sock *fastopen;
	u32 seq, snd_una;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
				       th->dest, iph->saddr, ntohs(th->source),
				       inet_iif(skb), 0);
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
		return -ENOENT;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return 0;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		tcp_req_err(sk, seq, type == ICMP_PARAMETERPROB ||
				     type == ICMP_TIME_EXCEEDED ||
				     (type == ICMP_DEST_UNREACH &&
				      (code == ICMP_NET_UNREACH ||
				       code == ICMP_HOST_UNREACH)));
		return 0;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case:
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
	fastopen = rcu_dereference(tp->fastopen_rsk);
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		if (!sock_owned_by_user(sk))
			do_redirect(skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always < 576 bytes so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if this ICMP message allows revert of backoff.
		 * (see RFC 6069)
		 */
		if (!fastopen &&
		    (code == ICMP_NET_UNREACH || code == ICMP_HOST_UNREACH))
			tcp_ld_RTO_revert(sk, seq);
		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		ip_icmp_error(sk, skb, err, th->dest, info, (u8 *)th);

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows us to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note that in the modern internet, where routing is unreliable
	 * and broken firewalls sit in every dark corner sending random
	 * errors ordered by their masters, even these two messages finally lost
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 *							--ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
	return 0;
}

void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
}
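
/* __tcp_v4_send_check() above follows the usual CHECKSUM_PARTIAL contract:
 * software seeds th->check with the folded pseudo-header sum, while
 * csum_start/csum_offset tell the device (or the software fallback) where
 * to finish the one's-complement sum over the TCP header and payload.
 */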

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);

/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
 *		      for the reset?
 *	Answer: if a packet caused an RST, it is not for a socket
 *		existing in our system; if it is matched to a socket,
 *		it is just a duplicate segment or a bug in the other side's TCP.
 *		So we build the reply based only on the parameters
 *		that arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key = NULL;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	u64 transmit_time = 0;
	struct sock *ctl_sk;
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		const union tcp_md5_addr *addr;
		int l3index;

		/* sdif set, means packet ingressed via a device
		 * in an L3 domain and inet_iif is set to it.
		 */
		l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
		addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
		key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
	} else if (hash_location) {
		const union tcp_md5_addr *addr;
		int sdif = tcp_v4_sdif(skb);
		int dif = inet_iif(skb);
		int l3index;

		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we do not lose security here:
		 * Incoming packet is checked with md5 hash using the found key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
					     ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), dif, sdif);
		/* don't send rst if it can't find key */
		if (!sk1)
			goto out;

		/* sdif set, means packet ingressed via a device
		 * in an L3 domain and dif is set to it.
		 */
		l3index = sdif ? dif : 0;
		addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
		key = tcp_md5_do_lookup(sk1, l3index, addr, AF_INET);
		if (!key)
			goto out;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				     key, ip_hdr(skb)->saddr,
				     ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	/* When socket is gone, all binding information is lost.
	 * routing might fail in this case. No choice here, if we choose to force
	 * input interface, we will misroute in case of asymmetric route.
	 */
	if (sk) {
		arg.bound_dev_if = sk->sk_bound_dev_if;
		if (sk_fullsock(sk))
			trace_tcp_send_reset(sk, skb);
	}

	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));

	arg.tos = ip_hdr(skb)->tos;
	arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	local_bh_disable();
	ctl_sk = this_cpu_read(*net->ipv4.tcp_sk);
	if (sk) {
		ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
				   inet_twsk(sk)->tw_mark : sk->sk_mark;
		ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
				   inet_twsk(sk)->tw_priority : sk->sk_priority;
		transmit_time = tcp_transmit_time(sk);
	}
	ip_send_unicast_reply(ctl_sk,
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len,
			      transmit_time);

	ctl_sk->sk_mark = 0;
	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
	local_bh_enable();

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}

/* The code below, sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context, is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(const struct sock *sk,
			    struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct net *net = sock_net(sk);
	struct ip_reply_arg arg;
	struct sock *ctl_sk;
	u64 transmit_time;

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
	local_bh_disable();
	ctl_sk = this_cpu_read(*net->ipv4.tcp_sk);
	ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
			   inet_twsk(sk)->tw_mark : sk->sk_mark;
	ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
			   inet_twsk(sk)->tw_priority : sk->sk_priority;
	transmit_time = tcp_transmit_time(sk);
	ip_send_unicast_reply(ctl_sk,
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len,
			      transmit_time);

	ctl_sk->sk_mark = 0;
	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	local_bh_enable();
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(sk, skb,
			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	const union tcp_md5_addr *addr;
	int l3index;

	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
					     tcp_sk(sk)->snd_nxt;

	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
	l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
	tcp_v4_send_ack(sk, skb, seq,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, l3index, addr, AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		rcu_read_lock();
		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    rcu_dereference(ireq->ireq_opt));
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

	return err;
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

DEFINE_STATIC_KEY_FALSE(tcp_md5_needed);
EXPORT_SYMBOL(tcp_md5_needed);

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
					   const union tcp_md5_addr *addr,
					   int family)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	const struct tcp_md5sig_info *md5sig;
	__be32 mask;
	struct tcp_md5sig_key *best_match = NULL;
	bool match;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;

	hlist_for_each_entry_rcu(key, &md5sig->head, node,
				 lockdep_sock_is_held(sk)) {
		if (key->family != family)
			continue;
		if (key->l3index && key->l3index != l3index)
			continue;
		if (family == AF_INET) {
			mask = inet_make_mask(key->prefixlen);
			match = (key->addr.a4.s_addr & mask) ==
				(addr->a4.s_addr & mask);
#if IS_ENABLED(CONFIG_IPV6)
		} else if (family == AF_INET6) {
			match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
						  key->prefixlen);
#endif
		} else {
			match = false;
		}

		if (match && (!best_match ||
			      key->prefixlen > best_match->prefixlen))
			best_match = key;
	}
	return best_match;
}
EXPORT_SYMBOL(__tcp_md5_do_lookup);
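
/* The lookup above is a longest-prefix match: with keys configured for
 * both 10.0.0.0/8 and 10.1.2.3/32 (addresses purely illustrative), a
 * peer 10.1.2.3 selects the /32 entry, since the larger prefixlen wins.
 */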

static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
						      const union tcp_md5_addr *addr,
						      int family, u8 prefixlen,
						      int l3index)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	const struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node,
				 lockdep_sock_is_held(sk)) {
		if (key->family != family)
			continue;
		if (key->l3index && key->l3index != l3index)
			continue;
		if (!memcmp(&key->addr, addr, size) &&
		    key->prefixlen == prefixlen)
			return key;
	}
	return NULL;
}

struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk)
{
	const union tcp_md5_addr *addr;
	int l3index;

	l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
						 addr_sk->sk_bound_dev_if);
	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
	return tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, u8 prefixlen, int l3index,
		   const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index);
	if (key) {
		/* Pre-existing entry - just update that one.
		 * Note that the key might be used concurrently.
		 * data_race() is telling kcsan that we do not care of
		 * key mismatches, since changing MD5 key on live flows
		 * can lead to packet drops.
		 */
		data_race(memcpy(key->key, newkey, newkeylen));
		/* Pairs with READ_ONCE() in tcp_md5_hash_key().
		 * Also note that a reader could catch new key->keylen value
		 * but old key->key[], this is the reason we use __GFP_ZERO
		 * at sock_kmalloc() time below these lines.
		 */
		WRITE_ONCE(key->keylen, newkeylen);
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   lockdep_sock_is_held(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp | __GFP_ZERO);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	key->prefixlen = prefixlen;
	key->l3index = l3index;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
		   u8 prefixlen, int l3index)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);

static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}

static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
				 char __user *optval, int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
	const union tcp_md5_addr *addr;
	u8 prefixlen = 32;
	int l3index = 0;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
		prefixlen = cmd.tcpm_prefixlen;
		if (prefixlen > 32)
			return -EINVAL;
	}

	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_index_rcu(sock_net(sk), cmd.tcpm_ifindex);
		if (dev && netif_is_l3_master(dev))
			l3index = dev->ifindex;

		rcu_read_unlock();

		/* ok to reference set/not set outside of rcu;
		 * right now device MUST be an L3 master
		 */
		if (!dev || !l3index)
			return -EINVAL;
	}

	addr = (union tcp_md5_addr *)&sin->sin_addr.s_addr;

	if (!cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, addr, AF_INET, prefixlen, l3index);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, addr, AF_INET, prefixlen, l3index,
			      cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}
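
/* The setsockopt() served above looks roughly as follows from userspace
 * (illustrative sketch only; peer address and key are made up):
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in *peer = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	peer->sin_family = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.1", &peer->sin_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * TCP_MD5SIG_EXT additionally honours tcpm_prefixlen and tcpm_ifindex,
 * as parsed above.
 */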

static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   __be32 daddr, __be32 saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}
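
/* The RFC 2385 digest computed here covers a pseudo-header with the same
 * layout as the checksum pseudo-header (saddr, daddr, zero pad, protocol,
 * segment length), followed by the TCP header with its checksum field
 * zeroed; tcp_v4_md5_hash_headers() above feeds exactly that to the hash.
 */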

static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) { /* valid for establish/request sockets */
		saddr = sk->sk_rcv_saddr;
		daddr = sk->sk_daddr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

#endif

/* Called with rcu_read_lock() */
static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb,
				    int dif, int sdif)
{
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	const union tcp_md5_addr *addr;
	unsigned char newhash[16];
	int genhash, l3index;

	/* sdif set, means packet ingressed via a device
	 * in an L3 domain and dif is set to the l3mdev
	 */
	l3index = sdif ? dif : 0;

	addr = (union tcp_md5_addr *)&iph->saddr;
	hash_expected = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s L3 index %d\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
				     : "", l3index);
		return true;
	}
	return false;
#endif
	return false;
}

static void tcp_v4_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = sock_net(sk_listener);
	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
	RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
}

static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req)
{
	return inet_csk_route_req(sk, &fl->u.ip4, req);
}

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.mss_clamp	=	TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
#endif
	.init_req	=	tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v4_init_sequence,
#endif
	.route_req	=	tcp_v4_route_req,
	.init_seq	=	tcp_v4_init_seq,
	.init_ts_off	=	tcp_v4_init_ts_off,
	.send_synack	=	tcp_v4_send_synack,
};

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);


/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	const union tcp_md5_addr *addr;
	struct tcp_md5sig_key *key;
	int l3index;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	sk_daddr_set(newsk, ireq->ir_rmt_addr);
	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
	newsk->sk_bound_dev_if = ireq->ir_iif;
	newinet->inet_saddr   = ireq->ir_loc_addr;
	inet_opt	      = rcu_dereference(ireq->ireq_opt);
	RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = prandom_u32();

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif);
	/* Copy over the MD5 key from the original socket */
	addr = (union tcp_md5_addr *)&newinet->inet_daddr;
	key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
	if (key) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, addr, AF_INET, 32, l3index,
			       key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (likely(*own_req)) {
		tcp_move_syn(newtp, req);
		ireq->ireq_opt = NULL;
	} else {
		newinet->inet_opt = NULL;
	}
	return newsk;

exit_overflow:
	NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	tcp_listendrop(sk);
	return NULL;
put_and_exit:
	newinet->inet_opt = NULL;
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v4_check(sk, skb);
#endif
	return sk;
}

u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
			 struct tcphdr *th, u32 *cookie)
{
	u16 mss = 0;
#ifdef CONFIG_SYN_COOKIES
	mss = tcp_get_syncookie_mss(&tcp_request_sock_ops,
				    &tcp_request_sock_ipv4_ops, sk, th);
	if (mss) {
		*cookie = __cookie_v4_init_sequence(iph, th, &mss);
		tcp_synq_overflow(sk);
	}
#endif
	return mss;
}

/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    !dst->ops->check(dst, 0)) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		tcp_rcv_established(sk, skb);
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
1649 1650
		struct sock *nsk = tcp_v4_cookie_check(sk, skb);

L
Linus Torvalds 已提交
1651 1652 1653
		if (!nsk)
			goto discard;
		if (nsk != sk) {
1654 1655
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
L
Linus Torvalds 已提交
1656
				goto reset;
1657
			}
L
Linus Torvalds 已提交
1658 1659
			return 0;
		}
1660
	} else
1661
		sock_rps_save_rxhash(sk, skb);
1662

1663
	if (tcp_rcv_state_process(sk, skb)) {
1664
		rsk = sk;
L
Linus Torvalds 已提交
1665
		goto reset;
1666
	}
L
Linus Torvalds 已提交
1667 1668 1669
	return 0;

reset:
1670
	tcp_v4_send_reset(rsk, skb);
L
Linus Torvalds 已提交
1671 1672 1673 1674 1675 1676 1677 1678 1679 1680
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
1681 1682
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
L
Linus Torvalds 已提交
1683 1684
	goto discard;
}
E
Eric Dumazet 已提交
1685
EXPORT_SYMBOL(tcp_v4_do_rcv);

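/* Added note: called from the IP layer before routing - look up an
 * established socket keyed by this segment's addresses and ports.  On
 * a hit, attach the socket to the skb and, if the socket's cached rx
 * dst is still valid for the incoming interface, reuse it and skip a
 * full route lookup.
 */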
int tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return 0;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return 0;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return 0;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif, inet_sdif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
	return 0;
}

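/* Added note: queue a segment on the backlog of a socket currently
 * owned by user context.  Returns true if the skb was dropped because
 * of a checksum failure or a backlog limit overrun; the socket has
 * already been unlocked in that case.  Returns false if the segment
 * was queued or coalesced into the backlog tail.
 */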
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf);
	struct skb_shared_info *shinfo;
	const struct tcphdr *th;
	struct tcphdr *thtail;
	struct sk_buff *tail;
	unsigned int hdrlen;
	bool fragstolen;
	u32 gso_segs;
	int delta;

	/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
	 * we can fix skb->truesize to its real value to avoid future drops.
	 * This is valid because skb is not yet charged to the socket.
	 * It has been noticed pure SACK packets were sometimes dropped
	 * (if cooked by drivers without copybreak feature).
	 */
	skb_condense(skb);

	skb_dst_drop(skb);

	if (unlikely(tcp_checksum_complete(skb))) {
		bh_unlock_sock(sk);
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
		return true;
	}

	/* Attempt coalescing to last skb in backlog, even if we are
	 * above the limits.
	 * This is okay because skb capacity is limited to MAX_SKB_FRAGS.
	 */
	th = (const struct tcphdr *)skb->data;
	hdrlen = th->doff * 4;
	shinfo = skb_shinfo(skb);

	if (!shinfo->gso_size)
		shinfo->gso_size = skb->len - hdrlen;

	if (!shinfo->gso_segs)
		shinfo->gso_segs = 1;

	tail = sk->sk_backlog.tail;
	if (!tail)
		goto no_coalesce;
	thtail = (struct tcphdr *)tail->data;

	if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
	    TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield ||
	    ((TCP_SKB_CB(tail)->tcp_flags |
	      TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_SYN | TCPHDR_RST | TCPHDR_URG)) ||
	    !((TCP_SKB_CB(tail)->tcp_flags &
	      TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
	    ((TCP_SKB_CB(tail)->tcp_flags ^
	      TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
#ifdef CONFIG_TLS_DEVICE
	    tail->decrypted != skb->decrypted ||
#endif
	    thtail->doff != th->doff ||
	    memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
		goto no_coalesce;

	__skb_pull(skb, hdrlen);
	if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
		thtail->window = th->window;

		TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;

		if (after(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))
			TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq;

		/* We have to update both TCP_SKB_CB(tail)->tcp_flags and
		 * thtail->fin, so that the fast path in tcp_rcv_established()
		 * is not entered if we append a packet with a FIN.
		 * SYN, RST, URG are not present.
		 * ACK is set on both packets.
		 * PSH : we do not really care in TCP stack,
		 *       at least for 'GRO' packets.
		 */
		thtail->fin |= th->fin;
		TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;

		if (TCP_SKB_CB(skb)->has_rxtstamp) {
			TCP_SKB_CB(tail)->has_rxtstamp = true;
			tail->tstamp = skb->tstamp;
			skb_hwtstamps(tail)->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
		}

		/* Not as strict as GRO. We only need to carry mss max value */
		skb_shinfo(tail)->gso_size = max(shinfo->gso_size,
						 skb_shinfo(tail)->gso_size);

		gso_segs = skb_shinfo(tail)->gso_segs + shinfo->gso_segs;
		skb_shinfo(tail)->gso_segs = min_t(u32, gso_segs, 0xFFFF);

		sk->sk_backlog.len += delta;
		__NET_INC_STATS(sock_net(sk),
				LINUX_MIB_TCPBACKLOGCOALESCE);
		kfree_skb_partial(skb, fragstolen);
		return false;
	}
	__skb_push(skb, hdrlen);

no_coalesce:
	/* Only socket owner can try to collapse/prune rx queues
	 * to reduce memory overhead, so add a little headroom here.
	 * Few sockets backlog are possibly concurrently non empty.
	 */
	limit += 64*1024;

	if (unlikely(sk_add_backlog(sk, skb, limit))) {
		bh_unlock_sock(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
		return true;
	}
	return false;
}
EXPORT_SYMBOL(tcp_add_backlog);

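/* Added note: run the socket's attached filter on this segment; the
 * filter may trim the skb, but never below the TCP header length
 * (th->doff * 4).
 */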
int tcp_filter(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = (struct tcphdr *)skb->data;

	return sk_filter_trim_cap(sk, skb, th->doff * 4);
}
EXPORT_SYMBOL(tcp_filter);

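/* Added note: skb->cb[] is shared between layers.  IP stores an
 * inet_skb_parm in it and TCP overwrites it with its own tcp_skb_cb:
 * tcp_v4_fill_cb() saves the IP control block inside TCP's cb before
 * filling the TCP fields, and tcp_v4_restore_cb() puts it back whenever
 * the segment must be re-fed to the lookup path or to code expecting a
 * valid IPCB().
 */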
static void tcp_v4_restore_cb(struct sk_buff *skb)
{
	memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
		sizeof(struct inet_skb_parm));
}

static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IPCB at its correct location into TCP_SKB_CB().
	 * barrier() makes sure compiler won't play fool^Waliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
		sizeof(struct inet_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;
	TCP_SKB_CB(skb)->has_rxtstamp =
			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
}

/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	struct sk_buff *skb_to_free;
	int sdif = inet_sdif(skb);
	int dif = inet_iif(skb);
	const struct iphdr *iph;
	const struct tcphdr *th;
	bool refcounted;
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */

	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
		goto csum_error;

	th = (const struct tcphdr *)skb->data;
	iph = ip_hdr(skb);
lookup:
	sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
			       th->dest, sdif, &refcounted);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

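	/* Added note: TCP_NEW_SYN_RECV means the lookup hit a request
	 * (mini) socket.  Validate the segment against the pending
	 * request under the listener; tcp_check_req() may promote it to
	 * a full child socket, or the request may have been stolen by
	 * another CPU.
	 */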
	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		bool req_stolen = false;
		struct sock *nsk;

		sk = req->rsk_listener;
		if (unlikely(tcp_v4_inbound_md5_hash(sk, skb, dif, sdif))) {
			sk_drops_add(sk, skb);
			reqsk_put(req);
			goto discard_it;
		}
		if (tcp_checksum_complete(skb)) {
			reqsk_put(req);
			goto csum_error;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		/* We own a reference on the listener, increase it again
		 * as we might lose it too soon.
		 */
		sock_hold(sk);
		refcounted = true;
		nsk = NULL;
		if (!tcp_filter(sk, skb)) {
			th = (const struct tcphdr *)skb->data;
			iph = ip_hdr(skb);
			tcp_v4_fill_cb(skb, iph, th);
			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
		}
		if (!nsk) {
			reqsk_put(req);
			if (req_stolen) {
				/* Another cpu got exclusive access to req
				 * and created a full blown socket.
				 * Try to feed this packet to this socket
				 * instead of discarding it.
				 */
				tcp_v4_restore_cb(skb);
				sock_put(sk);
				goto lookup;
			}
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v4_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v4_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (tcp_v4_inbound_md5_hash(sk, skb, dif, sdif))
		goto discard_and_relse;

	nf_reset_ct(skb);

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	th = (const struct tcphdr *)skb->data;
	iph = ip_hdr(skb);
	tcp_v4_fill_cb(skb, iph, th);

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v4_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		skb_to_free = sk->sk_rx_skb_cache;
		sk->sk_rx_skb_cache = NULL;
		ret = tcp_v4_do_rcv(sk, skb);
	} else {
		if (tcp_add_backlog(sk, skb))
			goto discard_and_relse;
		skb_to_free = NULL;
	}
	bh_unlock_sock(sk);
	if (skb_to_free)
		__kfree_skb(skb_to_free);

put_and_return:
	if (refcounted)
		sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v4_fill_cb(skb, iph, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v4_fill_cb(skb, iph, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo, skb,
							__tcp_hdrlen(th),
							iph->saddr, th->source,
							iph->daddr, th->dest,
							inet_iif(skb),
							sdif);
		if (sk2) {
			inet_twsk_deschedule_put(inet_twsk(sk));
			sk = sk2;
			tcp_v4_restore_cb(skb);
			refcounted = false;
			goto process;
		}
	}
		/* to ACK */
		fallthrough;
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v4_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};

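/* Added note: cache this skb's validated input route on the socket,
 * together with the interface it arrived on, so that the established
 * fast path can skip per-packet route lookups until the dst becomes
 * invalid.
 */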
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	}
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);

const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
EXPORT_SYMBOL(ipv4_specific);

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup		= tcp_v4_md5_lookup,
	.calc_md5_hash		= tcp_v4_md5_hash_skb,
	.md5_parse		= tcp_v4_parse_md5_keys,
};
#endif

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}

void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	trace_tcp_destroy_sock(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	tcp_cleanup_ulp(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Check if we want to disable active TFO */
	tcp_fastopen_active_disable_ofo_check(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	skb_rbtree_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(rcu_dereference_protected(tp->md5sig_info, 1), rcu);
		tp->md5sig_info = NULL;
	}
#endif

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	BUG_ON(rcu_access_pointer(tp->fastopen_rsk));

	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);
	tcp_fastopen_destroy_cipher(sk);
	tcp_saved_syn_free(tp);

	sk_sockets_allocated_dec(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

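/* Added note: iteration state is kept in tcp_iter_state.  st->bucket is
 * the current hash bucket (listening table first, then the established
 * hash), st->offset the position inside that bucket, and st->num the
 * running entry count, so a sequential read of /proc/net/tcp can resume
 * where the previous read stopped.
 */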
/*
 * Get the next listener socket following cur.  If cur is NULL, get the
 * first socket starting from the bucket given in st->bucket; when
 * st->bucket is zero the very first socket in the hash table is
 * returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct tcp_seq_afinfo *afinfo;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	struct inet_listen_hashbucket *ilb;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;

	if (st->bpf_seq_afinfo)
		afinfo = st->bpf_seq_afinfo;
	else
		afinfo = PDE_DATA(file_inode(seq->file));

	if (!sk) {
get_head:
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock(&ilb->lock);
		sk = sk_nulls_head(&ilb->nulls_head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (afinfo->family == AF_UNSPEC ||
		    sk->sk_family == afinfo->family)
			return sk;
	}
	spin_unlock(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE)
		goto get_head;
	return NULL;
}

static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline bool empty_bucket(const struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
}

/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_seq_afinfo *afinfo;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	if (st->bpf_seq_afinfo)
		afinfo = st->bpf_seq_afinfo;
	else
		afinfo = PDE_DATA(file_inode(seq->file));

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if ((afinfo->family != AF_UNSPEC &&
			     sk->sk_family != afinfo->family) ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		spin_unlock_bh(lock);
	}
out:
	return rc;
}

static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct tcp_seq_afinfo *afinfo;
	struct sock *sk = cur;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (st->bpf_seq_afinfo)
		afinfo = st->bpf_seq_afinfo;
	else
		afinfo = PDE_DATA(file_inode(seq->file));

	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if ((afinfo->family == AF_UNSPEC ||
		     sk->sk_family == afinfo->family) &&
		    net_eq(sock_net(sk), net))
			return sk;
	}

	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
	++st->bucket;
	return established_get_first(seq);
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}

static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		fallthrough;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}

void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}
EXPORT_SYMBOL(tcp_seq_start);

void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}
EXPORT_SYMBOL(tcp_seq_next);

void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
EXPORT_SYMBOL(tcp_seq_stop);

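/* Added note: format one /proc/net/tcp row - slot, local and remote
 * address:port in hex, state, tx/rx queue sizes, timer info,
 * retransmits, uid, timeout and inode.  Request sockets are reported
 * in TCP_SYN_RECV state here because they are not full sockets yet.
 */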
static void get_openreq4(const struct request_sock *req,
			 struct seq_file *f, int i)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->rsk_timer.expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ireq->ir_num,
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f),
				 sock_i_uid(req->rsk_listener)),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		0,
		req);
}

static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;
	int state;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	state = inet_sk_state_load(sk);
	if (state == TCP_LISTEN)
		rx_queue = READ_ONCE(sk->sk_ack_backlog);
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
				      READ_ONCE(tp->copied_seq), 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, state,
		READ_ONCE(tp->write_seq) - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		refcount_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sk),
		tp->snd_cwnd,
		state == TCP_LISTEN ?
		    fastopenq->max_qlen :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}

static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	__be32 dest, src;
	__u16 destp, srcp;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		refcount_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait4_sock(v, seq, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq4(v, seq, st->num);
	else
		get_tcp4_sock(v, seq, st->num);
out:
	seq_pad(seq, '\n');
	return 0;
}

#ifdef CONFIG_BPF_SYSCALL
struct bpf_iter__tcp {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct sock_common *, sk_common);
	uid_t uid __aligned(8);
};

static int tcp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
			     struct sock_common *sk_common, uid_t uid)
{
	struct bpf_iter__tcp ctx;

	meta->seq_num--;  /* skip SEQ_START_TOKEN */
	ctx.meta = meta;
	ctx.sk_common = sk_common;
	ctx.uid = uid;
	return bpf_iter_run_prog(prog, &ctx);
}

static int bpf_iter_tcp_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	struct sock *sk = v;
	uid_t uid;

	if (v == SEQ_START_TOKEN)
		return 0;

	if (sk->sk_state == TCP_TIME_WAIT) {
		uid = 0;
	} else if (sk->sk_state == TCP_NEW_SYN_RECV) {
		const struct request_sock *req = v;

		uid = from_kuid_munged(seq_user_ns(seq),
				       sock_i_uid(req->rsk_listener));
	} else {
		uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
	}

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, false);
	return tcp_prog_seq_show(prog, &meta, v, uid);
}

static void bpf_iter_tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	if (!v) {
		meta.seq = seq;
		prog = bpf_iter_get_info(&meta, true);
		if (prog)
			(void)tcp_prog_seq_show(prog, &meta, v, 0);
	}

	tcp_seq_stop(seq, v);
}

static const struct seq_operations bpf_iter_tcp_seq_ops = {
	.show		= bpf_iter_tcp_seq_show,
	.start		= tcp_seq_start,
	.next		= tcp_seq_next,
	.stop		= bpf_iter_tcp_seq_stop,
};
#endif

static const struct seq_operations tcp4_seq_ops = {
	.show		= tcp4_seq_show,
	.start		= tcp_seq_start,
	.next		= tcp_seq_next,
	.stop		= tcp_seq_stop,
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.family		= AF_INET,
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	if (!proc_create_net_data("tcp", 0444, net->proc_net, &tcp4_seq_ops,
			sizeof(struct tcp_iter_state), &tcp4_seq_afinfo))
		return -ENOMEM;
	return 0;
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	remove_proc_entry("tcp", net->proc_net);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.pre_connect		= tcp_v4_pre_connect,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.keepalive		= tcp_set_keepalive,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.leave_memory_pressure	= tcp_leave_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};
EXPORT_SYMBOL(tcp_prot);

static void __net_exit tcp_sk_exit(struct net *net)
{
	int cpu;

	if (net->ipv4.tcp_congestion_control)
		bpf_module_put(net->ipv4.tcp_congestion_control,
			       net->ipv4.tcp_congestion_control->owner);

	for_each_possible_cpu(cpu)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
	free_percpu(net->ipv4.tcp_sk);
}

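/* Added note: per-namespace TCP setup - create one control socket per
 * possible CPU (used to send RSTs and ACKs on behalf of packets
 * without a socket), then seed the namespace's sysctl defaults.  Child
 * namespaces inherit the rmem/wmem limits and, when possible, the
 * congestion control module of init_net.
 */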
static int __net_init tcp_sk_init(struct net *net)
{
	int res, cpu, cnt;

	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
	if (!net->ipv4.tcp_sk)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct sock *sk;

		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
					   IPPROTO_TCP, net);
		if (res)
			goto fail;
		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

		/* Please enforce IP_DF and IPID==0 for RST and
		 * ACK sent in SYN-RECV and TIME-WAIT state.
		 */
		inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;

		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
	}

	net->ipv4.sysctl_tcp_ecn = 2;
	net->ipv4.sysctl_tcp_ecn_fallback = 1;

	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
	net->ipv4.sysctl_tcp_min_snd_mss = TCP_MIN_SND_MSS;
	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
	net->ipv4.sysctl_tcp_mtu_probe_floor = TCP_MIN_SND_MSS;

	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;

	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
	net->ipv4.sysctl_tcp_syncookies = 1;
	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
	net->ipv4.sysctl_tcp_orphan_retries = 0;
	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
	net->ipv4.sysctl_tcp_tw_reuse = 2;
	net->ipv4.sysctl_tcp_no_ssthresh_metrics_save = 1;

	cnt = tcp_hashinfo.ehash_mask + 1;
	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
	net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;

	net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 128);
	net->ipv4.sysctl_tcp_sack = 1;
	net->ipv4.sysctl_tcp_window_scaling = 1;
	net->ipv4.sysctl_tcp_timestamps = 1;
	net->ipv4.sysctl_tcp_early_retrans = 3;
	net->ipv4.sysctl_tcp_recovery = TCP_RACK_LOSS_DETECTION;
	net->ipv4.sysctl_tcp_slow_start_after_idle = 1; /* By default, RFC2861 behavior.  */
	net->ipv4.sysctl_tcp_retrans_collapse = 1;
	net->ipv4.sysctl_tcp_max_reordering = 300;
	net->ipv4.sysctl_tcp_dsack = 1;
	net->ipv4.sysctl_tcp_app_win = 31;
	net->ipv4.sysctl_tcp_adv_win_scale = 1;
	net->ipv4.sysctl_tcp_frto = 2;
	net->ipv4.sysctl_tcp_moderate_rcvbuf = 1;
	/* This limits the percentage of the congestion window which we
	 * will allow a single TSO frame to consume.  Building TSO frames
	 * which are too large can cause TCP streams to be bursty.
	 */
	net->ipv4.sysctl_tcp_tso_win_divisor = 3;
	/* Default TSQ limit of 16 TSO segments */
	net->ipv4.sysctl_tcp_limit_output_bytes = 16 * 65536;
	/* rfc5961 challenge ack rate limiting */
	net->ipv4.sysctl_tcp_challenge_ack_limit = 1000;
	net->ipv4.sysctl_tcp_min_tso_segs = 2;
	net->ipv4.sysctl_tcp_min_rtt_wlen = 300;
	net->ipv4.sysctl_tcp_autocorking = 1;
	net->ipv4.sysctl_tcp_invalid_ratelimit = HZ/2;
	net->ipv4.sysctl_tcp_pacing_ss_ratio = 200;
	net->ipv4.sysctl_tcp_pacing_ca_ratio = 120;
	if (net != &init_net) {
		memcpy(net->ipv4.sysctl_tcp_rmem,
		       init_net.ipv4.sysctl_tcp_rmem,
		       sizeof(init_net.ipv4.sysctl_tcp_rmem));
		memcpy(net->ipv4.sysctl_tcp_wmem,
		       init_net.ipv4.sysctl_tcp_wmem,
		       sizeof(init_net.ipv4.sysctl_tcp_wmem));
	}
	net->ipv4.sysctl_tcp_comp_sack_delay_ns = NSEC_PER_MSEC;
	net->ipv4.sysctl_tcp_comp_sack_slack_ns = 100 * NSEC_PER_USEC;
	net->ipv4.sysctl_tcp_comp_sack_nr = 44;
	net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
	spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
	net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60;
	atomic_set(&net->ipv4.tfo_active_disable_times, 0);

	/* Reno is always built in */
	if (!net_eq(net, &init_net) &&
	    bpf_try_module_get(init_net.ipv4.tcp_congestion_control,
			       init_net.ipv4.tcp_congestion_control->owner))
		net->ipv4.tcp_congestion_control = init_net.ipv4.tcp_congestion_control;
	else
		net->ipv4.tcp_congestion_control = &tcp_reno;

	return 0;
fail:
	tcp_sk_exit(net);

	return res;
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	struct net *net;

	inet_twsk_purge(&tcp_hashinfo, AF_INET);

	list_for_each_entry(net, net_exit_list, exit_list)
		tcp_fastopen_ctx_destroy(net);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
       .init	   = tcp_sk_init,
       .exit	   = tcp_sk_exit,
       .exit_batch = tcp_sk_exit_batch,
};

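/* Added note: registration glue for the "tcp" BPF iterator - it reuses
 * the seq_file walkers above with an AF_UNSPEC afinfo, so one BPF
 * program can visit both IPv4 and IPv6 sockets.
 */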
#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
DEFINE_BPF_ITER_FUNC(tcp, struct bpf_iter_meta *meta,
		     struct sock_common *sk_common, uid_t uid)

static int bpf_iter_init_tcp(void *priv_data)
{
	struct tcp_iter_state *st = priv_data;
	struct tcp_seq_afinfo *afinfo;
	int ret;

	afinfo = kmalloc(sizeof(*afinfo), GFP_USER | __GFP_NOWARN);
	if (!afinfo)
		return -ENOMEM;

	afinfo->family = AF_UNSPEC;
	st->bpf_seq_afinfo = afinfo;
	ret = bpf_iter_init_seq_net(priv_data);
	if (ret)
		kfree(afinfo);
	return ret;
}

static void bpf_iter_fini_tcp(void *priv_data)
{
	struct tcp_iter_state *st = priv_data;

	kfree(st->bpf_seq_afinfo);
	bpf_iter_fini_seq_net(priv_data);
}

static const struct bpf_iter_reg tcp_reg_info = {
	.target			= "tcp",
	.seq_ops		= &bpf_iter_tcp_seq_ops,
	.init_seq_private	= bpf_iter_init_tcp,
	.fini_seq_private	= bpf_iter_fini_tcp,
	.seq_priv_size		= sizeof(struct tcp_iter_state),
	.ctx_arg_info_size	= 1,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__tcp, sk_common),
		  PTR_TO_BTF_ID_OR_NULL },
	},
};

static void __init bpf_iter_register(void)
{
	if (bpf_iter_reg_target(&tcp_reg_info))
		pr_warn("Warning: could not register bpf iterator tcp\n");
}

#endif

void __init tcp_v4_init(void)
{
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");

#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
	bpf_iter_register();
#endif
}