tcp_ipv4.c 75.7 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *	     				Fixed many serious bugs in the
37
 *					request_sock handling and moved
L
Linus Torvalds 已提交
38 39
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
S
Stephen Hemminger 已提交
40
 *					Added new listen semantics.
L
Linus Torvalds 已提交
41 42 43 44 45 46 47 48 49 50 51 52
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */

53
#define pr_fmt(fmt) "TCP: " fmt
L
Linus Torvalds 已提交
54

H
Herbert Xu 已提交
55
#include <linux/bottom_half.h>
L
Linus Torvalds 已提交
56 57 58 59 60 61 62 63
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
64
#include <linux/slab.h>
L
Linus Torvalds 已提交
65

66
#include <net/net_namespace.h>
L
Linus Torvalds 已提交
67
#include <net/icmp.h>
68
#include <net/inet_hashtables.h>
L
Linus Torvalds 已提交
69
#include <net/tcp.h>
70
#include <net/transp_v6.h>
L
Linus Torvalds 已提交
71 72
#include <net/ipv6.h>
#include <net/inet_common.h>
73
#include <net/timewait_sock.h>
L
Linus Torvalds 已提交
74
#include <net/xfrm.h>
C
Chris Leech 已提交
75
#include <net/netdma.h>
76
#include <net/secure_seq.h>
G
Glauber Costa 已提交
77
#include <net/tcp_memcontrol.h>
L
Linus Torvalds 已提交
78 79 80 81 82 83 84

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

85 86 87
#include <linux/crypto.h>
#include <linux/scatterlist.h>

88 89
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
E
Eric Dumazet 已提交
90
EXPORT_SYMBOL(sysctl_tcp_low_latency);
L
Linus Torvalds 已提交
91 92


93
#ifdef CONFIG_TCP_MD5SIG
E
Eric Dumazet 已提交
94
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
E
Eric Dumazet 已提交
95
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
96 97
#endif

98
struct inet_hashinfo tcp_hashinfo;
E
Eric Dumazet 已提交
99
EXPORT_SYMBOL(tcp_hashinfo);
L
Linus Torvalds 已提交
100

101
static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
L
Linus Torvalds 已提交
102
{
103 104
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
105 106
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
L
Linus Torvalds 已提交
107 108
}

109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
127
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
128 129 130 131 132 133 134 135 136 137 138 139 140
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);

L
Linus Torvalds 已提交
141 142 143
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
144
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
L
Linus Torvalds 已提交
145 146
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
147
	__be16 orig_sport, orig_dport;
148
	__be32 daddr, nexthop;
149
	struct flowi4 *fl4;
150
	struct rtable *rt;
L
Linus Torvalds 已提交
151
	int err;
152
	struct ip_options_rcu *inet_opt;
L
Linus Torvalds 已提交
153 154 155 156 157 158 159 160

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
161 162 163
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
L
Linus Torvalds 已提交
164 165
		if (!daddr)
			return -EINVAL;
166
		nexthop = inet_opt->opt.faddr;
L
Linus Torvalds 已提交
167 168
	}

169 170
	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
171 172
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
173 174 175 176 177 178
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk, true);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
179
			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
180
		return err;
181
	}
L
Linus Torvalds 已提交
182 183 184 185 186 187

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

188
	if (!inet_opt || !inet_opt->opt.srr)
189
		daddr = fl4->daddr;
L
Linus Torvalds 已提交
190

E
Eric Dumazet 已提交
191
	if (!inet->inet_saddr)
192
		inet->inet_saddr = fl4->saddr;
E
Eric Dumazet 已提交
193
	inet->inet_rcv_saddr = inet->inet_saddr;
L
Linus Torvalds 已提交
194

E
Eric Dumazet 已提交
195
	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
L
Linus Torvalds 已提交
196 197 198
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
P
Pavel Emelyanov 已提交
199 200
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
L
Linus Torvalds 已提交
201 202
	}

203
	if (tcp_death_row.sysctl_tw_recycle &&
204 205
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
		tcp_fetch_timewait_stamp(sk, &rt->dst);
L
Linus Torvalds 已提交
206

E
Eric Dumazet 已提交
207 208
	inet->inet_dport = usin->sin_port;
	inet->inet_daddr = daddr;
L
Linus Torvalds 已提交
209

210
	inet_csk(sk)->icsk_ext_hdr_len = 0;
211 212
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
L
Linus Torvalds 已提交
213

214
	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
L
Linus Torvalds 已提交
215 216 217 218 219 220 221

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and not releasing socket
	 * lock select source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
222
	err = inet_hash_connect(&tcp_death_row, sk);
L
Linus Torvalds 已提交
223 224 225
	if (err)
		goto failure;

226
	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
227 228 229 230
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
L
Linus Torvalds 已提交
231
		goto failure;
232
	}
L
Linus Torvalds 已提交
233
	/* OK, now commit destination to socket.  */
234
	sk->sk_gso_type = SKB_GSO_TCPV4;
235
	sk_setup_caps(sk, &rt->dst);
L
Linus Torvalds 已提交
236

P
Pavel Emelyanov 已提交
237
	if (!tp->write_seq && likely(!tp->repair))
E
Eric Dumazet 已提交
238 239 240
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
L
Linus Torvalds 已提交
241 242
							   usin->sin_port);

E
Eric Dumazet 已提交
243
	inet->inet_id = tp->write_seq ^ jiffies;
L
Linus Torvalds 已提交
244

A
Andrey Vagin 已提交
245
	err = tcp_connect(sk);
P
Pavel Emelyanov 已提交
246

L
Linus Torvalds 已提交
247 248 249 250 251 252 253
	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
254 255 256 257
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
L
Linus Torvalds 已提交
258 259 260
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
E
Eric Dumazet 已提交
261
	inet->inet_dport = 0;
L
Linus Torvalds 已提交
262 263
	return err;
}
E
Eric Dumazet 已提交
264
EXPORT_SYMBOL(tcp_v4_connect);
L
Linus Torvalds 已提交
265 266

/*
267 268 269
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
L
Linus Torvalds 已提交
270
 */
271
static void tcp_v4_mtu_reduced(struct sock *sk)
L
Linus Torvalds 已提交
272 273 274
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);
275
	u32 mtu = tcp_sk(sk)->mtu_info;
L
Linus Torvalds 已提交
276

277 278
	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
L
Linus Torvalds 已提交
279 280 281 282 283 284 285 286 287 288 289
		return;

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
290
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
L
Linus Torvalds 已提交
291 292 293 294 295 296 297 298 299 300 301
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}

302 303 304 305
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

306
	if (dst)
307
		dst->ops->redirect(dst, sk, skb);
308 309
}

L
Linus Torvalds 已提交
310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

326
void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
L
Linus Torvalds 已提交
327
{
328
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
329
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
330
	struct inet_connection_sock *icsk;
L
Linus Torvalds 已提交
331 332
	struct tcp_sock *tp;
	struct inet_sock *inet;
333 334
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
L
Linus Torvalds 已提交
335
	struct sock *sk;
336
	struct sk_buff *skb;
337
	struct request_sock *req;
L
Linus Torvalds 已提交
338
	__u32 seq;
339
	__u32 remaining;
L
Linus Torvalds 已提交
340
	int err;
341
	struct net *net = dev_net(icmp_skb->dev);
L
Linus Torvalds 已提交
342

343
	if (icmp_skb->len < (iph->ihl << 2) + 8) {
344
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
L
Linus Torvalds 已提交
345 346 347
		return;
	}

348
	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
349
			iph->saddr, th->source, inet_iif(icmp_skb));
L
Linus Torvalds 已提交
350
	if (!sk) {
351
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
L
Linus Torvalds 已提交
352 353 354
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
355
		inet_twsk_put(inet_twsk(sk));
L
Linus Torvalds 已提交
356 357 358 359 360 361
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
362 363
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
L
Linus Torvalds 已提交
364
	 */
365 366 367 368
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
L
Linus Torvalds 已提交
369 370 371
	if (sk->sk_state == TCP_CLOSE)
		goto out;

372 373 374 375 376
	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

377
	icsk = inet_csk(sk);
L
Linus Torvalds 已提交
378
	tp = tcp_sk(sk);
379
	req = tp->fastopen_rsk;
L
Linus Torvalds 已提交
380 381
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
382 383 384
	    !between(seq, tp->snd_una, tp->snd_nxt) &&
	    (req == NULL || seq != tcp_rsk(req)->snt_isn)) {
		/* For a Fast Open socket, allow seq to be snt_isn. */
385
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
L
Linus Torvalds 已提交
386 387 388 389
		goto out;
	}

	switch (type) {
390 391 392
	case ICMP_REDIRECT:
		do_redirect(icmp_skb, sk);
		goto out;
L
Linus Torvalds 已提交
393 394 395 396 397 398 399 400 401 402 403
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
404 405 406 407 408 409 410
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs send out by Linux are always <576bytes so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

411
			tp->mtu_info = info;
412
			if (!sock_owned_by_user(sk)) {
413
				tcp_v4_mtu_reduced(sk);
414 415 416 417
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
					sock_hold(sk);
			}
L
Linus Torvalds 已提交
418 419 420 421
			goto out;
		}

		err = icmp_err_convert[code].errno;
422 423 424 425 426 427 428 429
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff)
			break;

430 431
		/* XXX (TFO) - revisit the following logic for TFO */

432 433 434
		if (sock_owned_by_user(sk))
			break;

435
		icsk->icsk_backoff--;
436 437
		inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
			TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454
		tcp_bound_rto(sk);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
				tcp_time_stamp - TCP_SKB_CB(skb)->when);

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

L
Linus Torvalds 已提交
455 456 457 458 459 460 461 462
		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

463 464 465 466 467 468 469 470
	/* XXX (TFO) - if it's a TFO socket and has been accepted, rather
	 * than following the TCP_SYN_RECV case and closing the socket,
	 * we ignore the ICMP error and keep trying like a fully established
	 * socket. Is this the right thing to do?
	 */
	if (req && req->sk == NULL)
		goto out;

L
Linus Torvalds 已提交
471
	switch (sk->sk_state) {
472
		struct request_sock *req, **prev;
L
Linus Torvalds 已提交
473 474 475 476
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

477 478
		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
L
Linus Torvalds 已提交
479 480 481 482 483 484
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
485
		WARN_ON(req->sk);
L
Linus Torvalds 已提交
486

487
		if (seq != tcp_rsk(req)->snt_isn) {
488
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
L
Linus Torvalds 已提交
489 490 491 492 493 494 495 496 497
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
498
		inet_csk_reqsk_queue_drop(sk, req, prev);
499
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
L
Linus Torvalds 已提交
500 501 502 503
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
504 505
			       It can f.e. if SYNs crossed,
			       or Fast Open.
L
Linus Torvalds 已提交
506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547
			     */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even this two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 *							--ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

548 549
static void __tcp_v4_send_check(struct sk_buff *skb,
				__be32 saddr, __be32 daddr)
L
Linus Torvalds 已提交
550
{
551
	struct tcphdr *th = tcp_hdr(skb);
L
Linus Torvalds 已提交
552

553
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
554
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
555
		skb->csum_start = skb_transport_header(skb) - skb->head;
A
Al Viro 已提交
556
		skb->csum_offset = offsetof(struct tcphdr, check);
L
Linus Torvalds 已提交
557
	} else {
558
		th->check = tcp_v4_check(skb->len, saddr, daddr,
559
					 csum_partial(th,
L
Linus Torvalds 已提交
560 561 562 563 564
						      th->doff << 2,
						      skb->csum));
	}
}

565
/* This routine computes an IPv4 TCP checksum. */
566
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
567
{
568
	const struct inet_sock *inet = inet_sk(sk);
569 570 571

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
E
Eric Dumazet 已提交
572
EXPORT_SYMBOL(tcp_v4_send_check);
573

574 575
int tcp_v4_gso_send_check(struct sk_buff *skb)
{
576
	const struct iphdr *iph;
577 578 579 580 581
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

582
	iph = ip_hdr(skb);
583
	th = tcp_hdr(skb);
584 585

	th->check = 0;
586
	skb->ip_summed = CHECKSUM_PARTIAL;
587
	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
588 589 590
	return 0;
}

L
Linus Torvalds 已提交
591 592 593 594 595 596 597 598 599 600 601 602 603
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So that we build reply only basing on parameters
 *		arrived with segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

604
static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
L
Linus Torvalds 已提交
605
{
606
	const struct tcphdr *th = tcp_hdr(skb);
607 608 609
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
610
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
611 612
#endif
	} rep;
L
Linus Torvalds 已提交
613
	struct ip_reply_arg arg;
614 615
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
616 617 618 619
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
620
#endif
621
	struct net *net;
L
Linus Torvalds 已提交
622 623 624 625 626

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

E
Eric Dumazet 已提交
627
	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
L
Linus Torvalds 已提交
628 629 630
		return;

	/* Swap the send and the receive. */
631 632 633 634 635
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;
L
Linus Torvalds 已提交
636 637

	if (th->ack) {
638
		rep.th.seq = th->ack_seq;
L
Linus Torvalds 已提交
639
	} else {
640 641 642
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
L
Linus Torvalds 已提交
643 644
	}

645
	memset(&arg, 0, sizeof(arg));
646 647 648 649
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
650 651 652 653 654 655 656 657 658 659
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we are not loose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
660 661
					     &tcp_hashinfo, ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			return;
		rcu_read_lock();
		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto release_sk1;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					     &ip_hdr(skb)->saddr,
					     AF_INET) : NULL;
	}

681 682 683 684 685 686 687 688 689
	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

690
		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
691 692
				     key, ip_hdr(skb)->saddr,
				     ip_hdr(skb)->daddr, &rep.th);
693 694
	}
#endif
695 696
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
697
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
L
Linus Torvalds 已提交
698
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
699
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
700
	/* When socket is gone, all binding information is lost.
A
Alexey Kuznetsov 已提交
701 702
	 * routing might fail in this case. No choice here, if we choose to force
	 * input interface, we will misroute in case of asymmetric route.
703
	 */
A
Alexey Kuznetsov 已提交
704 705
	if (sk)
		arg.bound_dev_if = sk->sk_bound_dev_if;
L
Linus Torvalds 已提交
706

E
Eric Dumazet 已提交
707
	net = dev_net(skb_dst(skb)->dev);
708
	arg.tos = ip_hdr(skb)->tos;
709
	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
710
			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
L
Linus Torvalds 已提交
711

712 713
	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
714 715 716 717 718 719 720 721

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
L
Linus Torvalds 已提交
722 723 724 725 726 727
}

/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

728
static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
729
			    u32 win, u32 tsval, u32 tsecr, int oif,
730
			    struct tcp_md5sig_key *key,
731
			    int reply_flags, u8 tos)
L
Linus Torvalds 已提交
732
{
733
	const struct tcphdr *th = tcp_hdr(skb);
L
Linus Torvalds 已提交
734 735
	struct {
		struct tcphdr th;
736
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
737
#ifdef CONFIG_TCP_MD5SIG
738
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
739 740
#endif
			];
L
Linus Torvalds 已提交
741 742
	} rep;
	struct ip_reply_arg arg;
E
Eric Dumazet 已提交
743
	struct net *net = dev_net(skb_dst(skb)->dev);
L
Linus Torvalds 已提交
744 745

	memset(&rep.th, 0, sizeof(struct tcphdr));
746
	memset(&arg, 0, sizeof(arg));
L
Linus Torvalds 已提交
747 748 749

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
750
	if (tsecr) {
751 752 753
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
754 755
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
756
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
L
Linus Torvalds 已提交
757 758 759 760 761 762 763 764 765 766 767
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

768 769
#ifdef CONFIG_TCP_MD5SIG
	if (key) {
770
		int offset = (tsecr) ? 3 : 0;
771 772 773 774 775 776 777 778

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

779
		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
780 781
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
782 783
	}
#endif
784
	arg.flags = reply_flags;
785 786
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
L
Linus Torvalds 已提交
787 788
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
789 790
	if (oif)
		arg.bound_dev_if = oif;
791
	arg.tos = tos;
792
	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
793
			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
L
Linus Torvalds 已提交
794

795
	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
L
Linus Torvalds 已提交
796 797 798 799
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
800
	struct inet_timewait_sock *tw = inet_twsk(sk);
801
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
L
Linus Torvalds 已提交
802

803
	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
804
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
805
			tcp_time_stamp + tcptw->tw_ts_offset,
806 807
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
808
			tcp_twsk_md5_key(tcptw),
809 810
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
811
			);
L
Linus Torvalds 已提交
812

813
	inet_twsk_put(tw);
L
Linus Torvalds 已提交
814 815
}

816
static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
817
				  struct request_sock *req)
L
Linus Torvalds 已提交
818
{
819 820 821 822 823 824
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
825
			tcp_time_stamp,
826 827
			req->ts_recent,
			0,
E
Eric Dumazet 已提交
828 829
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
830 831
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
L
Linus Torvalds 已提交
832 833 834
}

/*
835
 *	Send a SYN-ACK after having received a SYN.
836
 *	This still operates on a request_sock only, not on a big
L
Linus Torvalds 已提交
837 838
 *	socket.
 */
839 840
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct request_sock *req,
841 842
			      u16 queue_mapping,
			      bool nocache)
L
Linus Torvalds 已提交
843
{
844
	const struct inet_request_sock *ireq = inet_rsk(req);
845
	struct flowi4 fl4;
L
Linus Torvalds 已提交
846 847 848 849
	int err = -1;
	struct sk_buff * skb;

	/* First, grab a route. */
850
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
851
		return -1;
L
Linus Torvalds 已提交
852

C
Christoph Paasch 已提交
853
	skb = tcp_make_synack(sk, dst, req, NULL);
L
Linus Torvalds 已提交
854 855

	if (skb) {
856
		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
L
Linus Torvalds 已提交
857

858
		skb_set_queue_mapping(skb, queue_mapping);
859 860 861
		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
862
		err = net_xmit_eval(err);
863 864
		if (!tcp_rsk(req)->snt_synack && !err)
			tcp_rsk(req)->snt_synack = tcp_time_stamp;
L
Linus Torvalds 已提交
865 866 867 868 869
	}

	return err;
}

C
Christoph Paasch 已提交
870
static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req)
871
{
C
Christoph Paasch 已提交
872
	int res = tcp_v4_send_synack(sk, NULL, req, 0, false);
873 874 875 876

	if (!res)
		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return res;
877 878
}

L
Linus Torvalds 已提交
879
/*
880
 *	IPv4 request_sock destructor.
L
Linus Torvalds 已提交
881
 */
882
static void tcp_v4_reqsk_destructor(struct request_sock *req)
L
Linus Torvalds 已提交
883
{
J
Jesper Juhl 已提交
884
	kfree(inet_rsk(req)->opt);
L
Linus Torvalds 已提交
885 886
}

887
/*
E
Eric Dumazet 已提交
888
 * Return true if a syncookie should be sent
889
 */
E
Eric Dumazet 已提交
890
bool tcp_syn_flood_action(struct sock *sk,
891 892
			 const struct sk_buff *skb,
			 const char *proto)
L
Linus Torvalds 已提交
893
{
894
	const char *msg = "Dropping request";
E
Eric Dumazet 已提交
895
	bool want_cookie = false;
896 897 898
	struct listen_sock *lopt;


L
Linus Torvalds 已提交
899

900
#ifdef CONFIG_SYN_COOKIES
901
	if (sysctl_tcp_syncookies) {
902
		msg = "Sending cookies";
E
Eric Dumazet 已提交
903
		want_cookie = true;
904 905
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
	} else
906
#endif
907 908 909 910 911
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);

	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
	if (!lopt->synflood_warned) {
		lopt->synflood_warned = 1;
912
		pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
913 914 915
			proto, ntohs(tcp_hdr(skb)->dest), msg);
	}
	return want_cookie;
916
}
917
EXPORT_SYMBOL(tcp_syn_flood_action);
L
Linus Torvalds 已提交
918 919

/*
920
 * Save and compile IPv4 options into the request_sock if needed.
L
Linus Torvalds 已提交
921
 */
922
static struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
L
Linus Torvalds 已提交
923
{
924 925
	const struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options_rcu *dopt = NULL;
L
Linus Torvalds 已提交
926 927

	if (opt && opt->optlen) {
928 929
		int opt_size = sizeof(*dopt) + opt->optlen;

L
Linus Torvalds 已提交
930 931
		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
932
			if (ip_options_echo(&dopt->opt, skb)) {
L
Linus Torvalds 已提交
933 934 935 936 937 938 939 940
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}

941 942 943 944 945 946 947 948
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
E
Eric Dumazet 已提交
949 950 951
struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
952 953
{
	struct tcp_sock *tp = tcp_sk(sk);
E
Eric Dumazet 已提交
954 955
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
956
	struct tcp_md5sig_info *md5sig;
957

958 959
	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
960 961
				       sock_owned_by_user(sk) ||
				       lockdep_is_held(&sk->sk_lock.slock));
962
	if (!md5sig)
963
		return NULL;
E
Eric Dumazet 已提交
964 965 966 967
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
968
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
E
Eric Dumazet 已提交
969 970 971 972
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
973 974 975
	}
	return NULL;
}
E
Eric Dumazet 已提交
976
EXPORT_SYMBOL(tcp_md5_do_lookup);
977 978 979 980

struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
E
Eric Dumazet 已提交
981 982 983 984
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
985 986 987
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

A
Adrian Bunk 已提交
988 989
static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
990
{
E
Eric Dumazet 已提交
991 992 993 994
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
995 996 997
}

/* This can be called on a newly created socket, from other files */
E
Eric Dumazet 已提交
998 999
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
1000 1001
{
	/* Add Key to the list */
1002
	struct tcp_md5sig_key *key;
1003
	struct tcp_sock *tp = tcp_sk(sk);
E
Eric Dumazet 已提交
1004
	struct tcp_md5sig_info *md5sig;
1005

E
Eric Dumazet 已提交
1006
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
1007 1008
	if (key) {
		/* Pre-existing entry - just update that one. */
E
Eric Dumazet 已提交
1009
		memcpy(key->key, newkey, newkeylen);
1010
		key->keylen = newkeylen;
E
Eric Dumazet 已提交
1011 1012
		return 0;
	}
1013

1014 1015
	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
E
Eric Dumazet 已提交
1016 1017 1018
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
1019 1020
			return -ENOMEM;

E
Eric Dumazet 已提交
1021 1022
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
1023
		rcu_assign_pointer(tp->md5sig_info, md5sig);
E
Eric Dumazet 已提交
1024
	}
1025

1026
	key = sock_kmalloc(sk, sizeof(*key), gfp);
E
Eric Dumazet 已提交
1027 1028
	if (!key)
		return -ENOMEM;
1029
	if (!tcp_alloc_md5sig_pool()) {
1030
		sock_kfree_s(sk, key, sizeof(*key));
E
Eric Dumazet 已提交
1031
		return -ENOMEM;
1032
	}
E
Eric Dumazet 已提交
1033 1034 1035 1036 1037 1038 1039 1040

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
1041 1042
	return 0;
}
E
Eric Dumazet 已提交
1043
EXPORT_SYMBOL(tcp_md5_do_add);
1044

E
Eric Dumazet 已提交
1045
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
1046
{
E
Eric Dumazet 已提交
1047 1048 1049 1050 1051 1052
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
1053
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
E
Eric Dumazet 已提交
1054 1055
	kfree_rcu(key, rcu);
	return 0;
1056
}
E
Eric Dumazet 已提交
1057
EXPORT_SYMBOL(tcp_md5_do_del);
1058

1059
static void tcp_clear_md5_list(struct sock *sk)
1060 1061
{
	struct tcp_sock *tp = tcp_sk(sk);
E
Eric Dumazet 已提交
1062
	struct tcp_md5sig_key *key;
1063
	struct hlist_node *n;
1064
	struct tcp_md5sig_info *md5sig;
1065

1066 1067
	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

1068
	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
E
Eric Dumazet 已提交
1069
		hlist_del_rcu(&key->node);
1070
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
E
Eric Dumazet 已提交
1071
		kfree_rcu(key, rcu);
1072 1073 1074
	}
}

1075 1076
static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
1077 1078 1079 1080 1081 1082 1083
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

1084
	if (copy_from_user(&cmd, optval, sizeof(cmd)))
1085 1086 1087 1088 1089
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

1090
	if (!cmd.tcpm_key || !cmd.tcpm_keylen)
E
Eric Dumazet 已提交
1091 1092
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);
1093 1094 1095 1096

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

E
Eric Dumazet 已提交
1097 1098 1099
	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
1100 1101
}

1102 1103
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
1104 1105
{
	struct tcp4_pseudohdr *bp;
1106
	struct scatterlist sg;
1107 1108 1109 1110

	bp = &hp->md5_blk.ip4;

	/*
1111
	 * 1. the TCP pseudo-header (in the order: source IP address,
1112 1113 1114 1115 1116 1117
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
1118
	bp->protocol = IPPROTO_TCP;
1119
	bp->len = cpu_to_be16(nbytes);
1120

1121 1122 1123 1124
	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

E
Eric Dumazet 已提交
1125
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
E
Eric Dumazet 已提交
1126
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
1145 1146 1147 1148
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;
1149

1150 1151 1152 1153
clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
1154
	return 1;
1155 1156
}

1157
int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
E
Eric Dumazet 已提交
1158 1159
			const struct sock *sk, const struct request_sock *req,
			const struct sk_buff *skb)
1160
{
1161 1162
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
E
Eric Dumazet 已提交
1163
	const struct tcphdr *th = tcp_hdr(skb);
1164 1165 1166
	__be32 saddr, daddr;

	if (sk) {
E
Eric Dumazet 已提交
1167 1168
		saddr = inet_sk(sk)->inet_saddr;
		daddr = inet_sk(sk)->inet_daddr;
1169 1170 1171
	} else if (req) {
		saddr = inet_rsk(req)->loc_addr;
		daddr = inet_rsk(req)->rmt_addr;
1172
	} else {
1173 1174 1175
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
1176
	}
1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
1205
}
1206
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1207

E
Eric Dumazet 已提交
1208
static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
1209 1210 1211 1212 1213 1214 1215 1216 1217
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and its wrong.
	 */
1218
	const __u8 *hash_location = NULL;
1219
	struct tcp_md5sig_key *hash_expected;
1220
	const struct iphdr *iph = ip_hdr(skb);
1221
	const struct tcphdr *th = tcp_hdr(skb);
1222 1223 1224
	int genhash;
	unsigned char newhash[16];

E
Eric Dumazet 已提交
1225 1226
	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
1227
	hash_location = tcp_parse_md5sig_option(th);
1228 1229 1230

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
E
Eric Dumazet 已提交
1231
		return false;
1232 1233

	if (hash_expected && !hash_location) {
1234
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
E
Eric Dumazet 已提交
1235
		return true;
1236 1237 1238
	}

	if (!hash_expected && hash_location) {
1239
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
E
Eric Dumazet 已提交
1240
		return true;
1241 1242 1243 1244 1245
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
1246 1247 1248
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);
1249 1250

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1251 1252 1253 1254 1255
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
				     : "");
E
Eric Dumazet 已提交
1256
		return true;
1257
	}
E
Eric Dumazet 已提交
1258
	return false;
1259 1260 1261 1262
}

#endif

1263
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
L
Linus Torvalds 已提交
1264
	.family		=	PF_INET,
1265
	.obj_size	=	sizeof(struct tcp_request_sock),
1266
	.rtx_syn_ack	=	tcp_v4_rtx_synack,
1267 1268
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
L
Linus Torvalds 已提交
1269
	.send_reset	=	tcp_v4_send_reset,
1270
	.syn_ack_timeout = 	tcp_syn_ack_timeout,
L
Linus Torvalds 已提交
1271 1272
};

1273
#ifdef CONFIG_TCP_MD5SIG
1274
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1275
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
1276
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1277
};
1278
#endif
1279

1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363
static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
			       struct request_sock *req,
			       struct tcp_fastopen_cookie *foc,
			       struct tcp_fastopen_cookie *valid_foc)
{
	bool skip_cookie = false;
	struct fastopen_queue *fastopenq;

	if (likely(!fastopen_cookie_present(foc))) {
		/* See include/net/tcp.h for the meaning of these knobs */
		if ((sysctl_tcp_fastopen & TFO_SERVER_ALWAYS) ||
		    ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD) &&
		    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1)))
			skip_cookie = true; /* no cookie to validate */
		else
			return false;
	}
	fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
	/* A FO option is present; bump the counter. */
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVE);

	/* Make sure the listener has enabled fastopen, and we don't
	 * exceed the max # of pending TFO requests allowed before trying
	 * to validating the cookie in order to avoid burning CPU cycles
	 * unnecessarily.
	 *
	 * XXX (TFO) - The implication of checking the max_qlen before
	 * processing a cookie request is that clients can't differentiate
	 * between qlen overflow causing Fast Open to be disabled
	 * temporarily vs a server not supporting Fast Open at all.
	 */
	if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) == 0 ||
	    fastopenq == NULL || fastopenq->max_qlen == 0)
		return false;

	if (fastopenq->qlen >= fastopenq->max_qlen) {
		struct request_sock *req1;
		spin_lock(&fastopenq->lock);
		req1 = fastopenq->rskq_rst_head;
		if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
			spin_unlock(&fastopenq->lock);
			NET_INC_STATS_BH(sock_net(sk),
			    LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
			/* Avoid bumping LINUX_MIB_TCPFASTOPENPASSIVEFAIL*/
			foc->len = -1;
			return false;
		}
		fastopenq->rskq_rst_head = req1->dl_next;
		fastopenq->qlen--;
		spin_unlock(&fastopenq->lock);
		reqsk_free(req1);
	}
	if (skip_cookie) {
		tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		return true;
	}
	if (foc->len == TCP_FASTOPEN_COOKIE_SIZE) {
		if ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_CHKED) == 0) {
			tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
			if ((valid_foc->len != TCP_FASTOPEN_COOKIE_SIZE) ||
			    memcmp(&foc->val[0], &valid_foc->val[0],
			    TCP_FASTOPEN_COOKIE_SIZE) != 0)
				return false;
			valid_foc->len = -1;
		}
		/* Acknowledge the data received from the peer. */
		tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		return true;
	} else if (foc->len == 0) { /* Client requesting a cookie */
		tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
		NET_INC_STATS_BH(sock_net(sk),
		    LINUX_MIB_TCPFASTOPENCOOKIEREQD);
	} else {
		/* Client sent a cookie with wrong size. Treat it
		 * the same as invalid and return a valid one.
		 */
		tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
	}
	return false;
}

static int tcp_v4_conn_req_fastopen(struct sock *sk,
				    struct sk_buff *skb,
				    struct sk_buff *skb_synack,
C
Christoph Paasch 已提交
1364
				    struct request_sock *req)
1365 1366 1367 1368 1369
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct sock *child;
1370
	int err;
1371

1372 1373
	req->num_retrans = 0;
	req->num_timeout = 0;
1374 1375 1376 1377 1378 1379 1380 1381 1382
	req->sk = NULL;

	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
	if (child == NULL) {
		NET_INC_STATS_BH(sock_net(sk),
				 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
		kfree_skb(skb_synack);
		return -1;
	}
1383 1384 1385 1386 1387
	err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
				    ireq->rmt_addr, ireq->opt);
	err = net_xmit_eval(err);
	if (!err)
		tcp_rsk(req)->snt_synack = tcp_time_stamp;
1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447
	/* XXX (TFO) - is it ok to ignore error and continue? */

	spin_lock(&queue->fastopenq->lock);
	queue->fastopenq->qlen++;
	spin_unlock(&queue->fastopenq->lock);

	/* Initialize the child socket. Have to fix some values to take
	 * into account the child is a Fast Open socket and is created
	 * only out of the bits carried in the SYN packet.
	 */
	tp = tcp_sk(child);

	tp->fastopen_rsk = req;
	/* Do a hold on the listner sk so that if the listener is being
	 * closed, the child that has been accepted can live on and still
	 * access listen_lock.
	 */
	sock_hold(sk);
	tcp_rsk(req)->listener = sk;

	/* RFC1323: The window in SYN & SYN/ACK segments is never
	 * scaled. So correct it appropriately.
	 */
	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);

	/* Activate the retrans timer so that SYNACK can be retransmitted.
	 * The request socket is not added to the SYN table of the parent
	 * because it's been added to the accept queue directly.
	 */
	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
	    TCP_TIMEOUT_INIT, TCP_RTO_MAX);

	/* Add the child socket directly into the accept queue */
	inet_csk_reqsk_queue_add(sk, req, child);

	/* Now finish processing the fastopen child socket. */
	inet_csk(child)->icsk_af_ops->rebuild_header(child);
	tcp_init_congestion_control(child);
	tcp_mtup_init(child);
	tcp_init_buffer_space(child);
	tcp_init_metrics(child);

	/* Queue the data carried in the SYN packet. We need to first
	 * bump skb's refcnt because the caller will attempt to free it.
	 *
	 * XXX (TFO) - we honor a zero-payload TFO request for now.
	 * (Any reason not to?)
	 */
	if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq + 1) {
		/* Don't queue the skb if there is no payload in SYN.
		 * XXX (TFO) - How about SYN+FIN?
		 */
		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
	} else {
		skb = skb_get(skb);
		skb_dst_drop(skb);
		__skb_pull(skb, tcp_hdr(skb)->doff * 4);
		skb_set_owner_r(skb, child);
		__skb_queue_tail(&child->sk_receive_queue, skb);
		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
Y
Yuchung Cheng 已提交
1448
		tp->syn_data_acked = 1;
1449 1450 1451 1452 1453 1454 1455 1456
	}
	sk->sk_data_ready(sk, 0);
	bh_unlock_sock(child);
	sock_put(child);
	WARN_ON(req->sk == NULL);
	return 0;
}

L
Linus Torvalds 已提交
1457 1458 1459
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_options_received tmp_opt;
1460
	struct request_sock *req;
1461
	struct inet_request_sock *ireq;
1462
	struct tcp_sock *tp = tcp_sk(sk);
1463
	struct dst_entry *dst = NULL;
1464 1465
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;
L
Linus Torvalds 已提交
1466
	__u32 isn = TCP_SKB_CB(skb)->when;
E
Eric Dumazet 已提交
1467
	bool want_cookie = false;
1468 1469 1470 1471 1472
	struct flowi4 fl4;
	struct tcp_fastopen_cookie foc = { .len = -1 };
	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
	struct sk_buff *skb_synack;
	int do_fastopen;
L
Linus Torvalds 已提交
1473 1474

	/* Never answer to SYNs send to broadcast or multicast */
E
Eric Dumazet 已提交
1475
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
L
Linus Torvalds 已提交
1476 1477 1478 1479 1480 1481
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations, they conserve resources and peer is
	 * evidently real one.
	 */
1482
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1483 1484 1485
		want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
		if (!want_cookie)
			goto drop;
L
Linus Torvalds 已提交
1486 1487 1488 1489 1490 1491 1492
	}

	/* Accept backlog is full. If we have already queued enough
	 * of warm entries in syn queue, drop request. It is better than
	 * clogging syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
1493 1494
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
L
Linus Torvalds 已提交
1495
		goto drop;
1496
	}
L
Linus Torvalds 已提交
1497

1498
	req = inet_reqsk_alloc(&tcp_request_sock_ops);
L
Linus Torvalds 已提交
1499 1500 1501
	if (!req)
		goto drop;

1502 1503 1504 1505
#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif

L
Linus Torvalds 已提交
1506
	tcp_clear_options(&tmp_opt);
1507
	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
1508
	tmp_opt.user_mss  = tp->rx_opt.user_mss;
C
Christoph Paasch 已提交
1509
	tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
L
Linus Torvalds 已提交
1510

1511
	if (want_cookie && !tmp_opt.saw_tstamp)
L
Linus Torvalds 已提交
1512 1513 1514 1515 1516
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

1517 1518 1519 1520
	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->no_srccheck = inet_sk(sk)->transparent;
1521
	ireq->opt = tcp_v4_save_options(skb);
1522

1523
	if (security_inet_conn_request(sk, skb, req))
1524
		goto drop_and_free;
1525

1526
	if (!want_cookie || tmp_opt.tstamp_ok)
1527
		TCP_ECN_create_request(req, skb, sock_net(sk));
L
Linus Torvalds 已提交
1528 1529 1530

	if (want_cookie) {
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
		req->cookie_ts = tmp_opt.tstamp_ok;
	} else if (!isn) {
		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
		    fl4.daddr == saddr) {
			if (!tcp_peer_is_proven(req, dst, true)) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 !tcp_peer_is_proven(req, dst, false)) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
				       &saddr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v4_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;

	if (dst == NULL) {
		dst = inet_csk_route_req(sk, &fl4, req);
		if (dst == NULL)
			goto drop_and_free;
	}
	do_fastopen = tcp_fastopen_check(sk, skb, req, &foc, &valid_foc);

	/* We don't call tcp_v4_send_synack() directly because we need
	 * to make sure a child socket can be created successfully before
	 * sending back synack!
	 *
	 * XXX (TFO) - Ideally one would simply call tcp_v4_send_synack()
	 * (or better yet, call tcp_send_synack() in the child context
	 * directly, but will have to fix bunch of other code first)
	 * after syn_recv_sock() except one will need to first fix the
	 * latter to remove its dependency on the current implementation
	 * of tcp_v4_send_synack()->tcp_select_initial_window().
	 */
	skb_synack = tcp_make_synack(sk, dst, req,
	    fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);

	if (skb_synack) {
		__tcp_v4_send_check(skb_synack, ireq->loc_addr, ireq->rmt_addr);
		skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
	} else
		goto drop_and_free;

	if (likely(!do_fastopen)) {
		int err;
		err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
		     ireq->rmt_addr, ireq->opt);
		err = net_xmit_eval(err);
		if (err || want_cookie)
			goto drop_and_free;

		tcp_rsk(req)->snt_synack = tcp_time_stamp;
		tcp_rsk(req)->listener = NULL;
		/* Add the request_sock to the SYN table */
		inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
		if (fastopen_cookie_present(&foc) && foc.len != 0)
			NET_INC_STATS_BH(sock_net(sk),
			    LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
	} else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req))
		goto drop_and_free;

	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);


/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->inet_daddr   = ireq->rmt_addr;
	newinet->inet_rcv_saddr = ireq->loc_addr;
	newinet->inet_saddr	      = ireq->loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);
	tcp_synack_rtt_meas(newsk, req);
	newtp->total_retrans = req->num_retrans;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	__inet_hash_nolisten(newsk, NULL);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
put_and_exit:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

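/* Look up the destination socket for a segment that arrived on a listening
 * socket: first the SYN queue (half-open requests), then the established/
 * timewait hash.  Falling back to cookie_v4_check() lets a syncookie-encoded
 * ACK recreate a request that was never queued.
 */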
static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev, false);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}

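/* Verify or set up the TCP checksum for a received skb.  A packet that
 * arrived with a full hardware checksum (CHECKSUM_COMPLETE) is validated
 * immediately; otherwise the pseudo-header sum is seeded and short packets
 * (<= 76 bytes) are checked in software right away, while longer packets
 * are checked later when their data is actually touched.
 */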
static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v4_check(skb->len, iph->saddr,
				  iph->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
				       skb->len, IPPROTO_TCP, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}


/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * if:
	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
	 *  o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, 0) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
			rsk = sk;
			goto reset;
		}
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);

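/* Early demultiplexing: called from the IP receive path before routing,
 * this looks up an established socket for the incoming segment and, when
 * the cached rx dst is still valid for the incoming interface, attaches
 * it to the skb so the normal input path can skip a route lookup.
 */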
void tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif);
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk->sk_state != TCP_TIME_WAIT) {
			struct dst_entry *dst = sk->sk_rx_dst;

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}

/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8)				  --ANK
 *
 */
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return false;

	if (skb->len <= tcp_hdrlen(skb) &&
	    skb_queue_len(&tp->ucopy.prequeue) == 0)
		return false;

	skb_dst_force(skb);
	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return true;
}
EXPORT_SYMBOL(tcp_prequeue);

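/* Main IPv4 receive entry point registered in the protocol table.  The
 * rough sequence below: sanity-check and checksum the header, look the
 * socket up in the established/listening hashes, apply TTL/xfrm/filter
 * policy, then either process the segment directly, queue it on the
 * prequeue (unless net.ipv4.tcp_low_latency disabled that path), or push
 * it onto the owner's backlog if the socket is busy.
 */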
/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */
	if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
		goto csum_error;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when	 = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;

	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = net_dma_find_channel();
		if (tp->ucopy.dma_chan)
			ret = tcp_v4_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v4_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2)) {
		inet_twsk_put(inet_twsk(sk));
		goto bad_packet;
	}
	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->saddr, th->source,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};

void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	dst_hold(dst);
	sk->sk_rx_dst = dst;
	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);

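/* Address-family specific operations used by the generic connection-sock
 * code: these hooks are what let the same TCP state machine drive both
 * IPv4 and IPv6 (the IPv6 side installs its own table in tcp_ipv6.c).
 */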
const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};
EXPORT_SYMBOL(ipv4_specific);

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup		= tcp_v4_md5_lookup,
	.calc_md5_hash		= tcp_v4_md5_hash_skb,
	.md5_parse		= tcp_v4_parse_md5_keys,
};
#endif

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}

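/* Per-socket teardown: stop timers, drop queued data (write queue,
 * out-of-order queue, prequeue), release MD5 keys and the bound port,
 * and drop the memory-accounting references taken at socket creation.
 */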
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(tp->md5sig_info, rcu);
		tp->md5sig_info = NULL;
	}
#endif

#ifdef CONFIG_NET_DMA
	/* Cleans up our sk_async_wait_queue */
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	BUG_ON(tp->fastopen_rsk != NULL);

	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);

	sk_sockets_allocated_dec(sk);
	sock_release_memcg(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL :
		list_entry(head->first, struct inet_timewait_sock, tw_node);
}

static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
	return !is_a_nulls(tw->tw_node.next) ?
		hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}

/*
 * Get next listener socket follow cur.  If cur is NULL, get first socket
 * starting from bucket given in st->bucket; when st->bucket is zero the
 * very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_nulls_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_nulls_next(sk);
	}
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}

static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline bool empty_bucket(struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
}

/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		struct inet_timewait_sock *tw;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		st->state = TCP_SEQ_STATE_TIME_WAIT;
		inet_twsk_for_each(tw, node,
				   &tcp_hashinfo.ehash[st->bucket].twchain) {
			if (tw->tw_family != st->family ||
			    !net_eq(twsk_net(tw), net)) {
				continue;
			}
			rc = tw;
			goto out;
		}
		spin_unlock_bh(lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;
	}
out:
	return rc;
}

static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct inet_timewait_sock *tw;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
		tw = cur;
		tw = tw_next(tw);
get_tw:
		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
			tw = tw_next(tw);
		}
		if (tw) {
			cur = tw;
			goto out;
		}
		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		st->state = TCP_SEQ_STATE_ESTABLISHED;

		/* Look for next non empty bucket */
		st->offset = 0;
		while (++st->bucket <= tcp_hashinfo.ehash_mask &&
				empty_bucket(st))
			;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			return NULL;

		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
	} else
		sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			goto found;
	}

	st->state = TCP_SEQ_STATE_TIME_WAIT;
	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
	goto get_tw;
found:
	cur = sk;
out:
	return cur;
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}

static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}

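/* seq_file iterator callbacks for /proc/net/tcp.  st->state tracks which
 * table is being walked (listening hash, its SYN queues, or the
 * established/timewait hash), while st->bucket, st->offset and last_pos
 * let a re-entered read resume close to where the previous one stopped.
 */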
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}

static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}

int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			  sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family		= afinfo->family;
	s->last_pos		= 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);

int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(tcp_proc_unregister);

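/* Formatting helpers for one /proc/net/tcp record.  Each line carries hex
 * addresses and ports, state, queue sizes, timer info, uid and inode, e.g.
 * (illustrative only):
 *   0: 0100007F:0277 00000000:0000 0A 00000000:00000000 00:00000000 ...
 */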
static void get_openreq4(const struct sock *sk, const struct request_sock *req,
			 struct seq_file *f, int i, kuid_t uid, int *len)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
		i,
		ireq->loc_addr,
		ntohs(inet_sk(sk)->inet_sport),
		ireq->rmt_addr,
		ntohs(ireq->rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f), uid),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req,
		len);
}

static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	if (sk->sk_state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/*
		 * because we don't lock the socket, we might find a transient negative value
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		sk->sk_state == TCP_LISTEN ?
		    (fastopenq ? fastopenq->max_qlen : 0) :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh),
		len);
}

static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i, int *len)
{
	__be32 dest, src;
	__u16 destp, srcp;
	long delta = tw->tw_ttd - jiffies;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw, len);
}

#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	int len;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-*s\n", TMPSZ - 1,
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp4_sock(v, seq, st->num, &len);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait4_sock(v, seq, st->num, &len);
		break;
	}
	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
out:
	return 0;
}

static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

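/* GRO (generic receive offload) hooks: validate the TCP checksum over the
 * aggregated data before handing the skb to the shared tcp_gro_* helpers,
 * and rebuild the pseudo-header checksum once merging is complete.
 */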
struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	const struct iphdr *iph = skb_gro_network_header(skb);
	__wsum wsum;
	__sum16 sum;

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}
flush:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;

	case CHECKSUM_NONE:
		wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
					  skb_gro_len(skb), IPPROTO_TCP, 0);
		sum = csum_fold(skb_checksum(skb,
					     skb_gro_offset(skb),
					     skb_gro_len(skb),
					     wsum));
		if (sum)
			goto flush;

		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	}

	return tcp_gro_receive(head, skb);
}

int tcp4_gro_complete(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}

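/* The IPv4 TCP proto operations table registered with the socket layer;
 * this is what connects BSD socket calls (connect, sendmsg, ...) made on a
 * SOCK_STREAM/IPPROTO_TCP socket to the functions in this file and tcp.c.
 */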
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.mtu_reduced		= tcp_v4_mtu_reduced,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.init_cgroup		= tcp_init_cgroup,
	.destroy_cgroup		= tcp_destroy_cgroup,
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};
EXPORT_SYMBOL(tcp_prot);

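/* Per-network-namespace init/exit.  Each netns starts with tcp_ecn = 2,
 * which (per the ip-sysctl documentation) means ECN is accepted when the
 * peer requests it but is not requested on outgoing connections.
 */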
static int __net_init tcp_sk_init(struct net *net)
{
	net->ipv4.sysctl_tcp_ecn = 2;
	return 0;
}

static void __net_exit tcp_sk_exit(struct net *net)
{
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
       .init	   = tcp_sk_init,
       .exit	   = tcp_sk_exit,
       .exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}