/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *	     				Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */


#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;


#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
						   __be32 addr);
static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, struct tcphdr *th);
#else
static inline
struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
	return NULL;
}
#endif

struct inet_hashinfo tcp_hashinfo;

static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}
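
/*
 * A rough sketch of what the call above yields (assumption: this matches
 * the behaviour of secure_tcp_sequence_number() in this era): the initial
 * sequence number is a keyed hash of the 4-tuple (saddr, daddr, sport,
 * dport) plus a fine-grained clock component, in the spirit of RFC 1948.
 * Different peers get unrelated ISNs, while a reincarnation of the same
 * 4-tuple gets a monotonically advancing ISN, which the TIME-WAIT reuse
 * logic below relies on.
 */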

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}

EXPORT_SYMBOL_GPL(tcp_twsk_unique);
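
/*
 * Worked example for the reuse path above (illustrative numbers): if the
 * TIME-WAIT socket last sent tw_snd_nxt == 1000, the reused connection
 * starts at write_seq == 1000 + 65535 + 2 == 66537, i.e. beyond any
 * receive window the previous incarnation could still claim, so stray
 * duplicates of old segments cannot be mistaken for new data even
 * without PAWS.
 */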

/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct rtable *rt;
	__be32 daddr, nexthop;
	int tmp;
	int err;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	if (inet->opt && inet->opt->srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet->opt->faddr;
	}

	tmp = ip_route_connect(&rt, nexthop, inet->saddr,
			       RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			       IPPROTO_TCP,
			       inet->sport, usin->sin_port, sk, 1);
	if (tmp < 0) {
		if (tmp == -ENETUNREACH)
			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return tmp;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet->opt || !inet->opt->srr)
		daddr = rt->rt_dst;

	if (!inet->saddr)
		inet->saddr = rt->rt_src;
	inet->rcv_saddr = inet->saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq		   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
		struct inet_peer *peer = rt_get_peer(rt);
		/*
		 * VJ's idea. We save last timestamp seen from
		 * the destination in peer table, when entering state
		 * TIME-WAIT, and initialize rx_opt.ts_recent from it,
		 * when trying new connection.
		 */
		if (peer != NULL &&
		    peer->tcp_ts_stamp + TCP_PAWS_MSL >= get_seconds()) {
			tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
			tp->rx_opt.ts_recent = peer->tcp_ts;
		}
	}

	inet->dport = usin->sin_port;
	inet->daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet->opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;

	tp->rx_opt.mss_clamp = 536;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	err = ip_route_newports(&rt, IPPROTO_TCP,
				inet->sport, inet->dport, sk);
	if (err)
		goto failure;

	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->u.dst);

	if (!tp->write_seq)
		tp->write_seq = secure_tcp_sequence_number(inet->saddr,
							   inet->daddr,
							   inet->sport,
							   usin->sin_port);

	inet->id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);
	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->dport = 0;
	return err;
}
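
/*
 * For orientation, a minimal userspace sequence that ends up in
 * tcp_v4_connect() (illustrative only; error handling omitted):
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in sin = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(80),
 *	};
 *
 *	inet_pton(AF_INET, "192.0.2.1", &sin.sin_addr);
 *	connect(fd, (struct sockaddr *)&sin, sizeof(sin));
 *
 * connect(2) enters the kernel via inet_stream_connect(), which reaches
 * this function through sk->sk_prot->connect.
 */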

/*
 * This routine does path mtu discovery as defined in RFC1191.
 */
static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);

	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always < 576 bytes, so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	/* We don't check in the dst entry if pmtu discovery is forbidden
	 * on this route. We just assume that no packet-too-big packets
	 * are sent back when pmtu discovery is not active.
	 * There is a small race when the user changes this flag in the
	 * route, but I think that's acceptable.
	 */
	if ((dst = __sk_dst_check(sk, 0)) == NULL)
		return;

	dst->ops->update_pmtu(dst, mtu);

	/* Something is about to go wrong... Remember the soft error
	 * for the case that this connection is not able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
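
/*
 * Worked example for the fast path above (illustrative numbers): the
 * cached path MTU is 1500 and a router reports ICMP_FRAG_NEEDED with
 * mtu == 1400. update_pmtu() shrinks the cached route, tcp_sync_mss()
 * drops the MSS to roughly 1400 - 40 == 1360 bytes (20-byte IPv4 header
 * plus 20-byte TCP header, ignoring options), and
 * tcp_simple_retransmit() resends the too-big segments immediately
 * rather than waiting for the retransmit timer to fire.
 */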

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	struct iphdr *iph = (struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	__u32 seq;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	if (icmp_skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			iph->saddr, th->source, inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			if (!sock_owned_by_user(sk))
				do_pmtu_discovery(sk, iph, info);
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff)
			break;

		icsk->icsk_backoff--;
		inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) <<
					 icsk->icsk_backoff;
		tcp_bound_rto(sk);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
				tcp_time_stamp - TCP_SKB_CB(skb)->when);

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else if (sock_owned_by_user(sk)) {
			/* RTO revert clocked out retransmission,
			 * but socket is locked. Will defer. */
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  HZ/20, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		WARN_ON(req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, for example, if SYNs crossed.
			     */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note that in the modern internet, where routing is unreliable
	 * and broken firewalls sit in each dark corner sending random
	 * errors ordered by their masters, even these two messages finally
	 * lose their original sense (even Linux sends invalid PORT_UNREACHs).
	 *
	 * Now we are in compliance with RFCs.
	 *							--ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
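
/*
 * Worked example for the backoff-revert branch above (illustrative
 * numbers, per draft-zimmermann-tcp-lcd): with a base RTO of 200ms and
 * icsk_backoff == 3, the effective RTO is 200ms << 3 == 1600ms. An
 * ICMP_HOST_UNREACH for the earliest unacked segment lowers the backoff
 * to 2 (RTO 800ms); if 500ms have already elapsed since that segment
 * was stamped, the retransmit timer is re-armed for the remaining
 * 300ms, and if nothing remains the segment is retransmitted at once.
 */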

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(len, inet->saddr,
					  inet->daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(len, inet->saddr, inet->daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	th->check = ~tcp_v4_check(skb->len, iph->saddr, iph->daddr, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;
	return 0;
}

/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
 *		      for the reset?
 *	Answer: if a packet caused the RST, it is not for a socket
 *		existing in our system; if it is matched to a socket,
 *		it is just a duplicate segment or a bug in the other side's TCP.
 *		So we build the reply based only on the parameters
 *		that arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL;
	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				     key, ip_hdr(skb)->saddr,
				     ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	net = dev_net(skb_dst(skb)->dev);
	ip_send_reply(net->ipv4.tcp_sock, skb,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
}
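
/*
 * Worked example of the reply built above (illustrative numbers): for an
 * incoming SYN with seq == 1000 and no ACK bit, the RST goes out with
 * seq == 0, ack_seq == 1001 (seq plus one for the SYN) and the ACK bit
 * set; for a segment that did carry an ACK, the RST instead uses the
 * peer's ack_seq as its own seq and needs no ACK bit. Either way the
 * reply is emitted via ip_send_reply() based purely on the received
 * segment, per the comment above.
 */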

/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
   outside socket context, is certainly ugly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (ts) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tcp_time_stamp);
		rep.opt[2] = htonl(ts);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (ts) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;

	ip_send_reply(net->ipv4.tcp_sock, skb,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0
			);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
			req->ts_recent,
			0,
			tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0);
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int __tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
				struct dst_entry *dst)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req);

	if (skb) {
		struct tcphdr *th = tcp_hdr(skb);

		th->check = tcp_v4_check(skb->len,
					 ireq->loc_addr,
					 ireq->rmt_addr,
					 csum_partial(th, skb->len,
						      skb->csum));

		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	dst_release(dst);
	return err;
}

static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req)
{
	return __tcp_v4_send_synack(sk, req, NULL);
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}

#ifdef CONFIG_SYN_COOKIES
static void syn_flood_warning(struct sk_buff *skb)
{
	static unsigned long warntime;

	if (time_after(jiffies, (warntime + HZ * 60))) {
		warntime = jiffies;
		printk(KERN_INFO
		       "possible SYN flooding on port %d. Sending cookies.\n",
		       ntohs(tcp_hdr(skb)->dest));
	}
}
#endif

/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options *tcp_v4_save_options(struct sock *sk,
					      struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = optlength(opt);
		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(dopt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */
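
/*
 * For reference (RFC 2385): the signature travels as TCP option kind 19,
 * length 18, carrying a 16-byte MD5 digest computed over the pseudo-header,
 * the TCP header with its checksum field zeroed, the segment data and the
 * shared key; see tcp_v4_md5_hash_hdr() and tcp_v4_md5_hash_skb() below.
 */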

/* Find the Key structure for an address.  */
static struct tcp_md5sig_key *
			tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (!tp->md5sig_info || !tp->md5sig_info->entries4)
		return NULL;
	for (i = 0; i < tp->md5sig_info->entries4; i++) {
		if (tp->md5sig_info->keys4[i].addr == addr)
			return &tp->md5sig_info->keys4[i].base;
	}
	return NULL;
}

struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->daddr);
}

EXPORT_SYMBOL(tcp_v4_md5_lookup);

static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr);
}

/* This can be called on a newly created socket, from other files */
int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
		      u8 *newkey, u8 newkeylen)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp4_md5sig_key *keys;

	key = tcp_v4_md5_do_lookup(sk, addr);
	if (key) {
		/* Pre-existing entry - just update that one. */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		struct tcp_md5sig_info *md5sig;

		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info),
						  GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		}
		if (tcp_alloc_md5sig_pool(sk) == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		md5sig = tp->md5sig_info;

		if (md5sig->alloced4 == md5sig->entries4) {
			keys = kmalloc((sizeof(*keys) *
					(md5sig->entries4 + 1)), GFP_ATOMIC);
			if (!keys) {
				kfree(newkey);
				tcp_free_md5sig_pool();
				return -ENOMEM;
			}

			if (md5sig->entries4)
				memcpy(keys, md5sig->keys4,
				       sizeof(*keys) * md5sig->entries4);

			/* Free old key list, and reference new one */
			kfree(md5sig->keys4);
			md5sig->keys4 = keys;
			md5sig->alloced4++;
		}
		md5sig->entries4++;
		md5sig->keys4[md5sig->entries4 - 1].addr        = addr;
		md5sig->keys4[md5sig->entries4 - 1].base.key    = newkey;
		md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
	}
	return 0;
}

EXPORT_SYMBOL(tcp_v4_md5_do_add);

static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, u8 newkeylen)
{
	return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->daddr,
				 newkey, newkeylen);
}

int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries4; i++) {
		if (tp->md5sig_info->keys4[i].addr == addr) {
			/* Free the key */
			kfree(tp->md5sig_info->keys4[i].base.key);
			tp->md5sig_info->entries4--;

			if (tp->md5sig_info->entries4 == 0) {
				kfree(tp->md5sig_info->keys4);
				tp->md5sig_info->keys4 = NULL;
				tp->md5sig_info->alloced4 = 0;
			} else if (tp->md5sig_info->entries4 != i) {
				/* Shift the remaining entries down */
				memmove(&tp->md5sig_info->keys4[i],
					&tp->md5sig_info->keys4[i+1],
					(tp->md5sig_info->entries4 - i) *
					 sizeof(struct tcp4_md5sig_key));
			}
			tcp_free_md5sig_pool();
			return 0;
		}
	}
	return -ENOENT;
}

EXPORT_SYMBOL(tcp_v4_md5_do_del);

static void tcp_v4_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Free each key, then the key array itself,
	 * the crypto element, and then decrement our
	 * hold on the last resort crypto.
	 */
	if (tp->md5sig_info->entries4) {
		int i;
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}
	if (tp->md5sig_info->keys4) {
		kfree(tp->md5sig_info->keys4);
		tp->md5sig_info->keys4 = NULL;
		tp->md5sig_info->alloced4  = 0;
	}
}

static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_key || !cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(*p), sk->sk_allocation);
		if (!p)
			return -EINVAL;

		tp->md5sig_info = p;
		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, sk->sk_allocation);
	if (!newkey)
		return -ENOMEM;
	return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
				 newkey, cmd.tcpm_keylen);
}
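
/*
 * A minimal userspace sketch of installing a key for a peer (assumed
 * address and key; error handling omitted):
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	sin->sin_family = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.2", &sin->sin_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * Both endpoints must install the same key for segments to survive
 * tcp_v4_inbound_md5_hash() below.
 */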

static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			struct sock *sk, struct request_sock *req,
			struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->saddr;
		daddr = inet_sk(sk)->daddr;
	} else if (req) {
		saddr = inet_rsk(req)->loc_addr;
		daddr = inet_rsk(req)->rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
			       &iph->saddr, ntohs(th->source),
			       &iph->daddr, ntohs(th->dest),
			       genhash ? " tcp_v4_calc_md5_hash failed" : "");
		}
		return 1;
	}
	return 0;
}

#endif

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_send_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
};
#endif

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet_request_sock *ireq;
	struct tcp_options_received tmp_opt;
	struct request_sock *req;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
#endif

	/* Never answer to SYNs send to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations, they conserve resources and peer is
	 * evidently real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies) {
			want_cookie = 1;
		} else
#endif
		goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * of warm entries in syn queue, drop request. It is better than
	 * clogging syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet_reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = 536;
	tmp_opt.user_mss  = tcp_sk(sk)->rx_opt.user_mss;

	tcp_parse_options(skb, &tmp_opt, 0);

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;

	tcp_openreq_init(req, &tmp_opt, skb);

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->no_srccheck = inet_sk(sk)->transparent;
	ireq->opt = tcp_v4_save_options(sk, skb);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	if (!want_cookie)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (want_cookie) {
#ifdef CONFIG_SYN_COOKIES
		syn_flood_warning(skb);
		req->cookie_ts = tmp_opt.tstamp_ok;
#endif
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
	} else if (!isn) {
		struct inet_peer *peer = NULL;

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet_csk_route_req(sk, req)) != NULL &&
		    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
		    peer->v4daddr == saddr) {
			if (get_seconds() < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI4/%u\n",
				       &saddr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v4_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;

	if (__tcp_v4_send_synack(sk, req, dst) || want_cookie)
		goto drop_and_free;

	inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0;
}
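
/*
 * Worked example for the drop clause above (illustrative numbers): with
 * sysctl_max_syn_backlog == 1024 and syncookies disabled, once fewer
 * than 256 request slots remain free, new SYNs are accepted only from
 * peers for which we hold a cached timestamp or an RTT sample, i.e.
 * destinations we demonstrably talked to before the suspected synflood
 * began.
 */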


/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
1363 1364 1365
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		goto exit;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(newsk, dst);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->daddr	      = ireq->rmt_addr;
	newinet->rcv_saddr    = ireq->loc_addr;
	newinet->saddr	      = ireq->loc_addr;
	newinet->opt	      = ireq->opt;
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newinet->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen;
	newinet->id = newtp->write_seq ^ jiffies;

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v4_md5_do_lookup(sk, newinet->daddr)) != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v4_md5_do_add(newsk, newinet->daddr,
					  newkey, key->keylen);
		newsk->sk_route_caps &= ~NETIF_F_GSO_MASK;
	}
#endif

	__inet_hash_nolisten(newsk);
	__inet_inherit_port(sk, newsk);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	dst_release(dst);
	return NULL;
}

static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}

static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v4_check(skb->len, iph->saddr,
				  iph->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
				       skb->len, IPPROTO_TCP, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
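
/*
 * A note on the 76-byte cutoff above (a heuristic, not anything
 * protocol-mandated): if the device delivered CHECKSUM_COMPLETE the sum
 * is verified right here. Otherwise short segments are cheap enough to
 * checksum immediately via __skb_checksum_complete(), while longer ones
 * keep the pseudo-header sum in skb->csum and are verified later,
 * ideally folded into the copy to user space.
 */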


/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * if:
	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
	 *  o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
			rsk = sk;
			goto reset;
		}
		TCP_CHECK_TIMER(sk);
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	TCP_CHECK_TIMER(sk);
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}

/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided the case of th->doff==0 is eliminated.
	 * So, we defer the checks. */
	if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when	 = 0;
	TCP_SKB_CB(skb)->flags	 = iph->tos;
	TCP_SKB_CB(skb)->sacked	 = 0;

	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
		if (tp->ucopy.dma_chan)
			ret = tcp_v4_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v4_do_rcv(sk, skb);
		}
	} else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

/* VJ's idea. Save last timestamp seen from this destination
 * and hold it at least for normal timewait interval to use for duplicate
 * segment detection in subsequent connections, before they enter synchronized
 * state.
 */

int tcp_v4_remember_stamp(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
	struct inet_peer *peer = NULL;
	int release_it = 0;

	if (!rt || rt->rt_dst != inet->daddr) {
		peer = inet_getpeer(inet->daddr, 1);
		release_it = 1;
	} else {
		if (!rt->peer)
			rt_bind_peer(rt, 1);
		peer = rt->peer;
	}

	if (peer) {
		if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
		    (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() &&
		     peer->tcp_ts_stamp <= tp->rx_opt.ts_recent_stamp)) {
			peer->tcp_ts_stamp = tp->rx_opt.ts_recent_stamp;
			peer->tcp_ts = tp->rx_opt.ts_recent;
		}
		if (release_it)
			inet_putpeer(peer);
		return 1;
	}

	return 0;
}

int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1);

	if (peer) {
		const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

		if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
		    (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() &&
		     peer->tcp_ts_stamp <= tcptw->tw_ts_recent_stamp)) {
			peer->tcp_ts_stamp = tcptw->tw_ts_recent_stamp;
			peer->tcp_ts	   = tcptw->tw_ts_recent;
L
Linus Torvalds 已提交
1750 1751 1752 1753 1754 1755 1756 1757
		}
		inet_putpeer(peer);
		return 1;
	}

	return 0;
}

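/* IPv4 address-family operations used by the TCP connection sockets. */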
const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.remember_stamp	   = tcp_v4_remember_stamp,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup		= tcp_v4_md5_lookup,
	.calc_md5_hash		= tcp_v4_md5_hash_skb,
	.md5_add		= tcp_v4_md5_add_func,
	.md5_parse		= tcp_v4_parse_md5_keys,
};
#endif

/* NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc(), so they need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = 536;

	tp->reordering = sysctl_tcp_reordering;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;

	sk->sk_state = TCP_CLOSE;

	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	icsk->icsk_af_ops = &ipv4_specific;
	icsk->icsk_sync_mss = tcp_sync_mss;
#ifdef CONFIG_TCP_MD5SIG
	tp->af_specific = &tcp_sock_ipv4_specific;
#endif

	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	local_bh_disable();
	percpu_counter_inc(&tcp_sockets_allocated);
	local_bh_enable();

	return 0;
}

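/* Tear down all protocol-private state when an IPv4 TCP socket is freed. */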
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_v4_clear_md5_list(sk);
		kfree(tp->md5sig_info);
		tp->md5sig_info = NULL;
	}
#endif

#ifdef CONFIG_NET_DMA
	/* Cleans up our sk_async_wait_queue */
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	/*
	 * If sendmsg cached page exists, toss it.
	 */
	if (sk->sk_sndmsg_page) {
		__free_page(sk->sk_sndmsg_page);
		sk->sk_sndmsg_page = NULL;
	}

	percpu_counter_dec(&tcp_sockets_allocated);
}

EXPORT_SYMBOL(tcp_v4_destroy_sock);

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL :
		list_entry(head->first, struct inet_timewait_sock, tw_node);
}

static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
	return !is_a_nulls(tw->tw_node.next) ?
		hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}

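/* Advance the /proc iterator over listening sockets, descending into each
 * listener's SYN (open request) table as it is visited.
 */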
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		st->bucket = 0;
		ilb = &tcp_hashinfo.listening_hash[0];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_next(sk);
	}
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	spin_unlock_bh(&ilb->lock);
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}

static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	void *rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline int empty_bucket(struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
}

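/* Find the first matching socket or TIME_WAIT entry in the established hash,
 * leaving its bucket lock held for the caller.
 */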
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	for (st->bucket = 0; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		struct inet_timewait_sock *tw;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		st->state = TCP_SEQ_STATE_TIME_WAIT;
		inet_twsk_for_each(tw, node,
				   &tcp_hashinfo.ehash[st->bucket].twchain) {
			if (tw->tw_family != st->family ||
			    !net_eq(twsk_net(tw), net)) {
				continue;
			}
			rc = tw;
			goto out;
		}
		spin_unlock_bh(lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;
	}
out:
	return rc;
}

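/* Move to the next established or TIME_WAIT entry, releasing and re-taking
 * bucket locks as the walk crosses bucket boundaries.
 */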
static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct inet_timewait_sock *tw;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;

	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
		tw = cur;
		tw = tw_next(tw);
get_tw:
		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
			tw = tw_next(tw);
		}
		if (tw) {
			cur = tw;
			goto out;
		}
		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		st->state = TCP_SEQ_STATE_ESTABLISHED;

		/* Look for next non empty bucket */
		while (++st->bucket <= tcp_hashinfo.ehash_mask &&
				empty_bucket(st))
			;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			return NULL;

		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
	} else
		sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			goto found;
	}

	st->state = TCP_SEQ_STATE_TIME_WAIT;
	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
	goto get_tw;
found:
	cur = sk;
out:
	return cur;
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}

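/* seq_file iteration: walk the listening hash first, then fall through to
 * the established/TIME_WAIT hash.
 */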
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	void *rc = NULL;
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	return rc;
}

static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}

static int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			  sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family		= afinfo->family;
	return 0;
}

int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_fops.open		= tcp_seq_open;
	afinfo->seq_fops.read		= seq_read;
	afinfo->seq_fops.llseek		= seq_lseek;
	afinfo->seq_fops.release	= seq_release_net;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     &afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	proc_net_remove(net, afinfo->name);
}

static void get_openreq4(struct sock *sk, struct request_sock *req,
			 struct seq_file *f, int i, int uid, int *len)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int ttd = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p%n",
		i,
		ireq->loc_addr,
		ntohs(inet_sk(sk)->sport),
		ireq->rmt_addr,
		ntohs(ireq->rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_to_clock_t(ttd),
		req->retrans,
		uid,
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req,
		len);
}

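/* Format one listening or established socket as a /proc/net/tcp line. */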
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
{
	int timer_active;
	unsigned long timer_expires;
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	__be32 dest = inet->daddr;
	__be32 src = inet->rcv_saddr;
	__u16 destp = ntohs(inet->dport);
	__u16 srcp = ntohs(inet->sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5d %8d %lu %d %p %lu %lu %u %u %d%n",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		sk->sk_state == TCP_LISTEN ? sk->sk_ack_backlog :
					     (tp->rcv_nxt - tp->copied_seq),
		timer_active,
		jiffies_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		sock_i_uid(sk),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
		len);
}

static void get_timewait4_sock(struct inet_timewait_sock *tw,
			       struct seq_file *f, int i, int *len)
{
	__be32 dest, src;
	__u16 destp, srcp;
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw, len);
}

#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	int len;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-*s\n", TMPSZ - 1,
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp4_sock(v, seq, st->num, &len);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait4_sock(v, seq, st->num, &len);
		break;
	}
	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
out:
	return 0;
}

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= {
		.owner		= THIS_MODULE,
	},
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};

static int tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

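/* GRO hooks: verify the pseudo-header checksum before handing segments to
 * the generic TCP GRO engine, and finish the checksum on completion.
 */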
struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct iphdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}
EXPORT_SYMBOL(tcp4_gro_receive);

int tcp4_gro_complete(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}
EXPORT_SYMBOL(tcp4_gro_complete);

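/* Socket-layer protocol descriptor binding TCP's IPv4 entry points. */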
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.backlog_rcv		= tcp_v4_do_rcv,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};


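/* Per-namespace init/exit: create the control socket used for resets and
 * purge any remaining TIME_WAIT sockets when the namespace goes away.
 */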
static int __net_init tcp_sk_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv4.tcp_sock,
				    PF_INET, SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcp_sk_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv4.tcp_sock);
	inet_twsk_purge(net, &tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init = tcp_sk_init,
	.exit = tcp_sk_exit,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}

EXPORT_SYMBOL(ipv4_specific);
EXPORT_SYMBOL(tcp_hashinfo);
EXPORT_SYMBOL(tcp_prot);
EXPORT_SYMBOL(tcp_v4_conn_request);
EXPORT_SYMBOL(tcp_v4_connect);
EXPORT_SYMBOL(tcp_v4_do_rcv);
EXPORT_SYMBOL(tcp_v4_remember_stamp);
EXPORT_SYMBOL(tcp_v4_send_check);
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

#ifdef CONFIG_PROC_FS
EXPORT_SYMBOL(tcp_proc_register);
EXPORT_SYMBOL(tcp_proc_unregister);
#endif
EXPORT_SYMBOL(sysctl_tcp_low_latency);