tcp_ipv6.c 58.5 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0-or-later
L
Linus Torvalds 已提交
2 3
/*
 *	TCP over IPv6
4
 *	Linux INET6 implementation
L
Linus Torvalds 已提交
5 6
 *
 *	Authors:
7
 *	Pedro Roque		<roque@di.fc.ul.pt>
L
Linus Torvalds 已提交
8
 *
9
 *	Based on:
L
Linus Torvalds 已提交
10 11 12 13 14 15 16 17 18 19 20 21
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 */

H
Herbert Xu 已提交
22
#include <linux/bottom_half.h>
L
Linus Torvalds 已提交
23 24 25 26 27 28 29 30 31 32 33 34 35 36
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
37
#include <linux/slab.h>
W
Wang Yufen 已提交
38
#include <linux/uaccess.h>
L
Linus Torvalds 已提交
39 40 41
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
42
#include <linux/indirect_call_wrapper.h>
L
Linus Torvalds 已提交
43 44 45

#include <net/tcp.h>
#include <net/ndisc.h>
46
#include <net/inet6_hashtables.h>
47
#include <net/inet6_connection_sock.h>
L
Linus Torvalds 已提交
48 49 50 51 52 53 54 55 56 57
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
58
#include <net/timewait_sock.h>
59
#include <net/inet_common.h>
60
#include <net/secure_seq.h>
61
#include <net/busy_poll.h>
L
Linus Torvalds 已提交
62 63 64 65

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

H
Herbert Xu 已提交
66
#include <crypto/hash.h>
67 68
#include <linux/scatterlist.h>

69 70
#include <trace/events/tcp.h>

71 72
static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
73
				      struct request_sock *req);
L
Linus Torvalds 已提交
74

75
INDIRECT_CALLABLE_SCOPE int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
L
Linus Torvalds 已提交
76

77
static const struct inet_connection_sock_af_ops ipv6_mapped;
78
const struct inet_connection_sock_af_ops ipv6_specific;
79
#ifdef CONFIG_TCP_MD5SIG
80 81
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
82
#else
83
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
84 85
						   const struct in6_addr *addr,
						   int l3index)
86 87 88
{
	return NULL;
}
89
#endif
L
Linus Torvalds 已提交
90

E
Eric Dumazet 已提交
91 92 93
/* Helper returning the inet6 address from a given tcp socket.
 * It can be used in TCP stack instead of inet6_sk(sk).
 * This avoids a dereference and allow compiler optimizations.
94
 * It is a specialized version of inet6_sk_generic().
E
Eric Dumazet 已提交
95 96 97
 */
static struct ipv6_pinfo *tcp_inet6_sk(const struct sock *sk)
{
98
	unsigned int offset = sizeof(struct tcp6_sock) - sizeof(struct ipv6_pinfo);
E
Eric Dumazet 已提交
99

100
	return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
E
Eric Dumazet 已提交
101 102
}

103 104 105 106
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

E
Eric Dumazet 已提交
107
	if (dst && dst_hold_safe(dst)) {
108 109
		const struct rt6_info *rt = (const struct rt6_info *)dst;

110
		rcu_assign_pointer(sk->sk_rx_dst, dst);
111
		sk->sk_rx_dst_ifindex = skb->skb_iif;
112
		sk->sk_rx_dst_cookie = rt6_get_cookie(rt);
113
	}
114 115
}

116
static u32 tcp_v6_init_seq(const struct sk_buff *skb)
L
Linus Torvalds 已提交
117
{
118 119 120 121 122 123
	return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
				ipv6_hdr(skb)->saddr.s6_addr32,
				tcp_hdr(skb)->dest,
				tcp_hdr(skb)->source);
}

124
static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
125
{
126
	return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
127
				   ipv6_hdr(skb)->saddr.s6_addr32);
L
Linus Torvalds 已提交
128 129
}

A
Andrey Ignatov 已提交
130 131 132 133 134 135 136 137 138 139 140 141 142 143 144
static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			      int addr_len)
{
	/* This check is replicated from tcp_v6_connect() and intended to
	 * prevent BPF program called below from accessing bytes that are out
	 * of the bound specified by user in addr_len.
	 */
	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	sock_owned_by_me(sk);

	return BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr);
}

145
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
L
Linus Torvalds 已提交
146 147 148
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
149
	struct inet_sock *inet = inet_sk(sk);
150
	struct inet_connection_sock *icsk = inet_csk(sk);
151
	struct inet_timewait_death_row *tcp_death_row;
E
Eric Dumazet 已提交
152
	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
L
Linus Torvalds 已提交
153
	struct tcp_sock *tp = tcp_sk(sk);
154
	struct in6_addr *saddr = NULL, *final_p, final;
155
	struct ipv6_txoptions *opt;
156
	struct flowi6 fl6;
L
Linus Torvalds 已提交
157 158 159 160
	struct dst_entry *dst;
	int addr_type;
	int err;

161
	if (addr_len < SIN6_LEN_RFC2133)
L
Linus Torvalds 已提交
162 163
		return -EINVAL;

164
	if (usin->sin6_family != AF_INET6)
E
Eric Dumazet 已提交
165
		return -EAFNOSUPPORT;
L
Linus Torvalds 已提交
166

167
	memset(&fl6, 0, sizeof(fl6));
L
Linus Torvalds 已提交
168 169

	if (np->sndflow) {
170 171 172
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
L
Linus Torvalds 已提交
173
			struct ip6_flowlabel *flowlabel;
174
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
175
			if (IS_ERR(flowlabel))
L
Linus Torvalds 已提交
176 177 178 179 180 181
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
182 183 184
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

185 186 187 188 189 190 191
	if (ipv6_addr_any(&usin->sin6_addr)) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
					       &usin->sin6_addr);
		else
			usin->sin6_addr = in6addr_loopback;
	}
L
Linus Torvalds 已提交
192 193 194

	addr_type = ipv6_addr_type(&usin->sin6_addr);

W
Weilong Chen 已提交
195
	if (addr_type & IPV6_ADDR_MULTICAST)
L
Linus Torvalds 已提交
196 197 198 199 200 201 202 203
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
204
			if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id))
L
Linus Torvalds 已提交
205 206 207 208 209 210 211 212 213 214 215
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
216
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
L
Linus Torvalds 已提交
217 218
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
219
		WRITE_ONCE(tp->write_seq, 0);
L
Linus Torvalds 已提交
220 221
	}

222
	sk->sk_v6_daddr = usin->sin6_addr;
223
	np->flow_label = fl6.flowlabel;
L
Linus Torvalds 已提交
224 225 226 227 228

	/*
	 *	TCP over IPv4
	 */

229
	if (addr_type & IPV6_ADDR_MAPPED) {
230
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
L
Linus Torvalds 已提交
231 232
		struct sockaddr_in sin;

233
		if (ipv6_only_sock(sk))
L
Linus Torvalds 已提交
234 235 236 237 238 239
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

240
		icsk->icsk_af_ops = &ipv6_mapped;
241
		if (sk_is_mptcp(sk))
242
			mptcpv6_handle_mapped(sk, true);
L
Linus Torvalds 已提交
243
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
244 245 246
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif
L
Linus Torvalds 已提交
247 248 249 250

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
251 252
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
253
			if (sk_is_mptcp(sk))
254
				mptcpv6_handle_mapped(sk, false);
L
Linus Torvalds 已提交
255
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
256 257 258
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
L
Linus Torvalds 已提交
259 260
			goto failure;
		}
261
		np->saddr = sk->sk_v6_rcv_saddr;
L
Linus Torvalds 已提交
262 263 264 265

		return err;
	}

266 267
	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;
L
Linus Torvalds 已提交
268

269
	fl6.flowi6_proto = IPPROTO_TCP;
270
	fl6.daddr = sk->sk_v6_daddr;
A
Alexey Dobriyan 已提交
271
	fl6.saddr = saddr ? *saddr : np->saddr;
272 273
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
274 275
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
276
	fl6.flowi6_uid = sk->sk_uid;
L
Linus Torvalds 已提交
277

278
	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
279
	final_p = fl6_update_dst(&fl6, opt, &final);
L
Linus Torvalds 已提交
280

281
	security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
V
Venkat Yekkirala 已提交
282

283
	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
284 285
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
L
Linus Torvalds 已提交
286
		goto failure;
287
	}
L
Linus Torvalds 已提交
288

289
	if (!saddr) {
290 291 292 293 294 295 296 297 298
		struct inet_bind_hashbucket *prev_addr_hashbucket = NULL;
		struct in6_addr prev_v6_rcv_saddr;

		if (icsk->icsk_bind2_hash) {
			prev_addr_hashbucket = inet_bhashfn_portaddr(&tcp_hashinfo,
								     sk, sock_net(sk),
								     inet->inet_num);
			prev_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
		}
299
		saddr = &fl6.saddr;
300
		sk->sk_v6_rcv_saddr = *saddr;
301 302 303 304 305 306 307 308

		if (prev_addr_hashbucket) {
			err = inet_bhash2_update_saddr(prev_addr_hashbucket, sk);
			if (err) {
				sk->sk_v6_rcv_saddr = prev_v6_rcv_saddr;
				goto failure;
			}
		}
L
Linus Torvalds 已提交
309 310 311
	}

	/* set the source address */
A
Alexey Dobriyan 已提交
312
	np->saddr = *saddr;
E
Eric Dumazet 已提交
313
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
L
Linus Torvalds 已提交
314

H
Herbert Xu 已提交
315
	sk->sk_gso_type = SKB_GSO_TCPV6;
E
Eric Dumazet 已提交
316
	ip6_dst_store(sk, dst, NULL, NULL);
L
Linus Torvalds 已提交
317

318
	icsk->icsk_ext_hdr_len = 0;
319 320 321
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;
L
Linus Torvalds 已提交
322 323 324

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

E
Eric Dumazet 已提交
325
	inet->inet_dport = usin->sin6_port;
L
Linus Torvalds 已提交
326 327

	tcp_set_state(sk, TCP_SYN_SENT);
328
	tcp_death_row = sock_net(sk)->ipv4.tcp_death_row;
329
	err = inet6_hash_connect(tcp_death_row, sk);
L
Linus Torvalds 已提交
330 331 332
	if (err)
		goto late_failure;

333
	sk_set_txhash(sk);
334

335 336
	if (likely(!tp->repair)) {
		if (!tp->write_seq)
337 338 339 340 341
			WRITE_ONCE(tp->write_seq,
				   secure_tcpv6_seq(np->saddr.s6_addr32,
						    sk->sk_v6_daddr.s6_addr32,
						    inet->inet_sport,
						    inet->inet_dport));
342 343
		tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
						   np->saddr.s6_addr32,
344
						   sk->sk_v6_daddr.s6_addr32);
345
	}
L
Linus Torvalds 已提交
346

W
Wei Wang 已提交
347 348 349 350 351
	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto late_failure;

L
Linus Torvalds 已提交
352 353 354 355 356 357 358 359 360
	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
failure:
E
Eric Dumazet 已提交
361
	inet->inet_dport = 0;
L
Linus Torvalds 已提交
362 363 364 365
	sk->sk_route_caps = 0;
	return err;
}

366 367 368
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
369
	u32 mtu;
370 371 372 373

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

374 375 376 377 378 379 380 381 382
	mtu = READ_ONCE(tcp_sk(sk)->mtu_info);

	/* Drop requests trying to increase our current mss.
	 * Check done in __ip6_rt_update_pmtu() is too late.
	 */
	if (tcp_mtu_to_mss(sk, mtu) >= tcp_sk(sk)->mss_cache)
		return;

	dst = inet6_csk_update_pmtu(sk, mtu);
383 384 385 386 387 388 389 390 391
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}

392
static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
393
		u8 type, u8 code, int offset, __be32 info)
L
Linus Torvalds 已提交
394
{
W
Weilong Chen 已提交
395
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
396
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
397 398
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
L
Linus Torvalds 已提交
399
	struct ipv6_pinfo *np;
400
	struct tcp_sock *tp;
401
	__u32 seq, snd_una;
402
	struct sock *sk;
403
	bool fatal;
404
	int err;
L
Linus Torvalds 已提交
405

406 407 408
	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
409
					skb->dev->ifindex, inet6_sdif(skb));
L
Linus Torvalds 已提交
410

411
	if (!sk) {
E
Eric Dumazet 已提交
412 413
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
414
		return -ENOENT;
L
Linus Torvalds 已提交
415 416 417
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
418
		inet_twsk_put(inet_twsk(sk));
419
		return 0;
L
Linus Torvalds 已提交
420
	}
421
	seq = ntohl(th->seq);
422
	fatal = icmpv6_err_convert(type, code, &err);
423 424 425 426
	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		tcp_req_err(sk, seq, fatal);
		return 0;
	}
L
Linus Torvalds 已提交
427 428

	bh_lock_sock(sk);
429
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
430
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
L
Linus Torvalds 已提交
431 432 433 434

	if (sk->sk_state == TCP_CLOSE)
		goto out;

435 436 437 438 439 440
	if (static_branch_unlikely(&ip6_min_hopcount)) {
		/* min_hopcount can be changed concurrently from do_ipv6_setsockopt() */
		if (ipv6_hdr(skb)->hop_limit < READ_ONCE(tcp_inet6_sk(sk)->min_hopcount)) {
			__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
			goto out;
		}
441 442
	}

L
Linus Torvalds 已提交
443
	tp = tcp_sk(sk);
444
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
445
	fastopen = rcu_dereference(tp->fastopen_rsk);
446
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
L
Linus Torvalds 已提交
447
	if (sk->sk_state != TCP_LISTEN &&
448
	    !between(seq, snd_una, tp->snd_nxt)) {
449
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
L
Linus Torvalds 已提交
450 451 452
		goto out;
	}

E
Eric Dumazet 已提交
453
	np = tcp_inet6_sk(sk);
L
Linus Torvalds 已提交
454

455
	if (type == NDISC_REDIRECT) {
456 457
		if (!sock_owned_by_user(sk)) {
			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
458

459 460 461
			if (dst)
				dst->ops->redirect(dst, sk, skb);
		}
462
		goto out;
463 464
	}

L
Linus Torvalds 已提交
465
	if (type == ICMPV6_PKT_TOOBIG) {
466 467
		u32 mtu = ntohl(info);

468 469 470 471 472 473 474
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs send out by Linux are always <576bytes so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

475 476 477
		if (!ip6_sk_accept_pmtu(sk))
			goto out;

478 479 480 481 482
		if (mtu < IPV6_MIN_MTU)
			goto out;

		WRITE_ONCE(tp->mtu_info, mtu);

483 484
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
485
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
486
					   &sk->sk_tsq_flags))
487
			sock_hold(sk);
L
Linus Torvalds 已提交
488 489 490 491
		goto out;
	}


492
	/* Might be for an request_sock */
L
Linus Torvalds 已提交
493 494
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
495 496
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
497
		 * already accepted it is treated as a connected one below.
498
		 */
499
		if (fastopen && !fastopen->sk)
500 501
			break;

502 503
		ipv6_icmp_error(sk, skb, err, th->dest, ntohl(info), (u8 *)th);

L
Linus Torvalds 已提交
504 505
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
506
			sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */
L
Linus Torvalds 已提交
507 508 509 510 511

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
512 513 514 515 516 517 518 519 520
	case TCP_LISTEN:
		break;
	default:
		/* check if this ICMP message allows revert of backoff.
		 * (see RFC 6069)
		 */
		if (!fastopen && type == ICMPV6_DEST_UNREACH &&
		    code == ICMPV6_NOROUTE)
			tcp_ld_RTO_revert(sk, seq);
L
Linus Torvalds 已提交
521 522 523 524
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
525
		sk_error_report(sk);
L
Linus Torvalds 已提交
526 527 528 529 530 531
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
532
	return 0;
L
Linus Torvalds 已提交
533 534 535
}


536
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
537
			      struct flowi *fl,
538
			      struct request_sock *req,
539
			      struct tcp_fastopen_cookie *foc,
540 541
			      enum tcp_synack_type synack_type,
			      struct sk_buff *syn_skb)
L
Linus Torvalds 已提交
542
{
543
	struct inet_request_sock *ireq = inet_rsk(req);
E
Eric Dumazet 已提交
544
	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
545
	struct ipv6_txoptions *opt;
546
	struct flowi6 *fl6 = &fl->u.ip6;
W
Weilong Chen 已提交
547
	struct sk_buff *skb;
548
	int err = -ENOMEM;
549
	u8 tclass;
L
Linus Torvalds 已提交
550

551
	/* First, grab a route. */
552 553
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
554
		goto done;
555

556
	skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb);
557

L
Linus Torvalds 已提交
558
	if (skb) {
559 560
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);
L
Linus Torvalds 已提交
561

562
		fl6->daddr = ireq->ir_v6_rmt_addr;
563
		if (np->repflow && ireq->pktopts)
564 565
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

566
		tclass = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) ?
W
Wei Wang 已提交
567 568
				(tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
				(np->tclass & INET_ECN_MASK) :
569
				np->tclass;
570 571 572 573 574 575 576

		if (!INET_ECN_is_capable(tclass) &&
		    tcp_bpf_ca_needs_ecn((struct sock *)req))
			tclass |= INET_ECN_ECT_0;

		rcu_read_lock();
		opt = ireq->ipv6_opt;
577 578
		if (!opt)
			opt = rcu_dereference(np->opt);
579
		err = ip6_xmit(sk, skb, fl6, skb->mark ? : sk->sk_mark, opt,
580
			       tclass, sk->sk_priority);
581
		rcu_read_unlock();
582
		err = net_xmit_eval(err);
L
Linus Torvalds 已提交
583 584 585 586 587 588
	}

done:
	return err;
}

589

590
static void tcp_v6_reqsk_destructor(struct request_sock *req)
L
Linus Torvalds 已提交
591
{
592
	kfree(inet_rsk(req)->ipv6_opt);
593
	consume_skb(inet_rsk(req)->pktopts);
L
Linus Torvalds 已提交
594 595
}

596
#ifdef CONFIG_TCP_MD5SIG
597
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
598 599
						   const struct in6_addr *addr,
						   int l3index)
600
{
601 602
	return tcp_md5_do_lookup(sk, l3index,
				 (union tcp_md5_addr *)addr, AF_INET6);
603 604
}

605
static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
606
						const struct sock *addr_sk)
607
{
608 609 610 611 612 613
	int l3index;

	l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
						 addr_sk->sk_bound_dev_if);
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr,
				    l3index);
614 615
}

616
static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
617
				 sockptr_t optval, int optlen)
618 619 620
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
621
	int l3index = 0;
622
	u8 prefixlen;
623
	u8 flags;
624 625 626 627

	if (optlen < sizeof(cmd))
		return -EINVAL;

628
	if (copy_from_sockptr(&cmd, optval, sizeof(cmd)))
629 630 631 632 633
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

634 635
	flags = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX;

636 637 638 639 640 641 642 643 644 645
	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
		prefixlen = cmd.tcpm_prefixlen;
		if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
					prefixlen > 32))
			return -EINVAL;
	} else {
		prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
	}

646
	if (optname == TCP_MD5SIG_EXT && cmd.tcpm_ifindex &&
647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_index_rcu(sock_net(sk), cmd.tcpm_ifindex);
		if (dev && netif_is_l3_master(dev))
			l3index = dev->ifindex;
		rcu_read_unlock();

		/* ok to reference set/not set outside of rcu;
		 * right now device MUST be an L3 master
		 */
		if (!dev || !l3index)
			return -EINVAL;
	}

663
	if (!cmd.tcpm_keylen) {
B
Brian Haley 已提交
664
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
E
Eric Dumazet 已提交
665
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
666
					      AF_INET, prefixlen,
667
					      l3index, flags);
E
Eric Dumazet 已提交
668
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
669
				      AF_INET6, prefixlen, l3index, flags);
670 671 672 673 674
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

E
Eric Dumazet 已提交
675 676
	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
677
				      AF_INET, prefixlen, l3index, flags,
678 679
				      cmd.tcpm_key, cmd.tcpm_keylen,
				      GFP_KERNEL);
680

E
Eric Dumazet 已提交
681
	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
682
			      AF_INET6, prefixlen, l3index, flags,
683
			      cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
684 685
}

686 687 688 689
static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   const struct in6_addr *daddr,
				   const struct in6_addr *saddr,
				   const struct tcphdr *th, int nbytes)
690 691
{
	struct tcp6_pseudohdr *bp;
692
	struct scatterlist sg;
693
	struct tcphdr *_th;
694

695
	bp = hp->scratch;
696
	/* 1. TCP pseudo-header (RFC2460) */
A
Alexey Dobriyan 已提交
697 698
	bp->saddr = *saddr;
	bp->daddr = *daddr;
699
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
A
Adam Langley 已提交
700
	bp->len = cpu_to_be32(nbytes);
701

702 703 704 705 706 707 708
	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
H
Herbert Xu 已提交
709
	return crypto_ahash_update(hp->md5_req);
710
}
711

712
static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
713
			       const struct in6_addr *daddr, struct in6_addr *saddr,
E
Eric Dumazet 已提交
714
			       const struct tcphdr *th)
715 716
{
	struct tcp_md5sig_pool *hp;
H
Herbert Xu 已提交
717
	struct ahash_request *req;
718 719 720 721

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
H
Herbert Xu 已提交
722
	req = hp->md5_req;
723

H
Herbert Xu 已提交
724
	if (crypto_ahash_init(req))
725
		goto clear_hash;
726
	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
727 728 729
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
H
Herbert Xu 已提交
730 731
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
732 733 734 735
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;
736

737 738 739 740
clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
741
	return 1;
742 743
}

744 745
static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
E
Eric Dumazet 已提交
746 747
			       const struct sock *sk,
			       const struct sk_buff *skb)
748
{
749
	const struct in6_addr *saddr, *daddr;
750
	struct tcp_md5sig_pool *hp;
H
Herbert Xu 已提交
751
	struct ahash_request *req;
E
Eric Dumazet 已提交
752
	const struct tcphdr *th = tcp_hdr(skb);
753

754 755
	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
756
		daddr = &sk->sk_v6_daddr;
757
	} else {
758
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
759 760
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
761
	}
762 763 764 765

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
H
Herbert Xu 已提交
766
	req = hp->md5_req;
767

H
Herbert Xu 已提交
768
	if (crypto_ahash_init(req))
769 770
		goto clear_hash;

771
	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
772 773 774 775 776
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
H
Herbert Xu 已提交
777 778
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
779 780 781 782 783 784 785 786 787 788
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
789 790
}

791 792
#endif

793 794
static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
795 796
			    struct sk_buff *skb)
{
797
	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
798
	struct inet_request_sock *ireq = inet_rsk(req);
E
Eric Dumazet 已提交
799
	const struct ipv6_pinfo *np = tcp_inet6_sk(sk_listener);
800 801 802 803 804

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
805
	if ((!sk_listener->sk_bound_dev_if || l3_slave) &&
806
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
E
Eric Dumazet 已提交
807
		ireq->ir_iif = tcp_v6_iif(skb);
808

809
	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
810
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
811
	     np->rxopt.bits.rxinfo ||
812 813
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
814
		refcount_inc(&skb->users);
815 816 817 818
		ireq->pktopts = skb;
	}
}

819
static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
820
					  struct sk_buff *skb,
821
					  struct flowi *fl,
822
					  struct request_sock *req)
823
{
824 825 826 827 828
	tcp_v6_init_req(req, sk, skb);

	if (security_inet_conn_request(sk, skb, req))
		return NULL;

829
	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
830 831
}

832
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
L
Linus Torvalds 已提交
833
	.family		=	AF_INET6,
834
	.obj_size	=	sizeof(struct tcp6_request_sock),
835
	.rtx_syn_ack	=	tcp_rtx_synack,
836 837
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
838
	.send_reset	=	tcp_v6_send_reset,
W
Wang Yufen 已提交
839
	.syn_ack_timeout =	tcp_syn_ack_timeout,
L
Linus Torvalds 已提交
840 841
};

842
const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
843 844
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
845
#ifdef CONFIG_TCP_MD5SIG
846
	.req_md5_lookup	=	tcp_v6_md5_lookup,
847
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
848
#endif
849 850 851
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
852
	.route_req	=	tcp_v6_route_req,
853 854
	.init_seq	=	tcp_v6_init_seq,
	.init_ts_off	=	tcp_v6_init_ts_off,
855
	.send_synack	=	tcp_v6_send_synack,
856
};
857

858
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
859 860
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
861
				 u8 tclass, __be32 label, u32 priority, u32 txhash)
L
Linus Torvalds 已提交
862
{
863 864
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
L
Linus Torvalds 已提交
865
	struct sk_buff *buff;
866
	struct flowi6 fl6;
867
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
868
	struct sock *ctl_sk = net->ipv6.tcp_sk;
869
	unsigned int tot_len = sizeof(struct tcphdr);
870
	__be32 mrst = 0, *topt;
E
Eric Dumazet 已提交
871
	struct dst_entry *dst;
J
Jon Maxwell 已提交
872
	__u32 mark = 0;
L
Linus Torvalds 已提交
873

874
	if (tsecr)
875
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
876 877 878 879 880
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

881 882 883 884 885 886 887 888 889
#ifdef CONFIG_MPTCP
	if (rst && !key) {
		mrst = mptcp_reset_option(skb);

		if (mrst)
			tot_len += sizeof(__be32);
	}
#endif

890
	buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
891
	if (!buff)
892
		return;
L
Linus Torvalds 已提交
893

894
	skb_reserve(buff, MAX_TCP_HEADER);
L
Linus Torvalds 已提交
895

896
	t1 = skb_push(buff, tot_len);
897
	skb_reset_transport_header(buff);
L
Linus Torvalds 已提交
898 899 900 901 902

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
903
	t1->doff = tot_len / 4;
904 905 906 907 908
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);
L
Linus Torvalds 已提交
909

910 911
	topt = (__be32 *)(t1 + 1);

912
	if (tsecr) {
913 914
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
915 916
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
917 918
	}

919 920 921
	if (mrst)
		*topt++ = mrst;

922 923
#ifdef CONFIG_TCP_MD5SIG
	if (key) {
924 925 926
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
927 928
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
929 930 931
	}
#endif

932
	memset(&fl6, 0, sizeof(fl6));
A
Alexey Dobriyan 已提交
933 934
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
935
	fl6.flowlabel = label;
L
Linus Torvalds 已提交
936

937 938
	buff->ip_summed = CHECKSUM_PARTIAL;

939
	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
L
Linus Torvalds 已提交
940

941
	fl6.flowi6_proto = IPPROTO_TCP;
942
	if (rt6_need_strict(&fl6.daddr) && !oif)
E
Eric Dumazet 已提交
943
		fl6.flowi6_oif = tcp_v6_iif(skb);
944 945 946 947 948 949
	else {
		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
			oif = skb->skb_iif;

		fl6.flowi6_oif = oif;
	}
950

951
	if (sk) {
952
		if (sk->sk_state == TCP_TIME_WAIT)
953
			mark = inet_twsk(sk)->tw_mark;
954
		else
955
			mark = sk->sk_mark;
956
		skb_set_delivery_time(buff, tcp_transmit_time(sk), true);
957
	}
958 959 960 961
	if (txhash) {
		/* autoflowlabel/skb_get_hash_flowi6 rely on buff->hash */
		skb_set_hash(buff, txhash, PKT_HASH_TYPE_L4);
	}
J
Jon Maxwell 已提交
962
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark) ?: mark;
963 964
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
965
	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
966
	security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6));
L
Linus Torvalds 已提交
967

968 969 970 971
	/* Pass a socket to ip6_dst_lookup either it is for RST
	 * Underlying function will use this to retrieve the network
	 * namespace
	 */
972 973 974 975
	if (sk && sk->sk_state != TCP_TIME_WAIT)
		dst = ip6_dst_lookup_flow(net, sk, &fl6, NULL); /*sk's xfrm_policy can be referred*/
	else
		dst = ip6_dst_lookup_flow(net, ctl_sk, &fl6, NULL);
976 977
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
978 979
		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL,
			 tclass & ~INET_ECN_MASK, priority);
980
		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
981
		if (rst)
982
			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
983
		return;
L
Linus Torvalds 已提交
984 985 986 987 988
	}

	kfree_skb(buff);
}

989
static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
L
Linus Torvalds 已提交
990
{
991
	const struct tcphdr *th = tcp_hdr(skb);
992
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
993
	u32 seq = 0, ack_seq = 0;
994
	struct tcp_md5sig_key *key = NULL;
995 996 997 998 999 1000
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
1001
	__be32 label = 0;
1002
	u32 priority = 0;
1003
	struct net *net;
1004
	int oif = 0;
L
Linus Torvalds 已提交
1005

1006
	if (th->rst)
L
Linus Torvalds 已提交
1007 1008
		return;

1009 1010 1011 1012
	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
1013
		return;
L
Linus Torvalds 已提交
1014

1015
	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
1016
#ifdef CONFIG_TCP_MD5SIG
1017
	rcu_read_lock();
1018
	hash_location = tcp_parse_md5sig_option(th);
1019
	if (sk && sk_fullsock(sk)) {
1020 1021 1022 1023 1024 1025 1026
		int l3index;

		/* sdif set, means packet ingressed via a device
		 * in an L3 domain and inet_iif is set to it.
		 */
		l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr, l3index);
1027
	} else if (hash_location) {
1028 1029
		int dif = tcp_v6_iif_l3_slave(skb);
		int sdif = tcp_v6_sdif(skb);
1030
		int l3index;
1031

1032 1033 1034 1035 1036 1037 1038
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we are not loose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
1039
		sk1 = inet6_lookup_listener(net,
1040 1041
					   &tcp_hashinfo, NULL, 0,
					   &ipv6h->saddr,
1042
					   th->source, &ipv6h->daddr,
1043
					   ntohs(th->source), dif, sdif);
1044
		if (!sk1)
1045
			goto out;
1046

1047 1048 1049 1050 1051 1052
		/* sdif set, means packet ingressed via a device
		 * in an L3 domain and dif is set to it.
		 */
		l3index = tcp_v6_sdif(skb) ? dif : 0;

		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr, l3index);
1053
		if (!key)
1054
			goto out;
1055

1056
		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
1057
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
1058
			goto out;
1059
	}
1060 1061
#endif

1062 1063 1064 1065 1066
	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);
L
Linus Torvalds 已提交
1067

1068 1069
	if (sk) {
		oif = sk->sk_bound_dev_if;
1070 1071 1072
		if (sk_fullsock(sk)) {
			const struct ipv6_pinfo *np = tcp_inet6_sk(sk);

1073
			trace_tcp_send_reset(sk, skb);
1074 1075
			if (np->repflow)
				label = ip6_flowlabel(ipv6h);
1076
			priority = sk->sk_priority;
1077
		}
1078
		if (sk->sk_state == TCP_TIME_WAIT) {
1079
			label = cpu_to_be32(inet_twsk(sk)->tw_flowlabel);
1080 1081
			priority = inet_twsk(sk)->tw_priority;
		}
1082
	} else {
1083
		if (net->ipv6.sysctl.flowlabel_reflect & FLOWLABEL_REFLECT_TCP_RESET)
1084
			label = ip6_flowlabel(ipv6h);
1085 1086
	}

1087
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1,
1088
			     ipv6_get_dsfield(ipv6h), label, priority, 0);
1089 1090

#ifdef CONFIG_TCP_MD5SIG
1091 1092
out:
	rcu_read_unlock();
1093
#endif
1094
}
L
Linus Torvalds 已提交
1095

1096
static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
1097
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
1098
			    struct tcp_md5sig_key *key, u8 tclass,
1099
			    __be32 label, u32 priority, u32 txhash)
1100
{
1101
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
1102
			     tclass, label, priority, txhash);
L
Linus Torvalds 已提交
1103 1104 1105 1106
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
1107
	struct inet_timewait_sock *tw = inet_twsk(sk);
1108
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
L
Linus Torvalds 已提交
1109

1110
	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
1111
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
1112
			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
W
Wang Yufen 已提交
1113
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
1114 1115
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel), tw->tw_priority,
			tw->tw_txhash);
L
Linus Torvalds 已提交
1116

1117
	inet_twsk_put(tw);
L
Linus Torvalds 已提交
1118 1119
}

1120
static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
1121
				  struct request_sock *req)
L
Linus Torvalds 已提交
1122
{
1123 1124 1125 1126
	int l3index;

	l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;

1127 1128 1129
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
1130 1131 1132 1133 1134
	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
1135
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
1136
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
1137 1138
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
1139
			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
1140
			req->ts_recent, sk->sk_bound_dev_if,
1141
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr, l3index),
1142 1143
			ipv6_get_dsfield(ipv6_hdr(skb)), 0, sk->sk_priority,
			tcp_rsk(req)->txhash);
L
Linus Torvalds 已提交
1144 1145 1146
}


1147
static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
L
Linus Torvalds 已提交
1148
{
1149
#ifdef CONFIG_SYN_COOKIES
1150
	const struct tcphdr *th = tcp_hdr(skb);
L
Linus Torvalds 已提交
1151

1152
	if (!th->syn)
1153
		sk = cookie_v6_check(sk, skb);
L
Linus Torvalds 已提交
1154 1155 1156 1157
#endif
	return sk;
}

1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172
u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
			 struct tcphdr *th, u32 *cookie)
{
	u16 mss = 0;
#ifdef CONFIG_SYN_COOKIES
	mss = tcp_get_syncookie_mss(&tcp6_request_sock_ops,
				    &tcp_request_sock_ipv6_ops, sk, th);
	if (mss) {
		*cookie = __cookie_v6_init_sequence(iph, th, &mss);
		tcp_synq_overflow(sk);
	}
#endif
	return mss;
}

L
Linus Torvalds 已提交
1173 1174 1175 1176 1177 1178
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
1179
		goto drop;
L
Linus Torvalds 已提交
1180

1181 1182 1183 1184 1185
	if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
		__IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
		return 0;
	}

O
Octavian Purdila 已提交
1186 1187
	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);
L
Linus Torvalds 已提交
1188 1189

drop:
1190
	tcp_listendrop(sk);
L
Linus Torvalds 已提交
1191 1192 1193
	return 0; /* don't send reset */
}

1194 1195 1196 1197 1198 1199 1200 1201 1202 1203
static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move header back to the beginning if xfrm6_policy_check()
	 * and tcp_v6_fill_cb() are going to be called again.
	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}

1204
static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
W
Weilong Chen 已提交
1205
					 struct request_sock *req,
1206 1207 1208
					 struct dst_entry *dst,
					 struct request_sock *req_unhash,
					 bool *own_req)
L
Linus Torvalds 已提交
1209
{
1210
	struct inet_request_sock *ireq;
1211
	struct ipv6_pinfo *newnp;
E
Eric Dumazet 已提交
1212
	const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
1213
	struct ipv6_txoptions *opt;
L
Linus Torvalds 已提交
1214
	struct inet_sock *newinet;
1215
	bool found_dup_sk = false;
L
Linus Torvalds 已提交
1216 1217
	struct tcp_sock *newtp;
	struct sock *newsk;
1218 1219
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
1220
	int l3index;
1221
#endif
1222
	struct flowi6 fl6;
L
Linus Torvalds 已提交
1223 1224 1225 1226 1227 1228

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

1229 1230
		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
					     req_unhash, own_req);
L
Linus Torvalds 已提交
1231

1232
		if (!newsk)
L
Linus Torvalds 已提交
1233 1234
			return NULL;

E
Eric Dumazet 已提交
1235
		inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
L
Linus Torvalds 已提交
1236

E
Eric Dumazet 已提交
1237
		newnp = tcp_inet6_sk(newsk);
L
Linus Torvalds 已提交
1238 1239 1240 1241
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

1242
		newnp->saddr = newsk->sk_v6_rcv_saddr;
L
Linus Torvalds 已提交
1243

1244
		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1245
		if (sk_is_mptcp(newsk))
1246
			mptcpv6_handle_mapped(newsk, true);
L
Linus Torvalds 已提交
1247
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1248 1249 1250 1251
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

1252
		newnp->ipv6_mc_list = NULL;
1253 1254
		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
L
Linus Torvalds 已提交
1255 1256
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
1257 1258 1259
		newnp->mcast_oif   = inet_iif(skb);
		newnp->mcast_hops  = ip_hdr(skb)->ttl;
		newnp->rcv_flowinfo = 0;
1260
		if (np->repflow)
1261
			newnp->flow_label = 0;
L
Linus Torvalds 已提交
1262

1263 1264 1265 1266
		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
L
Linus Torvalds 已提交
1267 1268 1269
		 */

		/* It is tricky place. Until this moment IPv4 tcp
1270
		   worked with IPv6 icsk.icsk_af_ops.
L
Linus Torvalds 已提交
1271 1272
		   Sync it now.
		 */
1273
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
L
Linus Torvalds 已提交
1274 1275 1276 1277

		return newsk;
	}

1278
	ireq = inet_rsk(req);
L
Linus Torvalds 已提交
1279 1280 1281 1282

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

1283
	if (!dst) {
1284
		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
1285
		if (!dst)
L
Linus Torvalds 已提交
1286
			goto out;
1287
	}
L
Linus Torvalds 已提交
1288 1289

	newsk = tcp_create_openreq_child(sk, req, skb);
1290
	if (!newsk)
1291
		goto out_nonewsk;
L
Linus Torvalds 已提交
1292

1293 1294 1295 1296 1297
	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */
L
Linus Torvalds 已提交
1298

1299
	newsk->sk_gso_type = SKB_GSO_TCPV6;
E
Eric Dumazet 已提交
1300
	ip6_dst_store(newsk, dst, NULL, NULL);
1301
	inet6_sk_rx_dst_set(newsk, skb);
L
Linus Torvalds 已提交
1302

E
Eric Dumazet 已提交
1303
	inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
L
Linus Torvalds 已提交
1304 1305 1306

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
E
Eric Dumazet 已提交
1307
	newnp = tcp_inet6_sk(newsk);
L
Linus Torvalds 已提交
1308 1309 1310

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

1311 1312 1313 1314
	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;
L
Linus Torvalds 已提交
1315

1316
	/* Now IPv6 options...
L
Linus Torvalds 已提交
1317 1318 1319

	   First: no IPv4 options.
	 */
1320
	newinet->inet_opt = NULL;
1321
	newnp->ipv6_mc_list = NULL;
1322
	newnp->ipv6_ac_list = NULL;
1323
	newnp->ipv6_fl_list = NULL;
L
Linus Torvalds 已提交
1324 1325 1326 1327 1328 1329

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	newnp->pktoptions = NULL;
	newnp->opt	  = NULL;
E
Eric Dumazet 已提交
1330
	newnp->mcast_oif  = tcp_v6_iif(skb);
1331
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1332
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1333 1334
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
L
Linus Torvalds 已提交
1335

W
Wei Wang 已提交
1336 1337 1338
	/* Set ToS of the new socket based upon the value of incoming SYN.
	 * ECT bits are set later in tcp_init_transfer().
	 */
1339
	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos))
1340 1341
		newnp->tclass = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;

L
Linus Torvalds 已提交
1342 1343 1344 1345 1346 1347
	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we make one more one thing there: reattach optmem
	   to newsk.
	 */
1348 1349 1350
	opt = ireq->ipv6_opt;
	if (!opt)
		opt = rcu_dereference(np->opt);
1351 1352 1353 1354
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
1355
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1356 1357 1358
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;
L
Linus Torvalds 已提交
1359

1360 1361
	tcp_ca_openreq_child(newsk, dst);

L
Linus Torvalds 已提交
1362
	tcp_sync_mss(newsk, dst_mtu(dst));
E
Eric Dumazet 已提交
1363
	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
1364

L
Linus Torvalds 已提交
1365 1366
	tcp_initialize_rcv_mss(newsk);

E
Eric Dumazet 已提交
1367 1368
	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
L
Linus Torvalds 已提交
1369

1370
#ifdef CONFIG_TCP_MD5SIG
1371 1372
	l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif);

1373
	/* Copy over the MD5 key from the original socket */
1374
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr, l3index);
1375
	if (key) {
1376 1377 1378 1379 1380
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
1381
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
1382
			       AF_INET6, 128, l3index, key->flags, key->key, key->keylen,
1383
			       sk_gfp_mask(sk, GFP_ATOMIC));
1384 1385 1386
	}
#endif

1387
	if (__inet_inherit_port(sk, newsk) < 0) {
1388 1389
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
1390 1391
		goto out;
	}
1392 1393
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
				       &found_dup_sk);
E
Eric Dumazet 已提交
1394
	if (*own_req) {
1395
		tcp_move_syn(newtp, req);
E
Eric Dumazet 已提交
1396 1397 1398 1399

		/* Clone pktoptions received with SYN, if we own the req */
		if (ireq->pktopts) {
			newnp->pktoptions = skb_clone(ireq->pktopts,
1400
						      sk_gfp_mask(sk, GFP_ATOMIC));
E
Eric Dumazet 已提交
1401 1402
			consume_skb(ireq->pktopts);
			ireq->pktopts = NULL;
1403 1404
			if (newnp->pktoptions) {
				tcp_v6_restore_cb(newnp->pktoptions);
E
Eric Dumazet 已提交
1405
				skb_set_owner_r(newnp->pktoptions, newsk);
1406
			}
E
Eric Dumazet 已提交
1407
		}
1408 1409 1410 1411 1412 1413 1414 1415 1416
	} else {
		if (!req_unhash && found_dup_sk) {
			/* This code path should only be executed in the
			 * syncookie case only
			 */
			bh_unlock_sock(newsk);
			sock_put(newsk);
			newsk = NULL;
		}
E
Eric Dumazet 已提交
1417
	}
L
Linus Torvalds 已提交
1418 1419 1420 1421

	return newsk;

out_overflow:
1422
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1423
out_nonewsk:
L
Linus Torvalds 已提交
1424
	dst_release(dst);
1425
out:
1426
	tcp_listendrop(sk);
L
Linus Torvalds 已提交
1427 1428 1429
	return NULL;
}

1430 1431
INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
							   u32));
L
Linus Torvalds 已提交
1432
/* The socket must have it's spinlock held when we get
1433
 * here, unless it is a TCP_LISTEN socket.
L
Linus Torvalds 已提交
1434 1435 1436 1437 1438 1439
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
1440 1441
INDIRECT_CALLABLE_SCOPE
int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
L
Linus Torvalds 已提交
1442
{
E
Eric Dumazet 已提交
1443
	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
L
Linus Torvalds 已提交
1444
	struct sk_buff *opt_skb = NULL;
1445
	enum skb_drop_reason reason;
E
Eric Dumazet 已提交
1446
	struct tcp_sock *tp;
L
Linus Torvalds 已提交
1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
1475
					       --ANK (980728)
L
Linus Torvalds 已提交
1476 1477
	 */
	if (np->rxopt.all)
1478
		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
L
Linus Torvalds 已提交
1479

1480
	reason = SKB_DROP_REASON_NOT_SPECIFIED;
L
Linus Torvalds 已提交
1481
	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1482 1483 1484 1485
		struct dst_entry *dst;

		dst = rcu_dereference_protected(sk->sk_rx_dst,
						lockdep_sock_is_held(sk));
E
Eric Dumazet 已提交
1486

1487
		sock_rps_save_rxhash(sk, skb);
1488
		sk_mark_napi_id(sk, skb);
E
Eric Dumazet 已提交
1489
		if (dst) {
1490
			if (sk->sk_rx_dst_ifindex != skb->skb_iif ||
1491
			    INDIRECT_CALL_1(dst->ops->check, ip6_dst_check,
1492
					    dst, sk->sk_rx_dst_cookie) == NULL) {
1493
				RCU_INIT_POINTER(sk->sk_rx_dst, NULL);
E
Eric Dumazet 已提交
1494 1495 1496 1497
				dst_release(dst);
			}
		}

1498
		tcp_rcv_established(sk, skb);
L
Linus Torvalds 已提交
1499 1500 1501 1502 1503
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

E
Eric Dumazet 已提交
1504
	if (tcp_checksum_complete(skb))
L
Linus Torvalds 已提交
1505 1506
		goto csum_err;

1507
	if (sk->sk_state == TCP_LISTEN) {
1508 1509
		struct sock *nsk = tcp_v6_cookie_check(sk, skb);

L
Linus Torvalds 已提交
1510 1511 1512
		if (!nsk)
			goto discard;

W
Weilong Chen 已提交
1513
		if (nsk != sk) {
L
Linus Torvalds 已提交
1514 1515 1516 1517 1518 1519
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
1520
	} else
1521
		sock_rps_save_rxhash(sk, skb);
L
Linus Torvalds 已提交
1522

1523
	if (tcp_rcv_state_process(sk, skb))
L
Linus Torvalds 已提交
1524 1525 1526 1527 1528 1529
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
1530
	tcp_v6_send_reset(sk, skb);
L
Linus Torvalds 已提交
1531 1532 1533
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
1534
	kfree_skb_reason(skb, reason);
L
Linus Torvalds 已提交
1535 1536
	return 0;
csum_err:
1537
	reason = SKB_DROP_REASON_TCP_CSUM;
1538
	trace_tcp_bad_csum(skb);
1539 1540
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
L
Linus Torvalds 已提交
1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1555
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
E
Eric Dumazet 已提交
1556
			np->mcast_oif = tcp_v6_iif(opt_skb);
1557
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1558
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1559
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1560
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1561 1562
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1563
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
L
Linus Torvalds 已提交
1564
			skb_set_owner_r(opt_skb, sk);
1565
			tcp_v6_restore_cb(opt_skb);
L
Linus Torvalds 已提交
1566 1567 1568 1569 1570 1571 1572
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

1573
	consume_skb(opt_skb);
L
Linus Torvalds 已提交
1574 1575 1576
	return 0;
}

1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596
static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB at its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
1597 1598
	TCP_SKB_CB(skb)->has_rxtstamp =
			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
1599 1600
}

1601
INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
L
Linus Torvalds 已提交
1602
{
1603
	enum skb_drop_reason drop_reason;
1604
	int sdif = inet6_sdif(skb);
1605
	int dif = inet6_iif(skb);
1606
	const struct tcphdr *th;
1607
	const struct ipv6hdr *hdr;
1608
	bool refcounted;
L
Linus Torvalds 已提交
1609 1610
	struct sock *sk;
	int ret;
1611
	struct net *net = dev_net(skb->dev);
L
Linus Torvalds 已提交
1612

1613
	drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
L
Linus Torvalds 已提交
1614 1615 1616 1617 1618 1619
	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
E
Eric Dumazet 已提交
1620
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);
L
Linus Torvalds 已提交
1621 1622 1623 1624

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

1625
	th = (const struct tcphdr *)skb->data;
L
Linus Torvalds 已提交
1626

1627 1628
	if (unlikely(th->doff < sizeof(struct tcphdr) / 4)) {
		drop_reason = SKB_DROP_REASON_PKT_TOO_SMALL;
L
Linus Torvalds 已提交
1629
		goto bad_packet;
1630
	}
L
Linus Torvalds 已提交
1631 1632 1633
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

1634
	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1635
		goto csum_error;
L
Linus Torvalds 已提交
1636

1637
	th = (const struct tcphdr *)skb->data;
1638
	hdr = ipv6_hdr(skb);
L
Linus Torvalds 已提交
1639

1640
lookup:
1641
	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
1642
				th->source, th->dest, inet6_iif(skb), sdif,
1643
				&refcounted);
L
Linus Torvalds 已提交
1644 1645 1646 1647 1648 1649 1650
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		bool req_stolen = false;
		struct sock *nsk;

		sk = req->rsk_listener;
		drop_reason = tcp_inbound_md5_hash(sk, skb,
						   &hdr->saddr, &hdr->daddr,
						   AF_INET6, dif, sdif);
		if (drop_reason) {
			sk_drops_add(sk, skb);
			reqsk_put(req);
			goto discard_it;
		}
		if (tcp_checksum_complete(skb)) {
			reqsk_put(req);
			goto csum_error;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			nsk = reuseport_migrate_sock(sk, req_to_sk(req), skb);
			if (!nsk) {
				inet_csk_reqsk_queue_drop_and_put(sk, req);
				goto lookup;
			}
			sk = nsk;
			/* reuseport_migrate_sock() has already held one sk_refcnt
			 * before returning.
			 */
		} else {
			sock_hold(sk);
		}
		refcounted = true;
		nsk = NULL;
		if (!tcp_filter(sk, skb)) {
			th = (const struct tcphdr *)skb->data;
			hdr = ipv6_hdr(skb);
			tcp_v6_fill_cb(skb, hdr, th);
			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
		} else {
			drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
		}
		if (!nsk) {
			reqsk_put(req);
			if (req_stolen) {
				/* Another cpu got exclusive access to req
				 * and created a full blown socket.
				 * Try to feed this packet to this socket
				 * instead of discarding it.
				 */
				tcp_v6_restore_cb(skb);
				sock_put(sk);
				goto lookup;
			}
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v6_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v6_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}

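	/* Not a request or TIME_WAIT socket: apply the per-socket minimum
	 * hop limit, IPsec policy, TCP-MD5 and socket filter checks before
	 * the segment reaches the state machine.
	 */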
	if (static_branch_unlikely(&ip6_min_hopcount)) {
		/* min_hopcount can be changed concurrently from do_ipv6_setsockopt() */
		if (hdr->hop_limit < READ_ONCE(tcp_inet6_sk(sk)->min_hopcount)) {
			__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
			goto discard_and_relse;
		}
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
		goto discard_and_relse;
	}

	drop_reason = tcp_inbound_md5_hash(sk, skb, &hdr->saddr, &hdr->daddr,
					   AF_INET6, dif, sdif);
	if (drop_reason)
		goto discard_and_relse;

	if (tcp_filter(sk, skb)) {
		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
		goto discard_and_relse;
	}
	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);
	tcp_v6_fill_cb(skb, hdr, th);

	skb->dev = NULL;

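	/* Listening sockets are handled directly; established sockets are
	 * processed under the socket spinlock, or queued to the backlog if
	 * the socket is currently owned by process context.
	 */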
	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v6_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		ret = tcp_v6_do_rcv(sk, skb);
	} else {
		if (tcp_add_backlog(sk, skb, &drop_reason))
			goto discard_and_relse;
	}
	bh_unlock_sock(sk);
put_and_return:
	if (refcounted)
		sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	drop_reason = SKB_DROP_REASON_NO_SOCKET;
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		drop_reason = SKB_DROP_REASON_TCP_CSUM;
		trace_tcp_bad_csum(skb);
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	SKB_DR_OR(drop_reason, NOT_SPECIFIED);
	kfree_skb_reason(skb, drop_reason);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

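	/* TIME_WAIT sockets: tcp_timewait_state_process() decides whether
	 * the segment is an acceptable new SYN reusing the tuple, should be
	 * ACKed, should trigger a RST, or can be dropped silently.
	 */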
do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    skb, __tcp_hdrlen(th),
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest),
					    tcp_v6_iif_l3_slave(skb),
					    sdif);
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			refcounted = false;
			goto process;
		}
	}
		/* to ACK */
		fallthrough;
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}

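/* Early demux: steal the skb for an already established socket at the IPv6
 * receive stage (skb->sk + sock_edemux destructor) and, when the cached
 * route is still valid for this interface, attach it so the regular receive
 * path can skip the routing lookup.
 */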
void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb), inet6_sdif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, sk->sk_rx_dst_cookie);
			if (dst &&
			    sk->sk_rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

INDIRECT_CALLABLE_SCOPE void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
	__tcp_v6_send_check(skb, &sk->sk_v6_rcv_saddr, &sk->sk_v6_daddr);
}

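/* AF-specific operations used by TCP sockets speaking native IPv6; the
 * ipv6_mapped variant further below is substituted when a socket ends up
 * talking to an IPv4-mapped peer.
 */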
const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct request_sock *req, int i)
{
	long ttd = req->rsk_timer.expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq),
				    sock_i_uid(req->rsk_listener)),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	int rx_queue;
	int state;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	state = inet_sk_state_load(sp);
	if (state == TCP_LISTEN)
		rx_queue = READ_ONCE(sp->sk_ack_backlog);
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
				      READ_ONCE(tp->copied_seq), 0);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   state,
		   READ_ONCE(tp->write_seq) - tp->snd_una,
		   rx_queue,
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   refcount_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sp),
		   tcp_snd_cwnd(tp),
		   state == TCP_LISTEN ?
			fastopenq->max_qlen :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   refcount_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait6_sock(seq, v, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq6(seq, v, st->num);
	else
		get_tcp6_sock(seq, v, st->num);
out:
	return 0;
}

static const struct seq_operations tcp6_seq_ops = {
	.show		= tcp6_seq_show,
	.start		= tcp_seq_start,
	.next		= tcp_seq_next,
	.stop		= tcp_seq_stop,
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.family		= AF_INET6,
};

int __net_init tcp6_proc_init(struct net *net)
{
	if (!proc_create_net_data("tcp6", 0444, net->proc_net, &tcp6_seq_ops,
			sizeof(struct tcp_iter_state), &tcp6_seq_afinfo))
		return -ENOMEM;
	return 0;
}

void tcp6_proc_exit(struct net *net)
{
	remove_proc_entry("tcp6", net->proc_net);
}
#endif

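/* struct proto wiring the generic TCP implementation into the IPv6 socket
 * layer; registered for SOCK_STREAM/IPPROTO_TCP through tcpv6_protosw below.
 */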
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.pre_connect		= tcp_v6_pre_connect,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.bpf_bypass_getsockopt	= tcp_bpf_bypass_getsockopt,
	.keepalive		= tcp_set_keepalive,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.put_port		= inet_put_port,
#ifdef CONFIG_BPF_SYSCALL
	.psock_update_sk_prot	= tcp_bpf_update_proto,
#endif
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.leave_memory_pressure	= tcp_leave_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,

	.memory_allocated	= &tcp_memory_allocated,
	.per_cpu_fw_alloc	= &tcp_memory_per_cpu_fw_alloc,

	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
	.diag_destroy		= tcp_abort,
};
EXPORT_SYMBOL_GPL(tcpv6_prot);

static const struct inet6_protocol tcpv6_protocol = {
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};

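/* Per-namespace setup: each netns gets a kernel control socket (used e.g.
 * to send RSTs and ACKs on behalf of sockets we do not own), and IPv6
 * TIME_WAIT entries are purged when a batch of namespaces exits.
 */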
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};

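/* Called from inet6_init(): register the protocol handler, the protosw
 * entry, the pernet operations and MPTCP's IPv6 support, unwinding already
 * completed steps on failure.
 */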
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;

	ret = mptcpv6_init();
	if (ret)
		goto out_tcpv6_pernet_subsys;

out:
	return ret;

out_tcpv6_pernet_subsys:
	unregister_pernet_subsys(&tcpv6_net_ops);
out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}