// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/atomic.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <net/tcp_states.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
#include "protocol.h"
#include "mib.h"

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
struct mptcp6_sock {
	struct mptcp_sock msk;
	struct ipv6_pinfo np;
};
#endif

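/* MPTCP-level receive metadata, stashed in the skb control block once the
 * skb leaves the subflow receive queue: 64-bit data sequence numbers plus
 * the offset of the first byte not yet copied to user space.
 */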
struct mptcp_skb_cb {
	u64 map_seq;
	u64 end_seq;
	u32 offset;
};

#define MPTCP_SKB_CB(__skb)	((struct mptcp_skb_cb *)&((__skb)->cb[0]))

static struct percpu_counter mptcp_sockets_allocated;

/* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not
 * completed yet or has failed, return the subflow socket.
 * Otherwise return NULL.
 */
static struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk)
{
	if (!msk->subflow || READ_ONCE(msk->can_ack))
		return NULL;

	return msk->subflow;
}

static bool mptcp_is_tcpsk(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;

	if (unlikely(sk->sk_prot == &tcp_prot)) {
		/* we are being invoked after mptcp_accept() has
		 * accepted a non-mp-capable flow: sk is a tcp_sk,
		 * not an mptcp one.
		 *
		 * Hand the socket over to tcp so all further socket ops
		 * bypass mptcp.
		 */
		sock->ops = &inet_stream_ops;
		return true;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	} else if (unlikely(sk->sk_prot == &tcpv6_prot)) {
		sock->ops = &inet6_stream_ops;
		return true;
#endif
	}

	return false;
}

static struct sock *__mptcp_tcp_fallback(struct mptcp_sock *msk)
{
	sock_owned_by_me((const struct sock *)msk);

	if (likely(!__mptcp_check_fallback(msk)))
		return NULL;

	return msk->first;
}

static int __mptcp_socket_create(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	struct socket *ssock;
	int err;

	err = mptcp_subflow_create_socket(sk, &ssock);
	if (err)
		return err;

	msk->first = ssock->sk;
	msk->subflow = ssock;
	subflow = mptcp_subflow_ctx(ssock->sk);
	list_add(&subflow->node, &msk->conn_list);
	subflow->request_mptcp = 1;

	/* accept() will wait on first subflow sk_wq, and we always wake up
	 * via msk->sk_socket
	 */
	RCU_INIT_POINTER(msk->first->sk_wq, &sk->sk_socket->wq);

	return 0;
}

static void mptcp_drop(struct sock *sk, struct sk_buff *skb)
{
	sk_drops_add(sk, skb);
	__kfree_skb(skb);
}

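/* Try to append "from" to the tail skb "to"; on success the coalesced skb
 * inherits "from"'s end_seq and the msk memory accounting is updated.
 * Refused when "from" carries a non-zero offset or the skbs cannot be
 * physically merged.
 */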
static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
			       struct sk_buff *from)
{
	bool fragstolen;
	int delta;

	if (MPTCP_SKB_CB(from)->offset ||
	    !skb_try_coalesce(to, from, &fragstolen, &delta))
		return false;

	pr_debug("colesced seq %llx into %llx new len %d new end seq %llx",
		 MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq,
		 to->len, MPTCP_SKB_CB(from)->end_seq);
134
	MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq;
	kfree_skb_partial(from, fragstolen);
	atomic_add(delta, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, delta);
	return true;
}

static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to,
				   struct sk_buff *from)
{
	if (MPTCP_SKB_CB(from)->map_seq != MPTCP_SKB_CB(to)->end_seq)
		return false;

	return mptcp_try_coalesce((struct sock *)msk, to, from);
}

/* "inspired" by tcp_data_queue_ofo(), main differences:
 * - use mptcp seqs
 * - don't cope with sacks
 */
static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb)
{
	struct sock *sk = (struct sock *)msk;
	struct rb_node **p, *parent;
	u64 seq, end_seq, max_seq;
	struct sk_buff *skb1;
	int space;

	seq = MPTCP_SKB_CB(skb)->map_seq;
	end_seq = MPTCP_SKB_CB(skb)->end_seq;
	space = tcp_space(sk);
	max_seq = space > 0 ? space + msk->ack_seq : msk->ack_seq;

	pr_debug("msk=%p seq=%llx limit=%llx empty=%d", msk, seq, max_seq,
		 RB_EMPTY_ROOT(&msk->out_of_order_queue));
	if (after64(seq, max_seq)) {
		/* out of window */
		mptcp_drop(sk, skb);
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_NODSSWINDOW);
		return;
	}

	p = &msk->out_of_order_queue.rb_node;
	MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUE);
	if (RB_EMPTY_ROOT(&msk->out_of_order_queue)) {
		rb_link_node(&skb->rbnode, NULL, p);
		rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);
		msk->ooo_last_skb = skb;
		goto end;
	}

	/* with 2 subflows, adding at end of ooo queue is quite likely
	 * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
	 */
	if (mptcp_ooo_try_coalesce(msk, msk->ooo_last_skb, skb)) {
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE);
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL);
		return;
	}

	/* Can avoid an rbtree lookup if we are adding skb after ooo_last_skb */
	if (!before64(seq, MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq)) {
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL);
		parent = &msk->ooo_last_skb->rbnode;
		p = &parent->rb_right;
		goto insert;
	}

	/* Find place to insert this segment. Handle overlaps on the way. */
	parent = NULL;
	while (*p) {
		parent = *p;
		skb1 = rb_to_skb(parent);
		if (before64(seq, MPTCP_SKB_CB(skb1)->map_seq)) {
			p = &parent->rb_left;
			continue;
		}
		if (before64(seq, MPTCP_SKB_CB(skb1)->end_seq)) {
			if (!after64(end_seq, MPTCP_SKB_CB(skb1)->end_seq)) {
				/* All the bits are present. Drop. */
				mptcp_drop(sk, skb);
				MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
				return;
			}
			if (after64(seq, MPTCP_SKB_CB(skb1)->map_seq)) {
				/* partial overlap:
				 *     |     skb      |
				 *  |     skb1    |
				 * continue traversing
				 */
			} else {
				/* skb's seq == skb1's seq and skb covers skb1.
				 * Replace skb1 with skb.
				 */
				rb_replace_node(&skb1->rbnode, &skb->rbnode,
						&msk->out_of_order_queue);
				mptcp_drop(sk, skb1);
				MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
				goto merge_right;
			}
		} else if (mptcp_ooo_try_coalesce(msk, skb1, skb)) {
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE);
			return;
		}
		p = &parent->rb_right;
	}

insert:
	/* Insert segment into RB tree. */
	rb_link_node(&skb->rbnode, parent, p);
	rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);

merge_right:
	/* Remove other segments covered by skb. */
	while ((skb1 = skb_rb_next(skb)) != NULL) {
		if (before64(end_seq, MPTCP_SKB_CB(skb1)->end_seq))
			break;
		rb_erase(&skb1->rbnode, &msk->out_of_order_queue);
		mptcp_drop(sk, skb1);
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
	}
	/* If there is no skb after us, we are the last_skb! */
	if (!skb1)
		msk->ooo_last_skb = skb;

end:
	skb_condense(skb);
	skb_set_owner_r(skb, sk);
}

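/* Move a single skb from the subflow receive queue into the msk: in-sequence
 * data is appended to the msk receive queue (coalescing with the tail skb
 * when possible), data beyond ack_seq is parked in the out-of-order rbtree,
 * and already received data is dropped.
 */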
static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
			     struct sk_buff *skb, unsigned int offset,
			     size_t copy_len)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = (struct sock *)msk;
	struct sk_buff *tail;

	__skb_unlink(skb, &ssk->sk_receive_queue);

	skb_ext_reset(skb);
	skb_orphan(skb);

	/* the skb map_seq accounts for the skb offset:
	 * mptcp_subflow_get_mapped_dsn() is based on the current tp->copied_seq
	 * value
	 */
	MPTCP_SKB_CB(skb)->map_seq = mptcp_subflow_get_mapped_dsn(subflow);
	MPTCP_SKB_CB(skb)->end_seq = MPTCP_SKB_CB(skb)->map_seq + copy_len;
	MPTCP_SKB_CB(skb)->offset = offset;

	if (MPTCP_SKB_CB(skb)->map_seq == msk->ack_seq) {
		/* in sequence */
		WRITE_ONCE(msk->ack_seq, msk->ack_seq + copy_len);
		tail = skb_peek_tail(&sk->sk_receive_queue);
		if (tail && mptcp_try_coalesce(sk, tail, skb))
			return true;

		skb_set_owner_r(skb, sk);
		__skb_queue_tail(&sk->sk_receive_queue, skb);
		return true;
	} else if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) {
		mptcp_data_queue_ofo(msk, skb);
		return false;
	}

	/* old data, keep it simple and drop the whole pkt, the sender
	 * will retransmit it as needed.
	 */
	MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
	mptcp_drop(sk, skb);
	return false;
}

static void mptcp_stop_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	mptcp_sk(sk)->timer_ival = 0;
}

static void mptcp_check_data_fin_ack(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (__mptcp_check_fallback(msk))
		return;

	/* Look for an acknowledged DATA_FIN */
	if (((1 << sk->sk_state) &
	     (TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK)) &&
	    msk->write_seq == atomic64_read(&msk->snd_una)) {
		mptcp_stop_timer(sk);

		WRITE_ONCE(msk->snd_data_fin_enable, 0);

		switch (sk->sk_state) {
		case TCP_FIN_WAIT1:
			inet_sk_state_store(sk, TCP_FIN_WAIT2);
			sk->sk_state_change(sk);
			break;
		case TCP_CLOSING:
		case TCP_LAST_ACK:
			inet_sk_state_store(sk, TCP_CLOSE);
			sk->sk_state_change(sk);
			break;
		}

		if (sk->sk_shutdown == SHUTDOWN_MASK ||
		    sk->sk_state == TCP_CLOSE)
			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
		else
			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	}
}

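/* Return true - and report the DATA_FIN sequence number, if requested -
 * when a received DATA_FIN is ready to be processed, i.e. the reader has
 * caught up with it.
 */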
static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (READ_ONCE(msk->rcv_data_fin) &&
	    ((1 << sk->sk_state) &
	     (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
		u64 rcv_data_fin_seq = READ_ONCE(msk->rcv_data_fin_seq);

		if (msk->ack_seq == rcv_data_fin_seq) {
			if (seq)
				*seq = rcv_data_fin_seq;

			return true;
		}
	}

	return false;
}

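/* Update the MPTCP retransmit interval: use the time left on the first
 * subflow retransmit timer when one is pending, else keep the previous
 * value, defaulting to TCP_RTO_MIN when nothing better is available.
 */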
static void mptcp_set_timeout(const struct sock *sk, const struct sock *ssk)
{
	long tout = ssk && inet_csk(ssk)->icsk_pending ?
				      inet_csk(ssk)->icsk_timeout - jiffies : 0;

	if (tout <= 0)
		tout = mptcp_sk(sk)->timer_ival;
	mptcp_sk(sk)->timer_ival = tout > 0 ? tout : TCP_RTO_MIN;
}

static void mptcp_check_data_fin(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	u64 rcv_data_fin_seq;

	if (__mptcp_check_fallback(msk) || !msk->first)
		return;

	/* Need to ack a DATA_FIN received from a peer while this side
	 * of the connection is in ESTABLISHED, FIN_WAIT1, or FIN_WAIT2.
	 * msk->rcv_data_fin was set when parsing the incoming options
	 * at the subflow level and the msk lock was not held, so this
	 * is the first opportunity to act on the DATA_FIN and change
	 * the msk state.
	 *
	 * If we are caught up to the sequence number of the incoming
	 * DATA_FIN, send the DATA_ACK now and do state transition.  If
	 * not caught up, do nothing and let the recv code send DATA_ACK
	 * when catching up.
	 */

	if (mptcp_pending_data_fin(sk, &rcv_data_fin_seq)) {
		struct mptcp_subflow_context *subflow;

		WRITE_ONCE(msk->ack_seq, msk->ack_seq + 1);
		WRITE_ONCE(msk->rcv_data_fin, 0);

		sk->sk_shutdown |= RCV_SHUTDOWN;
		smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
		set_bit(MPTCP_DATA_READY, &msk->flags);

		switch (sk->sk_state) {
		case TCP_ESTABLISHED:
			inet_sk_state_store(sk, TCP_CLOSE_WAIT);
			break;
		case TCP_FIN_WAIT1:
			inet_sk_state_store(sk, TCP_CLOSING);
			break;
		case TCP_FIN_WAIT2:
			inet_sk_state_store(sk, TCP_CLOSE);
			// @@ Close subflows now?
			break;
		default:
			/* Other states not expected */
			WARN_ON_ONCE(1);
			break;
		}

		mptcp_set_timeout(sk, NULL);
		mptcp_for_each_subflow(msk, subflow) {
			struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

			lock_sock(ssk);
			tcp_send_ack(ssk);
			release_sock(ssk);
		}

		sk->sk_state_change(sk);

		if (sk->sk_shutdown == SHUTDOWN_MASK ||
		    sk->sk_state == TCP_CLOSE)
			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
		else
			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	}
}

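/* Pull as many in-sequence skbs as possible out of one subflow receive
 * queue and into the msk, advancing the subflow copied_seq as if the data
 * had been read locally; stops once the msk receive buffer is over limit
 * or the subflow runs out of data.
 */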
static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
					   struct sock *ssk,
					   unsigned int *bytes)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = (struct sock *)msk;
	unsigned int moved = 0;
	bool more_data_avail;
	struct tcp_sock *tp;
	u32 old_copied_seq;
	bool done = false;

	pr_debug("msk=%p ssk=%p", msk, ssk);
	tp = tcp_sk(ssk);
	old_copied_seq = tp->copied_seq;
	do {
		u32 map_remaining, offset;
		u32 seq = tp->copied_seq;
		struct sk_buff *skb;
		bool fin;

		/* try to move as much data as available */
		map_remaining = subflow->map_data_len -
				mptcp_subflow_get_map_offset(subflow);

		skb = skb_peek(&ssk->sk_receive_queue);
		if (!skb) {
			/* if no data is found, a racing workqueue/recvmsg
			 * already processed the new data, stop here or we
			 * can enter an infinite loop
			 */
			if (!moved)
				done = true;
			break;
		}

		if (__mptcp_check_fallback(msk)) {
			/* if we are running under the workqueue, TCP could have
			 * collapsed skbs between dummy map creation and now
			 * be sure to adjust the size
			 */
			map_remaining = skb->len;
			subflow->map_data_len = skb->len;
		}

		offset = seq - TCP_SKB_CB(skb)->seq;
		fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
		if (fin) {
			done = true;
			seq++;
		}

		if (offset < skb->len) {
			size_t len = skb->len - offset;

			if (tp->urg_data)
				done = true;

			if (__mptcp_move_skb(msk, ssk, skb, offset, len))
				moved += len;
			seq += len;

			if (WARN_ON_ONCE(map_remaining < len))
				break;
		} else {
			WARN_ON_ONCE(!fin);
			sk_eat_skb(ssk, skb);
			done = true;
		}

		WRITE_ONCE(tp->copied_seq, seq);
		more_data_avail = mptcp_subflow_data_available(ssk);

		if (atomic_read(&sk->sk_rmem_alloc) > READ_ONCE(sk->sk_rcvbuf)) {
			done = true;
			break;
		}
	} while (more_data_avail);

	*bytes += moved;
	if (tp->copied_seq != old_copied_seq)
		tcp_cleanup_rbuf(ssk, 1);

	return done;
}

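/* Drain the out-of-order rbtree: every skb that became in-sequence is
 * appended to the msk receive queue, trimming any overlap with already
 * queued data, and msk->ack_seq is advanced accordingly.
 */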
static bool mptcp_ofo_queue(struct mptcp_sock *msk)
{
	struct sock *sk = (struct sock *)msk;
	struct sk_buff *skb, *tail;
	bool moved = false;
	struct rb_node *p;
	u64 end_seq;

	p = rb_first(&msk->out_of_order_queue);
	pr_debug("msk=%p empty=%d", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue));
	while (p) {
		skb = rb_to_skb(p);
		if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq))
			break;

		p = rb_next(p);
		rb_erase(&skb->rbnode, &msk->out_of_order_queue);

		if (unlikely(!after64(MPTCP_SKB_CB(skb)->end_seq,
				      msk->ack_seq))) {
			mptcp_drop(sk, skb);
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
			continue;
		}

		end_seq = MPTCP_SKB_CB(skb)->end_seq;
		tail = skb_peek_tail(&sk->sk_receive_queue);
		if (!tail || !mptcp_ooo_try_coalesce(msk, tail, skb)) {
			int delta = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq;

			/* skip overlapping data, if any */
			pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d",
				 MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq,
				 delta);
			MPTCP_SKB_CB(skb)->offset += delta;
			__skb_queue_tail(&sk->sk_receive_queue, skb);
		}
		msk->ack_seq = end_seq;
		moved = true;
	}
	return moved;
}

/* In most cases we will be able to lock the mptcp socket.  If it's already
 * owned, we need to defer to the work queue to avoid ABBA deadlock.
 */
static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
{
	struct sock *sk = (struct sock *)msk;
	unsigned int moved = 0;

	if (READ_ONCE(sk->sk_lock.owned))
		return false;

	if (unlikely(!spin_trylock_bh(&sk->sk_lock.slock)))
		return false;

	/* must re-check after taking the lock */
	if (!READ_ONCE(sk->sk_lock.owned)) {
		__mptcp_move_skbs_from_subflow(msk, ssk, &moved);
		mptcp_ofo_queue(msk);

		/* If the moves have caught up with the DATA_FIN sequence number
		 * it's time to ack the DATA_FIN and change socket state, but
		 * this is not a good place to change state. Let the workqueue
		 * do it.
		 */
		if (mptcp_pending_data_fin(sk, NULL) &&
		    schedule_work(&msk->work))
			sock_hold(sk);
	}

	spin_unlock_bh(&sk->sk_lock.slock);

	return moved > 0;
}

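/* Called by the subflow when new data becomes available: try to move skbs
 * to the msk straight away under the msk socket spinlock, deferring the
 * work to release_cb() when the msk is owned by user context, then wake
 * the reader if needed.
 */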
void mptcp_data_ready(struct sock *sk, struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct mptcp_sock *msk = mptcp_sk(sk);
	bool wake;

	/* move_skbs_to_msk below can legitimately clear the data_avail flag,
	 * but we will later need to properly wake the reader; cache its
	 * value
	 */
	wake = subflow->data_avail == MPTCP_SUBFLOW_DATA_AVAIL;
	if (wake)
		set_bit(MPTCP_DATA_READY, &msk->flags);

	if (atomic_read(&sk->sk_rmem_alloc) < READ_ONCE(sk->sk_rcvbuf) &&
	    move_skbs_to_msk(msk, ssk))
		goto wake;

	/* don't schedule if mptcp sk is (still) over limit */
	if (atomic_read(&sk->sk_rmem_alloc) > READ_ONCE(sk->sk_rcvbuf))
		goto wake;

	/* mptcp socket is owned, release_cb should retry */
	if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
			      &sk->sk_tsq_flags)) {
		sock_hold(sk);

		/* need to try again, it's possible release_cb() has already
		 * been called after the test_and_set_bit() above.
		 */
		move_skbs_to_msk(msk, ssk);
	}
wake:
	if (wake)
		sk->sk_data_ready(sk);
}

static void __mptcp_flush_join_list(struct mptcp_sock *msk)
{
	if (likely(list_empty(&msk->join_list)))
		return;

	spin_lock_bh(&msk->join_list_lock);
	list_splice_tail_init(&msk->join_list, &msk->conn_list);
	spin_unlock_bh(&msk->join_list_lock);
}

static bool mptcp_timer_pending(struct sock *sk)
{
	return timer_pending(&inet_csk(sk)->icsk_retransmit_timer);
}

static void mptcp_reset_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	unsigned long tout;

	/* should never be called with mptcp level timer cleared */
	tout = READ_ONCE(mptcp_sk(sk)->timer_ival);
	if (WARN_ON_ONCE(!tout))
		tout = TCP_RTO_MIN;
	sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + tout);
}

void mptcp_data_acked(struct sock *sk)
{
	mptcp_reset_timer(sk);

	if ((!test_bit(MPTCP_SEND_SPACE, &mptcp_sk(sk)->flags) ||
	     (inet_sk_state_load(sk) != TCP_ESTABLISHED)) &&
	    schedule_work(&mptcp_sk(sk)->work))
		sock_hold(sk);
}

void mptcp_subflow_eof(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (!test_and_set_bit(MPTCP_WORK_EOF, &msk->flags) &&
	    schedule_work(&msk->work))
		sock_hold(sk);
}

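/* When every subflow has reported EOF, propagate RCV_SHUTDOWN to the msk
 * and wake up the reader.
 */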
static void mptcp_check_for_eof(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	int receivers = 0;

	mptcp_for_each_subflow(msk, subflow)
		receivers += !subflow->rx_eof;

	if (!receivers && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		/* hopefully temporary hack: propagate shutdown status
		 * to msk, when all subflows agree on it
		 */
		sk->sk_shutdown |= RCV_SHUTDOWN;

		smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
		set_bit(MPTCP_DATA_READY, &msk->flags);
		sk->sk_data_ready(sk);
	}
}

static bool mptcp_ext_cache_refill(struct mptcp_sock *msk)
{
	const struct sock *sk = (const struct sock *)msk;

	if (!msk->cached_ext)
		msk->cached_ext = __skb_ext_alloc(sk->sk_allocation);

	return !!msk->cached_ext;
}

static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;

	sock_owned_by_me(sk);

	mptcp_for_each_subflow(msk, subflow) {
		if (subflow->data_avail)
			return mptcp_subflow_tcp_sock(subflow);
	}

	return NULL;
}

static bool mptcp_skb_can_collapse_to(u64 write_seq,
				      const struct sk_buff *skb,
				      const struct mptcp_ext *mpext)
{
	if (!tcp_skb_can_collapse_to(skb))
		return false;

	/* can collapse only if MPTCP level sequence is in order */
	return mpext && mpext->data_seq + mpext->data_len == write_seq;
}

static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
				       const struct page_frag *pfrag,
				       const struct mptcp_data_frag *df)
{
	return df && pfrag->page == df->page &&
		df->data_seq + df->data_len == msk->write_seq;
}

static void dfrag_uncharge(struct sock *sk, int len)
{
	sk_mem_uncharge(sk, len);
	sk_wmem_queued_add(sk, -len);
}

static void dfrag_clear(struct sock *sk, struct mptcp_data_frag *dfrag)
{
	int len = dfrag->data_len + dfrag->overhead;

	list_del(&dfrag->list);
	dfrag_uncharge(sk, len);
	put_page(dfrag->page);
}

static bool mptcp_is_writeable(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;

	if (!sk_stream_is_writeable((struct sock *)msk))
		return false;

	mptcp_for_each_subflow(msk, subflow) {
		if (sk_stream_is_writeable(subflow->tcp_sock))
			return true;
	}
	return false;
}

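/* Release rtx queue fragments fully acked at the MPTCP level, trim the
 * partially acked head fragment, and wake up writers when some send space
 * has been reclaimed and at least one subflow is writeable.
 */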
static void mptcp_clean_una(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_data_frag *dtmp, *dfrag;
	bool cleaned = false;
	u64 snd_una;

	/* on fallback we just need to ignore snd_una, as this is really
	 * plain TCP
	 */
	if (__mptcp_check_fallback(msk))
		atomic64_set(&msk->snd_una, msk->write_seq);
	snd_una = atomic64_read(&msk->snd_una);

	list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) {
		if (after64(dfrag->data_seq + dfrag->data_len, snd_una))
			break;

		dfrag_clear(sk, dfrag);
		cleaned = true;
	}

	dfrag = mptcp_rtx_head(sk);
	if (dfrag && after64(snd_una, dfrag->data_seq)) {
		u64 delta = snd_una - dfrag->data_seq;

		if (WARN_ON_ONCE(delta > dfrag->data_len))
			goto out;

		dfrag->data_seq += delta;
		dfrag->offset += delta;
		dfrag->data_len -= delta;

		dfrag_uncharge(sk, delta);
		cleaned = true;
	}

out:
	if (cleaned) {
		sk_mem_reclaim_partial(sk);

		/* Only wake up writers if a subflow is ready */
		if (mptcp_is_writeable(msk)) {
			set_bit(MPTCP_SEND_SPACE, &mptcp_sk(sk)->flags);
			smp_mb__after_atomic();

			/* set SEND_SPACE before sk_stream_write_space clears
			 * NOSPACE
			 */
			sk_stream_write_space(sk);
		}
	}
}

/* ensure we get enough memory for the frag hdr, beyond some minimal amount of
 * data
 */
static bool mptcp_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
{
	if (likely(skb_page_frag_refill(32U + sizeof(struct mptcp_data_frag),
					pfrag, sk->sk_allocation)))
		return true;

	sk->sk_prot->enter_memory_pressure(sk);
	sk_stream_moderate_sndbuf(sk);
	return false;
}

static struct mptcp_data_frag *
mptcp_carve_data_frag(const struct mptcp_sock *msk, struct page_frag *pfrag,
		      int orig_offset)
{
	int offset = ALIGN(orig_offset, sizeof(long));
	struct mptcp_data_frag *dfrag;

	dfrag = (struct mptcp_data_frag *)(page_to_virt(pfrag->page) + offset);
	dfrag->data_len = 0;
	dfrag->data_seq = msk->write_seq;
	dfrag->overhead = offset - orig_offset + sizeof(struct mptcp_data_frag);
	dfrag->offset = offset + sizeof(struct mptcp_data_frag);
	dfrag->page = pfrag->page;

	return dfrag;
}

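/* Push at most one fragment on the given subflow; a non-NULL dfrag means
 * this is an MPTCP-level retransmission.  The DSS mapping is carried by an
 * skb extension, extended in place when TCP collapsed the data onto the
 * previous skb.  Returns the bytes handed to TCP or a negative error.
 */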
static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
			      struct msghdr *msg, struct mptcp_data_frag *dfrag,
			      long *timeo, int *pmss_now,
			      int *ps_goal)
{
	int mss_now, avail_size, size_goal, offset, ret, frag_truesize = 0;
	bool dfrag_collapsed, can_collapse = false;
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_ext *mpext = NULL;
	bool retransmission = !!dfrag;
	struct sk_buff *skb, *tail;
	struct page_frag *pfrag;
	struct page *page;
	u64 *write_seq;
	size_t psize;

	/* use the mptcp page cache so that we can easily move the data
	 * from one substream to another, but do per subflow memory accounting
	 * Note: pfrag is used only when !retransmission, but the compiler is
	 * fooled into a warning if we don't init here
893 894
	 */
	pfrag = sk_page_frag(sk);
	if (!retransmission) {
		write_seq = &msk->write_seq;
		page = pfrag->page;
	} else {
		write_seq = &dfrag->data_seq;
		page = dfrag->page;
	}

	/* compute copy limit */
	mss_now = tcp_send_mss(ssk, &size_goal, msg->msg_flags);
	*pmss_now = mss_now;
	*ps_goal = size_goal;
	avail_size = size_goal;
	skb = tcp_write_queue_tail(ssk);
	if (skb) {
		mpext = skb_ext_find(skb, SKB_EXT_MPTCP);

		/* Limit the write to the size available in the
		 * current skb, if any, so that we create at most a new skb.
		 * Explicitly tells TCP internals to avoid collapsing on later
		 * queue management operation, to avoid breaking the ext <->
		 * SSN association set here
		 */
		can_collapse = (size_goal - skb->len > 0) &&
			      mptcp_skb_can_collapse_to(*write_seq, skb, mpext);
		if (!can_collapse)
			TCP_SKB_CB(skb)->eor = 1;
		else
			avail_size = size_goal - skb->len;
	}

	if (!retransmission) {
		/* reuse tail pfrag, if possible, or carve a new one from the
		 * page allocator
		 */
		dfrag = mptcp_rtx_tail(sk);
		offset = pfrag->offset;
		dfrag_collapsed = mptcp_frag_can_collapse_to(msk, pfrag, dfrag);
		if (!dfrag_collapsed) {
			dfrag = mptcp_carve_data_frag(msk, pfrag, offset);
			offset = dfrag->offset;
			frag_truesize = dfrag->overhead;
		}
		psize = min_t(size_t, pfrag->size - offset, avail_size);

		/* Copy to page */
		pr_debug("left=%zu", msg_data_left(msg));
		psize = copy_page_from_iter(pfrag->page, offset,
					    min_t(size_t, msg_data_left(msg),
						  psize),
					    &msg->msg_iter);
		pr_debug("left=%zu", msg_data_left(msg));
		if (!psize)
			return -EINVAL;

		if (!sk_wmem_schedule(sk, psize + dfrag->overhead)) {
			iov_iter_revert(&msg->msg_iter, psize);
			return -ENOMEM;
		}
	} else {
		offset = dfrag->offset;
		psize = min_t(size_t, dfrag->data_len, avail_size);
	}

	/* tell the TCP stack to delay the push so that we can safely
	 * access the skb after the sendpages call
	 */
	ret = do_tcp_sendpages(ssk, page, offset, psize,
			       msg->msg_flags | MSG_SENDPAGE_NOTLAST | MSG_DONTWAIT);
	if (ret <= 0) {
		if (!retransmission)
			iov_iter_revert(&msg->msg_iter, psize);
		return ret;
	}

	frag_truesize += ret;
	if (!retransmission) {
		if (unlikely(ret < psize))
			iov_iter_revert(&msg->msg_iter, psize - ret);

		/* send successful, keep track of sent data for mptcp-level
		 * retransmission
		 */
		dfrag->data_len += ret;
		if (!dfrag_collapsed) {
			get_page(dfrag->page);
			list_add_tail(&dfrag->list, &msk->rtx_queue);
			sk_wmem_queued_add(sk, frag_truesize);
		} else {
			sk_wmem_queued_add(sk, ret);
		}

		/* charge data on mptcp rtx queue to the master socket
		 * Note: we charge such data both to sk and ssk
		 */
		sk->sk_forward_alloc -= frag_truesize;
	}

	/* if the tail skb extension is still the cached one, collapsing
	 * really happened. Note: we can't check for 'same skb' as the sk_buff
	 * hdr on tail can be transmitted, freed and re-allocated by the
	 * do_tcp_sendpages() call
	 */
	tail = tcp_write_queue_tail(ssk);
	if (mpext && tail && mpext == skb_ext_find(tail, SKB_EXT_MPTCP)) {
		WARN_ON_ONCE(!can_collapse);
		mpext->data_len += ret;
		goto out;
	}

	skb = tcp_write_queue_tail(ssk);
	mpext = __skb_ext_set(skb, SKB_EXT_MPTCP, msk->cached_ext);
	msk->cached_ext = NULL;

	memset(mpext, 0, sizeof(*mpext));
	mpext->data_seq = *write_seq;
	mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq;
	mpext->data_len = ret;
	mpext->use_map = 1;
	mpext->dsn64 = 1;

	pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d",
		 mpext->data_seq, mpext->subflow_seq, mpext->data_len,
		 mpext->dsn64);

out:
	if (!retransmission)
		pfrag->offset += frag_truesize;
	WRITE_ONCE(*write_seq, *write_seq + ret);
	mptcp_subflow_ctx(ssk)->rel_write_seq += ret;

	return ret;
}

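/* Mark the msk as short of send space and arm SOCK_NOSPACE on every subflow
 * socket, so that the subflow write_space callbacks will wake us up.
 */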
static void mptcp_nospace(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;

	clear_bit(MPTCP_SEND_SPACE, &msk->flags);
	smp_mb__after_atomic(); /* msk->flags is changed by write_space cb */

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		struct socket *sock = READ_ONCE(ssk->sk_socket);

		/* enables ssk->write_space() callbacks */
		if (sock)
			set_bit(SOCK_NOSPACE, &sock->flags);
	}
}

static bool mptcp_subflow_active(struct mptcp_subflow_context *subflow)
{
	struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

	/* can't send if the JOIN hasn't completed yet, i.e. the subflow is
	 * not yet usable for mptcp
	 */
	if (subflow->request_join && !subflow->fully_established)
		return false;

	/* only send if our side has not closed yet */
	return ((1 << ssk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT));
}

#define MPTCP_SEND_BURST_SIZE		((1 << 16) - \
					 sizeof(struct tcphdr) - \
					 MAX_TCP_OPTION_SPACE - \
					 sizeof(struct ipv6hdr) - \
					 sizeof(struct frag_hdr))

struct subflow_send_info {
	struct sock *ssk;
	u64 ratio;
};

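/* Packet scheduler: keep using the last subflow while its burst allowance
 * lasts, otherwise pick the active - or, failing that, backup - subflow
 * with the lowest wmem/pacing-rate ratio.  Also report the largest subflow
 * send window seen, for MPTCP-level sndbuf auto-tuning.
 */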
static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk,
					   u32 *sndbuf)
{
	struct subflow_send_info send_info[2];
	struct mptcp_subflow_context *subflow;
	int i, nr_active = 0;
	struct sock *ssk;
	u64 ratio;
	u32 pace;

	sock_owned_by_me((struct sock *)msk);

	*sndbuf = 0;
	if (!mptcp_ext_cache_refill(msk))
		return NULL;

	if (__mptcp_check_fallback(msk)) {
		if (!msk->first)
			return NULL;
		*sndbuf = msk->first->sk_sndbuf;
		return sk_stream_memory_free(msk->first) ? msk->first : NULL;
	}

	/* re-use last subflow, if the burst allow that */
	if (msk->last_snd && msk->snd_burst > 0 &&
	    sk_stream_memory_free(msk->last_snd) &&
	    mptcp_subflow_active(mptcp_subflow_ctx(msk->last_snd))) {
		mptcp_for_each_subflow(msk, subflow) {
			ssk = mptcp_subflow_tcp_sock(subflow);
			*sndbuf = max(tcp_sk(ssk)->snd_wnd, *sndbuf);
		}
		return msk->last_snd;
	}

	/* pick the subflow with the lower wmem/wspace ratio */
	for (i = 0; i < 2; ++i) {
		send_info[i].ssk = NULL;
		send_info[i].ratio = -1;
	}
	mptcp_for_each_subflow(msk, subflow) {
		ssk = mptcp_subflow_tcp_sock(subflow);
		if (!mptcp_subflow_active(subflow))
			continue;

		nr_active += !subflow->backup;
		*sndbuf = max(tcp_sk(ssk)->snd_wnd, *sndbuf);
		if (!sk_stream_memory_free(subflow->tcp_sock))
			continue;

		pace = READ_ONCE(ssk->sk_pacing_rate);
		if (!pace)
			continue;

		ratio = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32,
				pace);
		if (ratio < send_info[subflow->backup].ratio) {
			send_info[subflow->backup].ssk = ssk;
			send_info[subflow->backup].ratio = ratio;
		}
	}

	pr_debug("msk=%p nr_active=%d ssk=%p:%lld backup=%p:%lld",
		 msk, nr_active, send_info[0].ssk, send_info[0].ratio,
		 send_info[1].ssk, send_info[1].ratio);

	/* pick the best backup if no other subflow is active */
	if (!nr_active)
		send_info[0].ssk = send_info[1].ssk;

	if (send_info[0].ssk) {
		msk->last_snd = send_info[0].ssk;
		msk->snd_burst = min_t(int, MPTCP_SEND_BURST_SIZE,
				       sk_stream_wspace(msk->last_snd));
		return msk->last_snd;
	}
	return NULL;
}

static void ssk_check_wmem(struct mptcp_sock *msk)
{
	if (unlikely(!mptcp_is_writeable(msk)))
		mptcp_nospace(msk);
}

static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	int mss_now = 0, size_goal = 0, ret = 0;
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct page_frag *pfrag;
	size_t copied = 0;
	struct sock *ssk;
	u32 sndbuf;
	bool tx_ok;
	long timeo;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -EOPNOTSUPP;

	lock_sock(sk);

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
		ret = sk_stream_wait_connect(sk, &timeo);
		if (ret)
			goto out;
	}

	pfrag = sk_page_frag(sk);
restart:
	mptcp_clean_una(sk);

	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) {
		ret = -EPIPE;
		goto out;
	}

	__mptcp_flush_join_list(msk);
	ssk = mptcp_subflow_get_send(msk, &sndbuf);
	while (!sk_stream_memory_free(sk) ||
	       !ssk ||
	       !mptcp_page_frag_refill(ssk, pfrag)) {
		if (ssk) {
			/* make sure retransmit timer is
			 * running before we wait for memory.
			 *
			 * The retransmit timer might be needed
			 * to make the peer send an up-to-date
			 * MPTCP Ack.
			 */
			mptcp_set_timeout(sk, ssk);
			if (!mptcp_timer_pending(sk))
				mptcp_reset_timer(sk);
		}

		mptcp_nospace(msk);
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret)
			goto out;

		mptcp_clean_una(sk);

		ssk = mptcp_subflow_get_send(msk, &sndbuf);
		if (list_empty(&msk->conn_list)) {
			ret = -ENOTCONN;
			goto out;
		}
	}

	/* do auto tuning */
	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK) &&
	    sndbuf > READ_ONCE(sk->sk_sndbuf))
		WRITE_ONCE(sk->sk_sndbuf, sndbuf);

	pr_debug("conn_list->subflow=%p", ssk);

	lock_sock(ssk);
	tx_ok = msg_data_left(msg);
	while (tx_ok) {
		ret = mptcp_sendmsg_frag(sk, ssk, msg, NULL, &timeo, &mss_now,
					 &size_goal);
		if (ret < 0) {
			if (ret == -EAGAIN && timeo > 0) {
				mptcp_set_timeout(sk, ssk);
				release_sock(ssk);
				goto restart;
			}
			break;
		}

		/* burst can be negative, we will try to move to the next subflow
		 * at selection time, if possible.
		 */
		msk->snd_burst -= ret;
		copied += ret;

		tx_ok = msg_data_left(msg);
		if (!tx_ok)
			break;

		if (!sk_stream_memory_free(ssk) ||
		    !mptcp_page_frag_refill(ssk, pfrag) ||
		    !mptcp_ext_cache_refill(msk)) {
			tcp_push(ssk, msg->msg_flags, mss_now,
				 tcp_sk(ssk)->nonagle, size_goal);
			mptcp_set_timeout(sk, ssk);
			release_sock(ssk);
			goto restart;
		}

		/* memory is charged to mptcp level socket as well, i.e.
		 * if msg is very large, mptcp socket may run out of buffer
		 * space.  mptcp_clean_una() will release data that has
		 * been acked at mptcp level in the mean time, so there is
		 * a good chance we can continue sending data right away.
		 *
		 * Normally, when the tcp subflow can accept more data, then
		 * so can the MPTCP socket.  However, we need to cope with
		 * peers that might lag behind in their MPTCP-level
		 * acknowledgements, i.e.  data might have been acked at
		 * tcp level only.  So, we must also check the MPTCP socket
		 * limits before we send more data.
		 */
		if (unlikely(!sk_stream_memory_free(sk))) {
			tcp_push(ssk, msg->msg_flags, mss_now,
				 tcp_sk(ssk)->nonagle, size_goal);
			mptcp_clean_una(sk);
			if (!sk_stream_memory_free(sk)) {
				/* can't send more for now, need to wait for
				 * MPTCP-level ACKs from peer.
				 *
				 * Wakeup will happen via mptcp_clean_una().
				 */
				mptcp_set_timeout(sk, ssk);
				release_sock(ssk);
				goto restart;
			}
		}
	}

	mptcp_set_timeout(sk, ssk);
	if (copied) {
		tcp_push(ssk, msg->msg_flags, mss_now, tcp_sk(ssk)->nonagle,
			 size_goal);

		/* start the timer, if it's not pending */
		if (!mptcp_timer_pending(sk))
			mptcp_reset_timer(sk);
	}

	release_sock(ssk);
out:
	ssk_check_wmem(msk);
	release_sock(sk);
	return copied ? : ret;
}

static void mptcp_wait_data(struct sock *sk, long *timeo)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct mptcp_sock *msk = mptcp_sk(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	sk_wait_event(sk, timeo,
		      test_and_clear_bit(MPTCP_DATA_READY, &msk->flags), &wait);

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	remove_wait_queue(sk_sleep(sk), &wait);
}

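/* Copy up to @len already in-sequence bytes from the msk receive queue to
 * @msg, freeing fully consumed skbs and recording partial progress in the
 * per-skb offset.
 */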
static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
				struct msghdr *msg,
				size_t len)
{
	struct sock *sk = (struct sock *)msk;
	struct sk_buff *skb;
	int copied = 0;

	while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
		u32 offset = MPTCP_SKB_CB(skb)->offset;
		u32 data_len = skb->len - offset;
		u32 count = min_t(size_t, len - copied, data_len);
		int err;

		err = skb_copy_datagram_msg(skb, offset, msg, count);
		if (unlikely(err < 0)) {
			if (!copied)
				return err;
			break;
		}

		copied += count;

		if (count < data_len) {
			MPTCP_SKB_CB(skb)->offset += count;
			break;
		}

		__skb_unlink(skb, &sk->sk_receive_queue);
		__kfree_skb(skb);

		if (copied >= len)
			break;
	}

	return copied;
}

/* receive buffer autotuning.  See tcp_rcv_space_adjust for more information.
 *
 * Only difference: Use highest rtt estimate of the subflows in use.
 */
static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	u32 time, advmss = 1;
	u64 rtt_us, mstamp;

	sock_owned_by_me(sk);

	if (copied <= 0)
		return;

	msk->rcvq_space.copied += copied;

	mstamp = div_u64(tcp_clock_ns(), NSEC_PER_USEC);
	time = tcp_stamp_us_delta(mstamp, msk->rcvq_space.time);

	rtt_us = msk->rcvq_space.rtt_us;
	if (rtt_us && time < (rtt_us >> 3))
		return;

	rtt_us = 0;
	mptcp_for_each_subflow(msk, subflow) {
		const struct tcp_sock *tp;
		u64 sf_rtt_us;
		u32 sf_advmss;

		tp = tcp_sk(mptcp_subflow_tcp_sock(subflow));

		sf_rtt_us = READ_ONCE(tp->rcv_rtt_est.rtt_us);
		sf_advmss = READ_ONCE(tp->advmss);

		rtt_us = max(sf_rtt_us, rtt_us);
		advmss = max(sf_advmss, advmss);
	}

	msk->rcvq_space.rtt_us = rtt_us;
	if (time < (rtt_us >> 3) || rtt_us == 0)
		return;

	if (msk->rcvq_space.copied <= msk->rcvq_space.space)
		goto new_measure;

	if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf &&
	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
		int rcvmem, rcvbuf;
		u64 rcvwin, grow;

		rcvwin = ((u64)msk->rcvq_space.copied << 1) + 16 * advmss;

		grow = rcvwin * (msk->rcvq_space.copied - msk->rcvq_space.space);

		do_div(grow, msk->rcvq_space.space);
		rcvwin += (grow << 1);

		rcvmem = SKB_TRUESIZE(advmss + MAX_TCP_HEADER);
		while (tcp_win_from_space(sk, rcvmem) < advmss)
			rcvmem += 128;

		do_div(rcvwin, advmss);
		rcvbuf = min_t(u64, rcvwin * rcvmem,
			       sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);

		if (rcvbuf > sk->sk_rcvbuf) {
			u32 window_clamp;

			window_clamp = tcp_win_from_space(sk, rcvbuf);
			WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);

			/* Make subflows follow along.  If we do not do this, we
			 * get drops at subflow level if skbs can't be moved to
			 * the mptcp rx queue fast enough (announced rcv_win can
			 * exceed ssk->sk_rcvbuf).
			 */
			mptcp_for_each_subflow(msk, subflow) {
				struct sock *ssk;
				bool slow;

				ssk = mptcp_subflow_tcp_sock(subflow);
				slow = lock_sock_fast(ssk);
				WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf);
				tcp_sk(ssk)->window_clamp = window_clamp;
				tcp_cleanup_rbuf(ssk, 1);
				unlock_sock_fast(ssk, slow);
			}
		}
	}

	msk->rcvq_space.space = msk->rcvq_space.copied;
new_measure:
	msk->rcvq_space.copied = 0;
	msk->rcvq_space.time = mstamp;
}

static bool __mptcp_move_skbs(struct mptcp_sock *msk)
{
	unsigned int moved = 0;
	bool done;

	/* avoid looping forever below on racing close */
	if (((struct sock *)msk)->sk_state == TCP_CLOSE)
		return false;

	__mptcp_flush_join_list(msk);
	do {
		struct sock *ssk = mptcp_subflow_recv_lookup(msk);

		if (!ssk)
			break;

		lock_sock(ssk);
		done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
		release_sock(ssk);
	} while (!done);

	if (mptcp_ofo_queue(msk) || moved > 0) {
		mptcp_check_data_fin((struct sock *)msk);
		return true;
	}
	return false;
}

static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			 int nonblock, int flags, int *addr_len)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	int copied = 0;
	int target;
	long timeo;

	if (msg->msg_flags & ~(MSG_WAITALL | MSG_DONTWAIT))
		return -EOPNOTSUPP;

	lock_sock(sk);
	timeo = sock_rcvtimeo(sk, nonblock);

	len = min_t(size_t, len, INT_MAX);
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	__mptcp_flush_join_list(msk);

	while (len > (size_t)copied) {
		int bytes_read;

		bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied);
		if (unlikely(bytes_read < 0)) {
			if (!copied)
				copied = bytes_read;
			goto out_err;
		}

		copied += bytes_read;

		if (skb_queue_empty(&sk->sk_receive_queue) &&
		    __mptcp_move_skbs(msk))
			continue;

		/* only the master socket status is relevant here. The exit
		 * conditions mirror closely tcp_recvmsg()
		 */
		if (copied >= target)
			break;

		if (copied) {
			if (sk->sk_err ||
			    sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    !timeo ||
			    signal_pending(current))
				break;
		} else {
			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}

			if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
				mptcp_check_for_eof(msk);

			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			if (sk->sk_state == TCP_CLOSE) {
				copied = -ENOTCONN;
				break;
			}

			if (!timeo) {
				copied = -EAGAIN;
				break;
			}

			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}

		pr_debug("block timeout %ld", timeo);
		mptcp_wait_data(sk, &timeo);
	}

	if (skb_queue_empty(&sk->sk_receive_queue)) {
		/* entire backlog drained, clear DATA_READY. */
		clear_bit(MPTCP_DATA_READY, &msk->flags);

		/* .. race-breaker: ssk might have gotten new data
		 * after last __mptcp_move_skbs() returned false.
		 */
		if (unlikely(__mptcp_move_skbs(msk)))
			set_bit(MPTCP_DATA_READY, &msk->flags);
	} else if (unlikely(!test_bit(MPTCP_DATA_READY, &msk->flags))) {
		/* data to read but mptcp_wait_data() cleared DATA_READY */
		set_bit(MPTCP_DATA_READY, &msk->flags);
	}
out_err:
	pr_debug("msk=%p data_ready=%d rx queue empty=%d copied=%d",
		 msk, test_bit(MPTCP_DATA_READY, &msk->flags),
		 skb_queue_empty(&sk->sk_receive_queue), copied);
	mptcp_rcv_space_adjust(msk, copied);

	release_sock(sk);
	return copied;
}

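/* MPTCP-level retransmit timer fired: if all data sent so far has been
 * data-acked just stop the timer, otherwise let the worker perform the
 * actual retransmission.
 */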
static void mptcp_retransmit_handler(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (atomic64_read(&msk->snd_una) == READ_ONCE(msk->write_seq)) {
		mptcp_stop_timer(sk);
	} else {
		set_bit(MPTCP_WORK_RTX, &msk->flags);
		if (schedule_work(&msk->work))
			sock_hold(sk);
	}
}

static void mptcp_retransmit_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk = from_timer(icsk, t,
						       icsk_retransmit_timer);
	struct sock *sk = &icsk->icsk_inet.sk;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		mptcp_retransmit_handler(sk);
	} else {
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED,
				      &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

/* Find an idle subflow.  Return NULL if there is unacked data at tcp
 * level.
 *
 * A backup subflow is returned only if that is the only kind available.
 */
static struct sock *mptcp_subflow_get_retrans(const struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *backup = NULL;

	sock_owned_by_me((const struct sock *)msk);

	if (__mptcp_check_fallback(msk))
		return msk->first;

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		if (!mptcp_subflow_active(subflow))
			continue;

		/* still data outstanding at TCP level?  Don't retransmit. */
		if (!tcp_write_queue_empty(ssk))
			return NULL;

		if (subflow->backup) {
			if (!backup)
				backup = ssk;
			continue;
		}

		return ssk;
	}

	return backup;
}

/* subflow sockets can be either outgoing (connect) or incoming
 * (accept).
 *
 * Outgoing subflows use in-kernel sockets.
 * Incoming subflows do not have their own 'struct socket' allocated,
 * so we need to use tcp_close() after detaching them from the mptcp
 * parent socket.
 */
void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
		       struct mptcp_subflow_context *subflow,
		       long timeout)
{
	struct socket *sock = READ_ONCE(ssk->sk_socket);

	list_del(&subflow->node);

	if (sock && sock != sk->sk_socket) {
		/* outgoing subflow */
		sock_release(sock);
	} else {
		/* incoming subflow */
		tcp_close(ssk, timeout);
	}
}

static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
{
	return 0;
}

static void pm_work(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;

	spin_lock_bh(&msk->pm.lock);

	pr_debug("msk=%p status=%x", msk, pm->status);
	if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) {
		pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
		mptcp_pm_nl_add_addr_received(msk);
	}
	if (pm->status & BIT(MPTCP_PM_RM_ADDR_RECEIVED)) {
		pm->status &= ~BIT(MPTCP_PM_RM_ADDR_RECEIVED);
		mptcp_pm_nl_rm_addr_received(msk);
	}
	if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) {
		pm->status &= ~BIT(MPTCP_PM_ESTABLISHED);
		mptcp_pm_nl_fully_established(msk);
	}
	if (pm->status & BIT(MPTCP_PM_SUBFLOW_ESTABLISHED)) {
		pm->status &= ~BIT(MPTCP_PM_SUBFLOW_ESTABLISHED);
		mptcp_pm_nl_subflow_established(msk);
	}

	spin_unlock_bh(&msk->pm.lock);
}

static void __mptcp_close_subflow(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow, *tmp;

	list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		if (inet_sk_state_load(ssk) != TCP_CLOSE)
			continue;

		__mptcp_close_ssk((struct sock *)msk, ssk, subflow, 0);
	}
}

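/* Workqueue handler: performs the msk housekeeping that cannot run in
 * atomic context - snd_una cleanup, DATA_FIN handling, path manager events,
 * closing of stale subflows and MPTCP-level retransmissions.
 */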
static void mptcp_worker(struct work_struct *work)
{
	struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
	struct sock *ssk, *sk = &msk->sk.icsk_inet.sk;
	int orig_len, orig_offset, mss_now = 0, size_goal = 0;
	struct mptcp_data_frag *dfrag;
	u64 orig_write_seq;
	size_t copied = 0;
	struct msghdr msg = {
		.msg_flags = MSG_DONTWAIT,
	};
	long timeo = 0;

	lock_sock(sk);
	mptcp_clean_una(sk);
	mptcp_check_data_fin_ack(sk);
	__mptcp_flush_join_list(msk);
	if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
		__mptcp_close_subflow(msk);

	__mptcp_move_skbs(msk);

	if (msk->pm.status)
		pm_work(msk);

	if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
		mptcp_check_for_eof(msk);

	mptcp_check_data_fin(sk);

	if (!test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
		goto unlock;

	dfrag = mptcp_rtx_head(sk);
	if (!dfrag)
		goto unlock;

	if (!mptcp_ext_cache_refill(msk))
		goto reset_unlock;

	ssk = mptcp_subflow_get_retrans(msk);
	if (!ssk)
		goto reset_unlock;

	lock_sock(ssk);

	orig_len = dfrag->data_len;
	orig_offset = dfrag->offset;
	orig_write_seq = dfrag->data_seq;
	while (dfrag->data_len > 0) {
		int ret = mptcp_sendmsg_frag(sk, ssk, &msg, dfrag, &timeo,
					     &mss_now, &size_goal);
		if (ret < 0)
			break;

		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RETRANSSEGS);
		copied += ret;
		dfrag->data_len -= ret;
		dfrag->offset += ret;

		if (!mptcp_ext_cache_refill(msk))
			break;
	}
	if (copied)
		tcp_push(ssk, msg.msg_flags, mss_now, tcp_sk(ssk)->nonagle,
			 size_goal);

	dfrag->data_seq = orig_write_seq;
	dfrag->offset = orig_offset;
	dfrag->data_len = orig_len;

	mptcp_set_timeout(sk, ssk);
	release_sock(ssk);

reset_unlock:
	if (!mptcp_timer_pending(sk))
		mptcp_reset_timer(sk);

unlock:
	release_sock(sk);
	sock_put(sk);
}

static int __mptcp_init_sock(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	spin_lock_init(&msk->join_list_lock);

	INIT_LIST_HEAD(&msk->conn_list);
	INIT_LIST_HEAD(&msk->join_list);
	INIT_LIST_HEAD(&msk->rtx_queue);
	__set_bit(MPTCP_SEND_SPACE, &msk->flags);
	INIT_WORK(&msk->work, mptcp_worker);
	msk->out_of_order_queue = RB_ROOT;

	msk->first = NULL;
	inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;

	mptcp_pm_data_init(msk);

	/* re-use the csk retrans timer for MPTCP-level retrans */
	timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0);

	return 0;
}

static int mptcp_init_sock(struct sock *sk)
{
	struct net *net = sock_net(sk);
	int ret;

	ret = __mptcp_init_sock(sk);
	if (ret)
		return ret;

	if (!mptcp_is_enabled(net))
		return -ENOPROTOOPT;

	if (unlikely(!net->mib.mptcp_statistics) && !mptcp_mib_alloc(net))
		return -ENOMEM;

	ret = __mptcp_socket_create(mptcp_sk(sk));
	if (ret)
		return ret;

	sk_sockets_allocated_inc(sk);
	sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];
	sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[1];

	return 0;
}

static void __mptcp_clear_xmit(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_data_frag *dtmp, *dfrag;

	sk_stop_timer(sk, &msk->sk.icsk_retransmit_timer);

	list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list)
		dfrag_clear(sk, dfrag);
}

static void mptcp_cancel_work(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (cancel_work_sync(&msk->work))
		sock_put(sk);
}

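/* Propagate an MPTCP-level shutdown to a single subflow: listening and
 * connecting sockets are torn down directly, fallback sockets get a plain
 * TCP shutdown, while established MPTCP subflows trigger an ack that will
 * carry the DATA_FIN option.
 */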
void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
{
	lock_sock(ssk);

	switch (ssk->sk_state) {
	case TCP_LISTEN:
		if (!(how & RCV_SHUTDOWN))
			break;
		fallthrough;
	case TCP_SYN_SENT:
		tcp_disconnect(ssk, O_NONBLOCK);
		break;
	default:
		if (__mptcp_check_fallback(mptcp_sk(sk))) {
			pr_debug("Fallback");
			ssk->sk_shutdown |= how;
			tcp_shutdown(ssk, how);
		} else {
			pr_debug("Sending DATA_FIN on subflow %p", ssk);
			mptcp_set_timeout(sk, ssk);
			tcp_send_ack(ssk);
		}
1901 1902 1903 1904 1905 1906
		break;
	}

	release_sock(ssk);
}

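/* MPTCP-level close state machine, modeled on tcp_close_state(): map
 * the current msk state to the next one on close()/shutdown(), with
 * TCP_ACTION_FIN flagging the states from which a DATA_FIN must be
 * sent.
 */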
static const unsigned char new_state[16] = {
	/* current state:     new state:      action:	*/
	[0 /* (Invalid) */] = TCP_CLOSE,
	[TCP_ESTABLISHED]   = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
	[TCP_SYN_SENT]      = TCP_CLOSE,
	[TCP_SYN_RECV]      = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
	[TCP_FIN_WAIT1]     = TCP_FIN_WAIT1,
	[TCP_FIN_WAIT2]     = TCP_FIN_WAIT2,
	[TCP_TIME_WAIT]     = TCP_CLOSE,	/* should not happen ! */
	[TCP_CLOSE]         = TCP_CLOSE,
	[TCP_CLOSE_WAIT]    = TCP_LAST_ACK  | TCP_ACTION_FIN,
	[TCP_LAST_ACK]      = TCP_LAST_ACK,
	[TCP_LISTEN]        = TCP_CLOSE,
	[TCP_CLOSING]       = TCP_CLOSING,
	[TCP_NEW_SYN_RECV]  = TCP_CLOSE,	/* should not happen ! */
};

static int mptcp_close_state(struct sock *sk)
{
	int next = (int)new_state[sk->sk_state];
	int ns = next & TCP_STATE_MASK;

	inet_sk_state_store(sk, ns);

	return next & TCP_ACTION_FIN;
}

static void mptcp_close(struct sock *sk, long timeout)
{
	struct mptcp_subflow_context *subflow, *tmp;
	struct mptcp_sock *msk = mptcp_sk(sk);
	LIST_HEAD(conn_list);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == TCP_LISTEN) {
		inet_sk_state_store(sk, TCP_CLOSE);
		goto cleanup;
	} else if (sk->sk_state == TCP_CLOSE) {
		goto cleanup;
	}

	if (__mptcp_check_fallback(msk)) {
		goto update_state;
	} else if (mptcp_close_state(sk)) {
		pr_debug("Sending DATA_FIN sk=%p", sk);
		WRITE_ONCE(msk->write_seq, msk->write_seq + 1);
		WRITE_ONCE(msk->snd_data_fin_enable, 1);

		mptcp_for_each_subflow(msk, subflow) {
			struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);

			mptcp_subflow_shutdown(sk, tcp_sk, SHUTDOWN_MASK);
		}
	}

	sk_stream_wait_close(sk, timeout);

update_state:
	inet_sk_state_store(sk, TCP_CLOSE);

cleanup:
	/* be sure to always acquire the join list lock, to sync vs
	 * mptcp_finish_join().
	 */
	spin_lock_bh(&msk->join_list_lock);
	list_splice_tail_init(&msk->join_list, &msk->conn_list);
	spin_unlock_bh(&msk->join_list_lock);
	list_splice_init(&msk->conn_list, &conn_list);

	__mptcp_clear_xmit(sk);

	release_sock(sk);

	list_for_each_entry_safe(subflow, tmp, &conn_list, node) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		__mptcp_close_ssk(sk, ssk, subflow, timeout);
	}

	mptcp_cancel_work(sk);

	__skb_queue_purge(&sk->sk_receive_queue);

	sk_common_release(sk);
}

static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	const struct ipv6_pinfo *ssk6 = inet6_sk(ssk);
	struct ipv6_pinfo *msk6 = inet6_sk(msk);

	msk->sk_v6_daddr = ssk->sk_v6_daddr;
	msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr;

	if (msk6 && ssk6) {
		msk6->saddr = ssk6->saddr;
		msk6->flow_label = ssk6->flow_label;
	}
#endif

	inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num;
	inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport;
	inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport;
	inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr;
	inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr;
	inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr;
}

static int mptcp_disconnect(struct sock *sk, int flags)
{
	/* Should never be called.
	 * inet_stream_connect() calls ->disconnect, but that
	 * refers to the subflow socket, not the mptcp one.
	 */
	WARN_ON_ONCE(1);
	return 0;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
{
	unsigned int offset = sizeof(struct mptcp6_sock) - sizeof(struct ipv6_pinfo);

	return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
}
#endif

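/* Clone the listener msk for a freshly accepted MP_CAPABLE request,
 * from the subflow request path (see subflow_syn_recv_sock()). The new
 * msk starts in TCP_SYN_RECV and is moved to fully established only
 * once the MPC handshake completes on the first subflow.
 */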
struct sock *mptcp_sk_clone(const struct sock *sk,
			    const struct mptcp_options_received *mp_opt,
			    struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC);
	struct mptcp_sock *msk;
	u64 ack_seq;

	if (!nsk)
		return NULL;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (nsk->sk_family == AF_INET6)
		inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk);
#endif

	__mptcp_init_sock(nsk);

	msk = mptcp_sk(nsk);
	msk->local_key = subflow_req->local_key;
	msk->token = subflow_req->token;
	msk->subflow = NULL;
	WRITE_ONCE(msk->fully_established, false);

	msk->write_seq = subflow_req->idsn + 1;
	atomic64_set(&msk->snd_una, msk->write_seq);
	if (mp_opt->mp_capable) {
		msk->can_ack = true;
		msk->remote_key = mp_opt->sndr_key;
		mptcp_crypto_key_sha(msk->remote_key, NULL, &ack_seq);
		ack_seq++;
		WRITE_ONCE(msk->ack_seq, ack_seq);
	}

	sock_reset_flag(nsk, SOCK_RCU_FREE);
	/* will be fully established after successful MPC subflow creation */
	inet_sk_state_store(nsk, TCP_SYN_RECV);
	bh_unlock_sock(nsk);

	/* keep a single reference */
	__sock_put(nsk);
	return nsk;
}

void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
{
	const struct tcp_sock *tp = tcp_sk(ssk);

	msk->rcvq_space.copied = 0;
	msk->rcvq_space.rtt_us = 0;

	msk->rcvq_space.time = tp->tcp_mstamp;

	/* initial rcv_space offering made to peer */
	msk->rcvq_space.space = min_t(u32, tp->rcv_wnd,
				      TCP_INIT_CWND * tp->advmss);
	if (msk->rcvq_space.space == 0)
		msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT;
}

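/* ->accept() for the msk: children are accepted on the first subflow
 * listener; if the peer did not negotiate MPTCP, the plain TCP child
 * is returned as-is and accounted as a passive fallback.
 */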
static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
				 bool kern)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *listener;
	struct sock *newsk;

	listener = __mptcp_nmpc_socket(msk);
	if (WARN_ON_ONCE(!listener)) {
		*err = -EINVAL;
		return NULL;
	}

	pr_debug("msk=%p, listener=%p", msk, mptcp_subflow_ctx(listener->sk));
	newsk = inet_csk_accept(listener->sk, flags, err, kern);
	if (!newsk)
		return NULL;

	pr_debug("msk=%p, subflow is mptcp=%d", msk, sk_is_mptcp(newsk));
	if (sk_is_mptcp(newsk)) {
		struct mptcp_subflow_context *subflow;
		struct sock *new_mptcp_sock;
		struct sock *ssk = newsk;

		subflow = mptcp_subflow_ctx(newsk);
		new_mptcp_sock = subflow->conn;

		/* is_mptcp should be false if subflow->conn is missing, see
		 * subflow_syn_recv_sock()
		 */
		if (WARN_ON_ONCE(!new_mptcp_sock)) {
			tcp_sk(newsk)->is_mptcp = 0;
			return newsk;
		}

		/* acquire the 2nd reference for the owning socket */
		sock_hold(new_mptcp_sock);

		local_bh_disable();
		bh_lock_sock(new_mptcp_sock);
		msk = mptcp_sk(new_mptcp_sock);
		msk->first = newsk;

		newsk = new_mptcp_sock;
		mptcp_copy_inaddrs(newsk, ssk);
		list_add(&subflow->node, &msk->conn_list);

		mptcp_rcv_space_init(msk, ssk);
		bh_unlock_sock(new_mptcp_sock);

		__MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
		local_bh_enable();
	} else {
		MPTCP_INC_STATS(sock_net(sk),
				MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);
	}

	return newsk;
}

void mptcp_destroy_common(struct mptcp_sock *msk)
{
	skb_rbtree_purge(&msk->out_of_order_queue);
	mptcp_token_destroy(msk);
	mptcp_pm_free_anno_list(msk);
}

static void mptcp_destroy(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (msk->cached_ext)
		__skb_ext_put(msk->cached_ext);

	mptcp_destroy_common(msk);
	sk_sockets_allocated_dec(sk);
}

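/* SOL_SOCKET options that must be mirrored on the msk. Before the
 * connection is established they land on the initial subflow, e.g.
 * (illustrative userspace snippet, not part of this file):
 *
 *	int one = 1;
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
 */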
static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname,
				       sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = (struct sock *)msk;
	struct socket *ssock;
	int ret;

	switch (optname) {
	case SO_REUSEPORT:
	case SO_REUSEADDR:
		lock_sock(sk);
		ssock = __mptcp_nmpc_socket(msk);
		if (!ssock) {
			release_sock(sk);
			return -EINVAL;
		}

		ret = sock_setsockopt(ssock, SOL_SOCKET, optname, optval, optlen);
		if (ret == 0) {
			if (optname == SO_REUSEPORT)
				sk->sk_reuseport = ssock->sk->sk_reuseport;
			else if (optname == SO_REUSEADDR)
				sk->sk_reuse = ssock->sk->sk_reuse;
		}
		release_sock(sk);
		return ret;
	}

	return sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname, optval, optlen);
}

static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname,
			       sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = (struct sock *)msk;
	int ret = -EOPNOTSUPP;
	struct socket *ssock;

	switch (optname) {
	case IPV6_V6ONLY:
		lock_sock(sk);
		ssock = __mptcp_nmpc_socket(msk);
		if (!ssock) {
			release_sock(sk);
			return -EINVAL;
		}

		ret = tcp_setsockopt(ssock->sk, SOL_IPV6, optname, optval, optlen);
		if (ret == 0)
			sk->sk_ipv6only = ssock->sk->sk_ipv6only;

		release_sock(sk);
		break;
	}

	return ret;
}

static int mptcp_setsockopt(struct sock *sk, int level, int optname,
			    sockptr_t optval, unsigned int optlen)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct sock *ssk;

	pr_debug("msk=%p", msk);

	if (level == SOL_SOCKET)
		return mptcp_setsockopt_sol_socket(msk, optname, optval, optlen);

	/* @@ the meaning of setsockopt() when the socket is connected and
	 * there are multiple subflows is not yet defined. It is up to the
	 * MPTCP-level socket to configure the subflows until the subflow
	 * is in TCP fallback, when TCP socket options are passed through
	 * to the one remaining subflow.
	 */
	lock_sock(sk);
	ssk = __mptcp_tcp_fallback(msk);
	release_sock(sk);
	if (ssk)
		return tcp_setsockopt(ssk, level, optname, optval, optlen);

	if (level == SOL_IPV6)
		return mptcp_setsockopt_v6(msk, optname, optval, optlen);

	return -EOPNOTSUPP;
}

static int mptcp_getsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int __user *option)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct sock *ssk;

	pr_debug("msk=%p", msk);

	/* @@ the meaning of getsockopt() when the socket is connected and
	 * there are multiple subflows is not yet defined. It is up to the
	 * MPTCP-level socket to configure the subflows until the subflow
	 * is in TCP fallback, when socket options are passed through
	 * to the one remaining subflow.
	 */
	lock_sock(sk);
	ssk = __mptcp_tcp_fallback(msk);
	release_sock(sk);
	if (ssk)
		return tcp_getsockopt(ssk, level, optname, optval, option);

	return -EOPNOTSUPP;
}

#define MPTCP_DEFERRED_ALL (TCPF_DELACK_TIMER_DEFERRED | \
			    TCPF_WRITE_TIMER_DEFERRED)

/* this is very alike tcp_release_cb() but we must handle differently a
 * different set of events
 */
static void mptcp_release_cb(struct sock *sk)
{
	unsigned long flags, nflags;

	do {
		flags = sk->sk_tsq_flags;
		if (!(flags & MPTCP_DEFERRED_ALL))
			return;
		nflags = flags & ~MPTCP_DEFERRED_ALL;
	} while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags);

	sock_release_ownership(sk);

	if (flags & TCPF_DELACK_TIMER_DEFERRED) {
		struct mptcp_sock *msk = mptcp_sk(sk);
		struct sock *ssk;

		ssk = mptcp_subflow_recv_lookup(msk);
		if (!ssk || !schedule_work(&msk->work))
			__sock_put(sk);
	}

	if (flags & TCPF_WRITE_TIMER_DEFERRED) {
		mptcp_retransmit_handler(sk);
		__sock_put(sk);
	}
}

static int mptcp_hash(struct sock *sk)
{
	/* should never be called,
	 * we hash the TCP subflows not the master socket
	 */
	WARN_ON_ONCE(1);
	return 0;
}

static void mptcp_unhash(struct sock *sk)
{
	/* called from sk_common_release(), but nothing to do here */
}

static int mptcp_get_port(struct sock *sk, unsigned short snum)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *ssock;

	ssock = __mptcp_nmpc_socket(msk);
	pr_debug("msk=%p, subflow=%p", msk, ssock);
	if (WARN_ON_ONCE(!ssock))
		return -EINVAL;

	return inet_csk_get_port(ssock->sk, snum);
}

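/* Called by the first subflow when an active MP_CAPABLE handshake
 * completes: seed the msk sequence space (write_seq from the local
 * IDSN, ack_seq derived from the peer's key) before any data can flow.
 */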
void mptcp_finish_connect(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk;
	struct sock *sk;
	u64 ack_seq;

	subflow = mptcp_subflow_ctx(ssk);
	sk = subflow->conn;
	msk = mptcp_sk(sk);

	pr_debug("msk=%p, token=%u", sk, subflow->token);

	mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq);
	ack_seq++;
	subflow->map_seq = ack_seq;
	subflow->map_subflow_seq = 1;

	/* the socket is not connected yet, no msk/subflow ops can access/race
	 * accessing the field below
	 */
	WRITE_ONCE(msk->remote_key, subflow->remote_key);
	WRITE_ONCE(msk->local_key, subflow->local_key);
	WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
	WRITE_ONCE(msk->ack_seq, ack_seq);
	WRITE_ONCE(msk->can_ack, 1);
	atomic64_set(&msk->snd_una, msk->write_seq);

	mptcp_pm_new_connection(msk, 0);

	mptcp_rcv_space_init(msk, ssk);
}

static void mptcp_sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	rcu_assign_pointer(sk->sk_wq, &parent->wq);
	sk_set_socket(sk, parent);
	sk->sk_uid = SOCK_INODE(parent)->i_uid;
	write_unlock_bh(&sk->sk_callback_lock);
}

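/* Called by a passive MP_JOIN subflow that reached the established
 * state: attach it to the msk. New joins are parked on msk->join_list
 * under join_list_lock and spliced into conn_list later, as the msk
 * socket lock cannot be acquired from this context.
 */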
bool mptcp_finish_join(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	struct sock *parent = (void *)msk;
	struct socket *parent_sock;
	bool ret;

	pr_debug("msk=%p, subflow=%p", msk, subflow);

	/* mptcp socket already closing? */
	if (!mptcp_is_fully_established(parent))
		return false;

	if (!msk->pm.server_side)
		return true;

	if (!mptcp_pm_allow_new_subflow(msk))
		return false;

	/* active connections are already on conn_list, and we can't acquire
	 * msk lock here.
	 * use the join list lock as synchronization point and double-check
	 * msk status to avoid racing with mptcp_close()
	 */
	spin_lock_bh(&msk->join_list_lock);
	ret = inet_sk_state_load(parent) == TCP_ESTABLISHED;
	if (ret && !WARN_ON_ONCE(!list_empty(&subflow->node)))
		list_add_tail(&subflow->node, &msk->join_list);
	spin_unlock_bh(&msk->join_list_lock);
	if (!ret)
		return false;

	/* attach to the msk socket only after we are sure it will deal with
	 * us at close time
	 */
	parent_sock = READ_ONCE(parent->sk_socket);
	if (parent_sock && !sk->sk_socket)
		mptcp_sock_graft(sk, parent_sock);
	subflow->map_seq = READ_ONCE(msk->ack_seq);
	return true;
}

static bool mptcp_memory_free(const struct sock *sk, int wake)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	return wake ? test_bit(MPTCP_SEND_SPACE, &msk->flags) : true;
}

static struct proto mptcp_prot = {
	.name		= "MPTCP",
	.owner		= THIS_MODULE,
	.init		= mptcp_init_sock,
	.disconnect	= mptcp_disconnect,
	.close		= mptcp_close,
	.accept		= mptcp_accept,
	.setsockopt	= mptcp_setsockopt,
	.getsockopt	= mptcp_getsockopt,
	.shutdown	= tcp_shutdown,
	.destroy	= mptcp_destroy,
	.sendmsg	= mptcp_sendmsg,
	.recvmsg	= mptcp_recvmsg,
	.release_cb	= mptcp_release_cb,
	.hash		= mptcp_hash,
	.unhash		= mptcp_unhash,
	.get_port	= mptcp_get_port,
	.sockets_allocated	= &mptcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.stream_memory_free	= mptcp_memory_free,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_mem	= sysctl_tcp_mem,
	.obj_size	= sizeof(struct mptcp_sock),
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
	.no_autobind	= true,
};

static int mptcp_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	lock_sock(sock->sk);
	ssock = __mptcp_nmpc_socket(msk);
	if (!ssock) {
		err = -EINVAL;
		goto unlock;
	}

	err = ssock->ops->bind(ssock, uaddr, addr_len);
	if (!err)
		mptcp_copy_inaddrs(sock->sk, ssock->sk);

unlock:
	release_sock(sock->sk);
	return err;
}

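/* Give up on MPTCP before the SYN is sent: clear the request flag so
 * no MP_CAPABLE option is emitted, and mark the msk as fallen back to
 * plain TCP.
 */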
static void mptcp_subflow_early_fallback(struct mptcp_sock *msk,
					 struct mptcp_subflow_context *subflow)
{
	subflow->request_mptcp = 0;
	__mptcp_do_fallback(msk);
}

static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
				int addr_len, int flags)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct mptcp_subflow_context *subflow;
	struct socket *ssock;
	int err;

	lock_sock(sock->sk);
	if (sock->state != SS_UNCONNECTED && msk->subflow) {
		/* pending connection or invalid state, let existing subflow
		 * cope with that
		 */
		ssock = msk->subflow;
		goto do_connect;
	}

	ssock = __mptcp_nmpc_socket(msk);
	if (!ssock) {
		err = -EINVAL;
		goto unlock;
	}

	mptcp_token_destroy(msk);
	inet_sk_state_store(sock->sk, TCP_SYN_SENT);
	subflow = mptcp_subflow_ctx(ssock->sk);
#ifdef CONFIG_TCP_MD5SIG
	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
	 * TCP option space.
	 */
	if (rcu_access_pointer(tcp_sk(ssock->sk)->md5sig_info))
		mptcp_subflow_early_fallback(msk, subflow);
#endif
	if (subflow->request_mptcp && mptcp_token_new_connect(ssock->sk))
		mptcp_subflow_early_fallback(msk, subflow);

do_connect:
	err = ssock->ops->connect(ssock, uaddr, addr_len, flags);
	sock->state = ssock->state;

	/* on successful connect, the msk state will be moved to established by
	 * subflow_finish_connect()
	 */
	if (!err || err == -EINPROGRESS)
		mptcp_copy_inaddrs(sock->sk, ssock->sk);
	else
		inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));

unlock:
	release_sock(sock->sk);
	return err;
}

static int mptcp_listen(struct socket *sock, int backlog)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	pr_debug("msk=%p", msk);

	lock_sock(sock->sk);
	ssock = __mptcp_nmpc_socket(msk);
	if (!ssock) {
		err = -EINVAL;
		goto unlock;
	}

	mptcp_token_destroy(msk);
	inet_sk_state_store(sock->sk, TCP_LISTEN);
	sock_set_flag(sock->sk, SOCK_RCU_FREE);

	err = ssock->ops->listen(ssock, backlog);
	inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
	if (!err)
		mptcp_copy_inaddrs(sock->sk, ssock->sk);

unlock:
	release_sock(sock->sk);
	return err;
}

static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
			       int flags, bool kern)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	pr_debug("msk=%p", msk);

	lock_sock(sock->sk);
	if (sock->sk->sk_state != TCP_LISTEN)
		goto unlock_fail;

	ssock = __mptcp_nmpc_socket(msk);
	if (!ssock)
		goto unlock_fail;

	clear_bit(MPTCP_DATA_READY, &msk->flags);
	sock_hold(ssock->sk);
	release_sock(sock->sk);

	err = ssock->ops->accept(sock, newsock, flags, kern);
	if (err == 0 && !mptcp_is_tcpsk(newsock->sk)) {
		struct mptcp_sock *msk = mptcp_sk(newsock->sk);
		struct mptcp_subflow_context *subflow;

		/* set ssk->sk_socket of accept()ed flows to mptcp socket.
		 * This is needed so NOSPACE flag can be set from tcp stack.
		 */
		__mptcp_flush_join_list(msk);
		mptcp_for_each_subflow(msk, subflow) {
			struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

			if (!ssk->sk_socket)
				mptcp_sock_graft(ssk, newsock);
		}
	}

	if (inet_csk_listen_poll(ssock->sk))
		set_bit(MPTCP_DATA_READY, &msk->flags);
	sock_put(ssock->sk);
	return err;

unlock_fail:
	release_sock(sock->sk);
	return -EINVAL;
}
static __poll_t mptcp_check_readable(struct mptcp_sock *msk)
{
	return test_bit(MPTCP_DATA_READY, &msk->flags) ? EPOLLIN | EPOLLRDNORM :
	       0;
}

static __poll_t mptcp_poll(struct file *file, struct socket *sock,
			   struct poll_table_struct *wait)
{
	struct sock *sk = sock->sk;
	struct mptcp_sock *msk;
	__poll_t mask = 0;
	int state;

	msk = mptcp_sk(sk);
	sock_poll_wait(file, sock, wait);

	state = inet_sk_state_load(sk);
	pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags);
	if (state == TCP_LISTEN)
		return mptcp_check_readable(msk);

	if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) {
		mask |= mptcp_check_readable(msk);
		if (test_bit(MPTCP_SEND_SPACE, &msk->flags))
			mask |= EPOLLOUT | EPOLLWRNORM;
	}
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;

	return mask;
}

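/* Note: 'how' arrives as SHUT_RD/SHUT_WR/SHUT_RDWR (0/1/2); the how++
 * below turns it into the internal RCV_SHUTDOWN/SEND_SHUTDOWN bitmask,
 * following the same convention as inet_shutdown().
 */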
static int mptcp_shutdown(struct socket *sock, int how)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct mptcp_subflow_context *subflow;
	int ret = 0;

	pr_debug("msk=%p, how=%d", msk, how);

	lock_sock(sock->sk);

	how++;
	if ((how & ~SHUTDOWN_MASK) || !how) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (sock->state == SS_CONNECTING) {
		if ((1 << sock->sk->sk_state) &
		    (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
			sock->state = SS_DISCONNECTING;
		else
			sock->state = SS_CONNECTED;
	}

	/* If we've already sent a FIN, or it's a closed state, skip this. */
	if (__mptcp_check_fallback(msk)) {
		if (how == SHUT_WR || how == SHUT_RDWR)
			inet_sk_state_store(sock->sk, TCP_FIN_WAIT1);

		mptcp_for_each_subflow(msk, subflow) {
			struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);

			mptcp_subflow_shutdown(sock->sk, tcp_sk, how);
		}
	} else if ((how & SEND_SHUTDOWN) &&
		   ((1 << sock->sk->sk_state) &
		    (TCPF_ESTABLISHED | TCPF_SYN_SENT |
		     TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) &&
		   mptcp_close_state(sock->sk)) {
		__mptcp_flush_join_list(msk);

		WRITE_ONCE(msk->write_seq, msk->write_seq + 1);
		WRITE_ONCE(msk->snd_data_fin_enable, 1);

		mptcp_for_each_subflow(msk, subflow) {
			struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);

			mptcp_subflow_shutdown(sock->sk, tcp_sk, how);
		}
	}

	/* Wake up anyone sleeping in poll. */
	sock->sk->sk_state_change(sock->sk);

out_unlock:
	release_sock(sock->sk);

	return ret;
}

static const struct proto_ops mptcp_stream_ops = {
	.family		   = PF_INET,
	.owner		   = THIS_MODULE,
	.release	   = inet_release,
	.bind		   = mptcp_bind,
	.connect	   = mptcp_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = mptcp_stream_accept,
	.getname	   = inet_getname,
	.poll		   = mptcp_poll,
	.ioctl		   = inet_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = mptcp_listen,
	.shutdown	   = mptcp_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = inet_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = inet_sendpage,
};

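/* Registered in mptcp_proto_init() below; this entry is what makes
 * socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP) resolve to mptcp_prot
 * and mptcp_stream_ops.
 */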
static struct inet_protosw mptcp_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_MPTCP,
	.prot		= &mptcp_prot,
	.ops		= &mptcp_stream_ops,
	.flags		= INET_PROTOSW_ICSK,
};

void __init mptcp_proto_init(void)
{
	mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo;

	if (percpu_counter_init(&mptcp_sockets_allocated, 0, GFP_KERNEL))
		panic("Failed to allocate MPTCP pcpu counter\n");

	mptcp_subflow_init();
	mptcp_pm_init();
	mptcp_token_init();

	if (proto_register(&mptcp_prot, 1) != 0)
		panic("Failed to register MPTCP proto.\n");

	inet_register_protosw(&mptcp_protosw);

	BUILD_BUG_ON(sizeof(struct mptcp_skb_cb) > sizeof_field(struct sk_buff, cb));
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static const struct proto_ops mptcp_v6_stream_ops = {
	.family		   = PF_INET6,
	.owner		   = THIS_MODULE,
	.release	   = inet6_release,
	.bind		   = mptcp_bind,
	.connect	   = mptcp_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = mptcp_stream_accept,
	.getname	   = inet6_getname,
	.poll		   = mptcp_poll,
	.ioctl		   = inet6_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = mptcp_listen,
	.shutdown	   = mptcp_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet6_sendmsg,
	.recvmsg	   = inet6_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = inet_sendpage,
#ifdef CONFIG_COMPAT
	.compat_ioctl	   = inet6_compat_ioctl,
#endif
};

static struct proto mptcp_v6_prot;

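/* v6 msk sockets need the extra inet6_destroy_sock() step on top of
 * the common mptcp_destroy() path.
 */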
static void mptcp_v6_destroy(struct sock *sk)
{
	mptcp_destroy(sk);
	inet6_destroy_sock(sk);
}

static struct inet_protosw mptcp_v6_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_MPTCP,
	.prot		= &mptcp_v6_prot,
	.ops		= &mptcp_v6_stream_ops,
	.flags		= INET_PROTOSW_ICSK,
};

int __init mptcp_proto_v6_init(void)
{
	int err;

	mptcp_v6_prot = mptcp_prot;
	strcpy(mptcp_v6_prot.name, "MPTCPv6");
	mptcp_v6_prot.slab = NULL;
	mptcp_v6_prot.destroy = mptcp_v6_destroy;
	mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock);

	err = proto_register(&mptcp_v6_prot, 1);
	if (err)
		return err;

	err = inet6_register_protosw(&mptcp_v6_protosw);
	if (err)
		proto_unregister(&mptcp_v6_prot);

	return err;
}
#endif