// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/atomic.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
#include "protocol.h"
#include "mib.h"

#define MPTCP_SAME_STATE TCP_MAX_STATES

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
struct mptcp6_sock {
	struct mptcp_sock msk;
	struct ipv6_pinfo np;
};
#endif

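/* per-skb receive state kept in skb->cb while the skb sits on the MPTCP-level
 * receive queue; currently only the not-yet-copied payload offset is tracked.
 */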
struct mptcp_skb_cb {
	u32 offset;
};

#define MPTCP_SKB_CB(__skb)	((struct mptcp_skb_cb *)&((__skb)->cb[0]))

static struct percpu_counter mptcp_sockets_allocated;

/* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not
 * completed yet or has failed, return the subflow socket.
 * Otherwise return NULL.
 */
static struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk)
{
	if (!msk->subflow || READ_ONCE(msk->can_ack))
		return NULL;

	return msk->subflow;
}

static bool mptcp_is_tcpsk(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;

	if (unlikely(sk->sk_prot == &tcp_prot)) {
		/* we are being invoked after mptcp_accept() has
		 * accepted a non-mp-capable flow: sk is a tcp_sk,
		 * not an mptcp one.
		 *
		 * Hand the socket over to tcp so all further socket ops
		 * bypass mptcp.
		 */
		sock->ops = &inet_stream_ops;
		return true;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	} else if (unlikely(sk->sk_prot == &tcpv6_prot)) {
		sock->ops = &inet6_stream_ops;
		return true;
#endif
	}

	return false;
}

static struct sock *__mptcp_tcp_fallback(struct mptcp_sock *msk)
{
	sock_owned_by_me((const struct sock *)msk);

	if (likely(!__mptcp_check_fallback(msk)))
		return NULL;

	return msk->first;
}

static int __mptcp_socket_create(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	struct socket *ssock;
	int err;

	err = mptcp_subflow_create_socket(sk, &ssock);
	if (err)
		return err;

	msk->first = ssock->sk;
	msk->subflow = ssock;
	subflow = mptcp_subflow_ctx(ssock->sk);
	list_add(&subflow->node, &msk->conn_list);
	subflow->request_mptcp = 1;

	/* accept() will wait on first subflow sk_wq, and we always wake up
	 * via msk->sk_socket
	 */
	RCU_INIT_POINTER(msk->first->sk_wq, &sk->sk_socket->wq);

	return 0;
}

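/* move one in-sequence skb from the subflow receive queue to the MPTCP-level
 * receive queue, coalescing it with the tail skb when possible and charging
 * the receive memory to the msk socket.
 */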
static void __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
			     struct sk_buff *skb,
			     unsigned int offset, size_t copy_len)
{
	struct sock *sk = (struct sock *)msk;
	struct sk_buff *tail;

	__skb_unlink(skb, &ssk->sk_receive_queue);

	skb_ext_reset(skb);
	skb_orphan(skb);
	msk->ack_seq += copy_len;

	tail = skb_peek_tail(&sk->sk_receive_queue);
	if (offset == 0 && tail) {
		bool fragstolen;
		int delta;

		if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
			kfree_skb_partial(skb, fragstolen);
			atomic_add(delta, &sk->sk_rmem_alloc);
			sk_mem_charge(sk, delta);
			return;
		}
	}

	skb_set_owner_r(skb, sk);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	MPTCP_SKB_CB(skb)->offset = offset;
}

/* both sockets must be locked */
static bool mptcp_subflow_dsn_valid(const struct mptcp_sock *msk,
				    struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u64 dsn = mptcp_subflow_get_mapped_dsn(subflow);

	/* revalidate data sequence number.
	 *
	 * mptcp_subflow_data_available() is usually called
	 * without msk lock.  It's unlikely (but possible)
	 * that msk->ack_seq has been advanced since the last
	 * call found in-sequence data.
	 */
	if (likely(dsn == msk->ack_seq))
		return true;

	subflow->data_avail = 0;
	return mptcp_subflow_data_available(ssk);
}

static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
					   struct sock *ssk,
					   unsigned int *bytes)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = (struct sock *)msk;
	unsigned int moved = 0;
	bool more_data_avail;
	struct tcp_sock *tp;
	bool done = false;

	if (!mptcp_subflow_dsn_valid(msk, ssk)) {
		*bytes = 0;
		return false;
	}

	tp = tcp_sk(ssk);
	do {
		u32 map_remaining, offset;
		u32 seq = tp->copied_seq;
		struct sk_buff *skb;
		bool fin;

		/* try to move as much data as available */
		map_remaining = subflow->map_data_len -
				mptcp_subflow_get_map_offset(subflow);

		skb = skb_peek(&ssk->sk_receive_queue);
		if (!skb)
			break;

		if (__mptcp_check_fallback(msk)) {
			/* if we are running under the workqueue, TCP could have
			 * collapsed skbs between dummy map creation and now,
			 * so be sure to adjust the size
			 */
			map_remaining = skb->len;
			subflow->map_data_len = skb->len;
		}

		offset = seq - TCP_SKB_CB(skb)->seq;
		fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
		if (fin) {
			done = true;
			seq++;
		}

		if (offset < skb->len) {
			size_t len = skb->len - offset;

			if (tp->urg_data)
				done = true;

			__mptcp_move_skb(msk, ssk, skb, offset, len);
			seq += len;
			moved += len;

			if (WARN_ON_ONCE(map_remaining < len))
				break;
		} else {
			WARN_ON_ONCE(!fin);
			sk_eat_skb(ssk, skb);
			done = true;
		}

		WRITE_ONCE(tp->copied_seq, seq);
		more_data_avail = mptcp_subflow_data_available(ssk);

		if (atomic_read(&sk->sk_rmem_alloc) > READ_ONCE(sk->sk_rcvbuf)) {
			done = true;
			break;
		}
	} while (more_data_avail);

	*bytes = moved;

	return done;
}

/* In most cases we will be able to lock the mptcp socket.  If it's already
 * owned, we need to defer to the work queue to avoid ABBA deadlock.
 */
static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
{
	struct sock *sk = (struct sock *)msk;
	unsigned int moved = 0;

	if (READ_ONCE(sk->sk_lock.owned))
		return false;

	if (unlikely(!spin_trylock_bh(&sk->sk_lock.slock)))
		return false;

	/* must re-check after taking the lock */
	if (!READ_ONCE(sk->sk_lock.owned))
		__mptcp_move_skbs_from_subflow(msk, ssk, &moved);

	spin_unlock_bh(&sk->sk_lock.slock);

	return moved > 0;
}

void mptcp_data_ready(struct sock *sk, struct sock *ssk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	set_bit(MPTCP_DATA_READY, &msk->flags);

	if (atomic_read(&sk->sk_rmem_alloc) < READ_ONCE(sk->sk_rcvbuf) &&
	    move_skbs_to_msk(msk, ssk))
		goto wake;

	/* don't schedule if mptcp sk is (still) over limit */
	if (atomic_read(&sk->sk_rmem_alloc) > READ_ONCE(sk->sk_rcvbuf))
		goto wake;

	/* mptcp socket is owned, release_cb should retry */
	if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
			      &sk->sk_tsq_flags)) {
		sock_hold(sk);

		/* need to try again, it's possible release_cb() has already
		 * been called after the test_and_set_bit() above.
		 */
		move_skbs_to_msk(msk, ssk);
	}
wake:
	sk->sk_data_ready(sk);
}

static void __mptcp_flush_join_list(struct mptcp_sock *msk)
{
	if (likely(list_empty(&msk->join_list)))
		return;

	spin_lock_bh(&msk->join_list_lock);
	list_splice_tail_init(&msk->join_list, &msk->conn_list);
	spin_unlock_bh(&msk->join_list_lock);
}

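/* MPTCP-level retransmit timeout: reuse the time left on the subflow
 * retransmit timer when one is pending, else keep the previous interval,
 * defaulting to TCP_RTO_MIN.
 */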
static void mptcp_set_timeout(const struct sock *sk, const struct sock *ssk)
{
	long tout = ssk && inet_csk(ssk)->icsk_pending ?
				      inet_csk(ssk)->icsk_timeout - jiffies : 0;

	if (tout <= 0)
		tout = mptcp_sk(sk)->timer_ival;
	mptcp_sk(sk)->timer_ival = tout > 0 ? tout : TCP_RTO_MIN;
}

static bool mptcp_timer_pending(struct sock *sk)
{
	return timer_pending(&inet_csk(sk)->icsk_retransmit_timer);
}

static void mptcp_reset_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	unsigned long tout;

	/* should never be called with mptcp level timer cleared */
	tout = READ_ONCE(mptcp_sk(sk)->timer_ival);
	if (WARN_ON_ONCE(!tout))
		tout = TCP_RTO_MIN;
	sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + tout);
}

void mptcp_data_acked(struct sock *sk)
{
	mptcp_reset_timer(sk);

	if (!sk_stream_is_writeable(sk) &&
	    schedule_work(&mptcp_sk(sk)->work))
		sock_hold(sk);
}

void mptcp_subflow_eof(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (!test_and_set_bit(MPTCP_WORK_EOF, &msk->flags) &&
	    schedule_work(&msk->work))
		sock_hold(sk);
}

static void mptcp_check_for_eof(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	int receivers = 0;

	mptcp_for_each_subflow(msk, subflow)
		receivers += !subflow->rx_eof;

	if (!receivers && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		/* hopefully temporary hack: propagate shutdown status
		 * to msk, when all subflows agree on it
		 */
		sk->sk_shutdown |= RCV_SHUTDOWN;

		smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
		set_bit(MPTCP_DATA_READY, &msk->flags);
		sk->sk_data_ready(sk);
	}
}

static void mptcp_stop_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	mptcp_sk(sk)->timer_ival = 0;
}

static bool mptcp_ext_cache_refill(struct mptcp_sock *msk)
{
	const struct sock *sk = (const struct sock *)msk;

	if (!msk->cached_ext)
		msk->cached_ext = __skb_ext_alloc(sk->sk_allocation);

	return !!msk->cached_ext;
}

static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;

	sock_owned_by_me(sk);

	mptcp_for_each_subflow(msk, subflow) {
		if (subflow->data_avail)
			return mptcp_subflow_tcp_sock(subflow);
	}

	return NULL;
}

static bool mptcp_skb_can_collapse_to(u64 write_seq,
				      const struct sk_buff *skb,
				      const struct mptcp_ext *mpext)
{
	if (!tcp_skb_can_collapse_to(skb))
		return false;

	/* can collapse only if MPTCP level sequence is in order */
	return mpext && mpext->data_seq + mpext->data_len == write_seq;
}

static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
				       const struct page_frag *pfrag,
				       const struct mptcp_data_frag *df)
{
	return df && pfrag->page == df->page &&
		df->data_seq + df->data_len == msk->write_seq;
}

static void dfrag_uncharge(struct sock *sk, int len)
{
	sk_mem_uncharge(sk, len);
	sk_wmem_queued_add(sk, -len);
}

static void dfrag_clear(struct sock *sk, struct mptcp_data_frag *dfrag)
{
	int len = dfrag->data_len + dfrag->overhead;

	list_del(&dfrag->list);
	dfrag_uncharge(sk, len);
	put_page(dfrag->page);
}

static void mptcp_clean_una(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_data_frag *dtmp, *dfrag;
	bool cleaned = false;
	u64 snd_una;

	/* on fallback we just need to ignore snd_una, as this is really
	 * plain TCP
	 */
	if (__mptcp_check_fallback(msk))
		atomic64_set(&msk->snd_una, msk->write_seq);
	snd_una = atomic64_read(&msk->snd_una);

	list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) {
		if (after64(dfrag->data_seq + dfrag->data_len, snd_una))
			break;

		dfrag_clear(sk, dfrag);
		cleaned = true;
	}

	dfrag = mptcp_rtx_head(sk);
	if (dfrag && after64(snd_una, dfrag->data_seq)) {
		u64 delta = snd_una - dfrag->data_seq;

		if (WARN_ON_ONCE(delta > dfrag->data_len))
			goto out;

		dfrag->data_seq += delta;
		dfrag->offset += delta;
		dfrag->data_len -= delta;

		dfrag_uncharge(sk, delta);
		cleaned = true;
	}

out:
	if (cleaned) {
		sk_mem_reclaim_partial(sk);

		/* Only wake up writers if a subflow is ready */
		if (test_bit(MPTCP_SEND_SPACE, &msk->flags))
			sk_stream_write_space(sk);
	}
}

/* ensure we get enough memory for the frag hdr, beyond some minimal amount of
 * data
 */
static bool mptcp_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
{
	if (likely(skb_page_frag_refill(32U + sizeof(struct mptcp_data_frag),
					pfrag, sk->sk_allocation)))
		return true;

	sk->sk_prot->enter_memory_pressure(sk);
	sk_stream_moderate_sndbuf(sk);
	return false;
}

static struct mptcp_data_frag *
mptcp_carve_data_frag(const struct mptcp_sock *msk, struct page_frag *pfrag,
		      int orig_offset)
{
	int offset = ALIGN(orig_offset, sizeof(long));
	struct mptcp_data_frag *dfrag;

	dfrag = (struct mptcp_data_frag *)(page_to_virt(pfrag->page) + offset);
	dfrag->data_len = 0;
	dfrag->data_seq = msk->write_seq;
	dfrag->overhead = offset - orig_offset + sizeof(struct mptcp_data_frag);
	dfrag->offset = offset + sizeof(struct mptcp_data_frag);
	dfrag->page = pfrag->page;

	return dfrag;
}

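/* transmit data on a single subflow, either new data from @msg or, for
 * MPTCP-level retransmissions, an existing @dfrag: copy new data into the
 * page frag (retransmissions reuse the already-stored dfrag page), push it
 * via do_tcp_sendpages() and set up or extend the DSS mapping carried in the
 * MPTCP skb extension.
 */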
static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
			      struct msghdr *msg, struct mptcp_data_frag *dfrag,
			      long *timeo, int *pmss_now,
			      int *ps_goal)
{
	int mss_now, avail_size, size_goal, offset, ret, frag_truesize = 0;
	bool dfrag_collapsed, can_collapse = false;
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_ext *mpext = NULL;
	bool retransmission = !!dfrag;
	struct sk_buff *skb, *tail;
	struct page_frag *pfrag;
	struct page *page;
	u64 *write_seq;
	size_t psize;

	/* use the mptcp page cache so that we can easily move the data
	 * from one substream to another, but do per subflow memory accounting.
	 * Note: pfrag is used only when !retransmission, but the compiler is
	 * fooled into a warning if we don't init here
	 */
	pfrag = sk_page_frag(sk);
	if (!retransmission) {
		write_seq = &msk->write_seq;
		page = pfrag->page;
	} else {
		write_seq = &dfrag->data_seq;
		page = dfrag->page;
	}

	/* compute copy limit */
	mss_now = tcp_send_mss(ssk, &size_goal, msg->msg_flags);
	*pmss_now = mss_now;
	*ps_goal = size_goal;
	avail_size = size_goal;
	skb = tcp_write_queue_tail(ssk);
	if (skb) {
		mpext = skb_ext_find(skb, SKB_EXT_MPTCP);

		/* Limit the write to the size available in the
		 * current skb, if any, so that we create at most a new skb.
		 * Explicitly tells TCP internals to avoid collapsing on later
		 * queue management operation, to avoid breaking the ext <->
		 * SSN association set here
		 */
		can_collapse = (size_goal - skb->len > 0) &&
			      mptcp_skb_can_collapse_to(*write_seq, skb, mpext);
		if (!can_collapse)
			TCP_SKB_CB(skb)->eor = 1;
		else
			avail_size = size_goal - skb->len;
	}

	if (!retransmission) {
		/* reuse tail pfrag, if possible, or carve a new one from the
		 * page allocator
		 */
		dfrag = mptcp_rtx_tail(sk);
		offset = pfrag->offset;
		dfrag_collapsed = mptcp_frag_can_collapse_to(msk, pfrag, dfrag);
		if (!dfrag_collapsed) {
			dfrag = mptcp_carve_data_frag(msk, pfrag, offset);
			offset = dfrag->offset;
			frag_truesize = dfrag->overhead;
		}
		psize = min_t(size_t, pfrag->size - offset, avail_size);

		/* Copy to page */
		pr_debug("left=%zu", msg_data_left(msg));
		psize = copy_page_from_iter(pfrag->page, offset,
					    min_t(size_t, msg_data_left(msg),
						  psize),
					    &msg->msg_iter);
		pr_debug("left=%zu", msg_data_left(msg));
		if (!psize)
			return -EINVAL;

		if (!sk_wmem_schedule(sk, psize + dfrag->overhead))
			return -ENOMEM;
	} else {
		offset = dfrag->offset;
		psize = min_t(size_t, dfrag->data_len, avail_size);
	}

	/* tell the TCP stack to delay the push so that we can safely
	 * access the skb after the sendpages call
	 */
	ret = do_tcp_sendpages(ssk, page, offset, psize,
			       msg->msg_flags | MSG_SENDPAGE_NOTLAST | MSG_DONTWAIT);
	if (ret <= 0)
		return ret;

	frag_truesize += ret;
	if (!retransmission) {
		if (unlikely(ret < psize))
			iov_iter_revert(&msg->msg_iter, psize - ret);

		/* send successful, keep track of sent data for mptcp-level
		 * retransmission
		 */
		dfrag->data_len += ret;
		if (!dfrag_collapsed) {
			get_page(dfrag->page);
			list_add_tail(&dfrag->list, &msk->rtx_queue);
			sk_wmem_queued_add(sk, frag_truesize);
		} else {
			sk_wmem_queued_add(sk, ret);
		}

		/* charge data on mptcp rtx queue to the master socket
		 * Note: we charge such data both to sk and ssk
		 */
		sk->sk_forward_alloc -= frag_truesize;
	}

	/* if the tail skb extension is still the cached one, collapsing
	 * really happened. Note: we can't check for 'same skb' as the sk_buff
	 * hdr on tail can be transmitted, freed and re-allocated by the
	 * do_tcp_sendpages() call
	 */
	tail = tcp_write_queue_tail(ssk);
	if (mpext && tail && mpext == skb_ext_find(tail, SKB_EXT_MPTCP)) {
		WARN_ON_ONCE(!can_collapse);
		mpext->data_len += ret;
		goto out;
	}

	skb = tcp_write_queue_tail(ssk);
	mpext = __skb_ext_set(skb, SKB_EXT_MPTCP, msk->cached_ext);
	msk->cached_ext = NULL;

	memset(mpext, 0, sizeof(*mpext));
	mpext->data_seq = *write_seq;
	mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq;
	mpext->data_len = ret;
	mpext->use_map = 1;
	mpext->dsn64 = 1;

	pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d",
		 mpext->data_seq, mpext->subflow_seq, mpext->data_len,
		 mpext->dsn64);

out:
	if (!retransmission)
		pfrag->offset += frag_truesize;
	*write_seq += ret;
	mptcp_subflow_ctx(ssk)->rel_write_seq += ret;

	return ret;
}

static void mptcp_nospace(struct mptcp_sock *msk, struct socket *sock)
{
	clear_bit(MPTCP_SEND_SPACE, &msk->flags);
	smp_mb__after_atomic(); /* msk->flags is changed by write_space cb */

	/* enables sk->write_space() callbacks */
	set_bit(SOCK_NOSPACE, &sock->flags);
}

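/* pick the subflow to send on: the first non-backup subflow with free send
 * space, falling back to a backup subflow; returns NULL if the extension
 * cache cannot be refilled or a subflow has run out of write memory.
 */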
static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *backup = NULL;

	sock_owned_by_me((const struct sock *)msk);

	if (!mptcp_ext_cache_refill(msk))
		return NULL;

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		if (!sk_stream_memory_free(ssk)) {
			struct socket *sock = ssk->sk_socket;

			if (sock)
				mptcp_nospace(msk, sock);

			return NULL;
		}

		if (subflow->backup) {
			if (!backup)
				backup = ssk;

			continue;
		}

		return ssk;
	}

	return backup;
}

static void ssk_check_wmem(struct mptcp_sock *msk, struct sock *ssk)
{
	struct socket *sock;

	if (likely(sk_stream_is_writeable(ssk)))
		return;

	sock = READ_ONCE(ssk->sk_socket);
	if (sock)
		mptcp_nospace(msk, sock);
}

static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	int mss_now = 0, size_goal = 0, ret = 0;
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct page_frag *pfrag;
	size_t copied = 0;
	struct sock *ssk;
	bool tx_ok;
	long timeo;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -EOPNOTSUPP;

	lock_sock(sk);

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
		ret = sk_stream_wait_connect(sk, &timeo);
		if (ret)
			goto out;
	}

	pfrag = sk_page_frag(sk);
restart:
	mptcp_clean_una(sk);

wait_for_sndbuf:
	__mptcp_flush_join_list(msk);
	ssk = mptcp_subflow_get_send(msk);
	while (!sk_stream_memory_free(sk) ||
	       !ssk ||
	       !mptcp_page_frag_refill(ssk, pfrag)) {
		if (ssk) {
			/* make sure retransmit timer is
			 * running before we wait for memory.
			 *
			 * The retransmit timer might be needed
			 * to make the peer send an up-to-date
			 * MPTCP Ack.
			 */
			mptcp_set_timeout(sk, ssk);
			if (!mptcp_timer_pending(sk))
				mptcp_reset_timer(sk);
		}

		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret)
			goto out;

		mptcp_clean_una(sk);

		ssk = mptcp_subflow_get_send(msk);
		if (list_empty(&msk->conn_list)) {
			ret = -ENOTCONN;
			goto out;
		}
	}

	pr_debug("conn_list->subflow=%p", ssk);

	lock_sock(ssk);
	tx_ok = msg_data_left(msg);
	while (tx_ok) {
		ret = mptcp_sendmsg_frag(sk, ssk, msg, NULL, &timeo, &mss_now,
					 &size_goal);
		if (ret < 0) {
			if (ret == -EAGAIN && timeo > 0) {
				mptcp_set_timeout(sk, ssk);
				release_sock(ssk);
				goto restart;
			}
			break;
		}

		copied += ret;

		tx_ok = msg_data_left(msg);
		if (!tx_ok)
			break;

		if (!sk_stream_memory_free(ssk) ||
		    !mptcp_page_frag_refill(ssk, pfrag) ||
		    !mptcp_ext_cache_refill(msk)) {
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			tcp_push(ssk, msg->msg_flags, mss_now,
				 tcp_sk(ssk)->nonagle, size_goal);
			mptcp_set_timeout(sk, ssk);
			release_sock(ssk);
			goto restart;
		}

		/* memory is charged to mptcp level socket as well, i.e.
		 * if msg is very large, mptcp socket may run out of buffer
		 * space.  mptcp_clean_una() will release data that has
		 * been acked at mptcp level in the mean time, so there is
		 * a good chance we can continue sending data right away.
		 *
		 * Normally, when the tcp subflow can accept more data, then
		 * so can the MPTCP socket.  However, we need to cope with
		 * peers that might lag behind in their MPTCP-level
		 * acknowledgements, i.e.  data might have been acked at
		 * tcp level only.  So, we must also check the MPTCP socket
		 * limits before we send more data.
		 */
		if (unlikely(!sk_stream_memory_free(sk))) {
			tcp_push(ssk, msg->msg_flags, mss_now,
				 tcp_sk(ssk)->nonagle, size_goal);
			mptcp_clean_una(sk);
			if (!sk_stream_memory_free(sk)) {
				/* can't send more for now, need to wait for
				 * MPTCP-level ACKs from peer.
				 *
				 * Wakeup will happen via mptcp_clean_una().
				 */
				mptcp_set_timeout(sk, ssk);
				release_sock(ssk);
				goto wait_for_sndbuf;
			}
		}
	}

	mptcp_set_timeout(sk, ssk);
	if (copied) {
		ret = copied;
		tcp_push(ssk, msg->msg_flags, mss_now, tcp_sk(ssk)->nonagle,
			 size_goal);

		/* start the timer, if it's not pending */
		if (!mptcp_timer_pending(sk))
			mptcp_reset_timer(sk);
	}

	ssk_check_wmem(msk, ssk);
	release_sock(ssk);
out:
	release_sock(sk);
	return ret;
}

static void mptcp_wait_data(struct sock *sk, long *timeo)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct mptcp_sock *msk = mptcp_sk(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	sk_wait_event(sk, timeo,
		      test_and_clear_bit(MPTCP_DATA_READY, &msk->flags), &wait);

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	remove_wait_queue(sk_sleep(sk), &wait);
}

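/* copy up to @len bytes from the MPTCP-level receive queue into @msg,
 * freeing fully consumed skbs and recording the offset of partial reads.
 */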
static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
				struct msghdr *msg,
				size_t len)
{
	struct sock *sk = (struct sock *)msk;
	struct sk_buff *skb;
	int copied = 0;

	while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
		u32 offset = MPTCP_SKB_CB(skb)->offset;
		u32 data_len = skb->len - offset;
		u32 count = min_t(size_t, len - copied, data_len);
		int err;

		err = skb_copy_datagram_msg(skb, offset, msg, count);
		if (unlikely(err < 0)) {
			if (!copied)
				return err;
			break;
		}

		copied += count;

		if (count < data_len) {
			MPTCP_SKB_CB(skb)->offset += count;
			break;
		}

		__skb_unlink(skb, &sk->sk_receive_queue);
		__kfree_skb(skb);

		if (copied >= len)
			break;
	}

	return copied;
}

/* receive buffer autotuning.  See tcp_rcv_space_adjust for more information.
 *
 * Only difference: Use highest rtt estimate of the subflows in use.
 */
static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	u32 time, advmss = 1;
	u64 rtt_us, mstamp;

	sock_owned_by_me(sk);

	if (copied <= 0)
		return;

	msk->rcvq_space.copied += copied;

	mstamp = div_u64(tcp_clock_ns(), NSEC_PER_USEC);
	time = tcp_stamp_us_delta(mstamp, msk->rcvq_space.time);

	rtt_us = msk->rcvq_space.rtt_us;
	if (rtt_us && time < (rtt_us >> 3))
		return;

	rtt_us = 0;
	mptcp_for_each_subflow(msk, subflow) {
		const struct tcp_sock *tp;
		u64 sf_rtt_us;
		u32 sf_advmss;

		tp = tcp_sk(mptcp_subflow_tcp_sock(subflow));

		sf_rtt_us = READ_ONCE(tp->rcv_rtt_est.rtt_us);
		sf_advmss = READ_ONCE(tp->advmss);

		rtt_us = max(sf_rtt_us, rtt_us);
		advmss = max(sf_advmss, advmss);
	}

	msk->rcvq_space.rtt_us = rtt_us;
	if (time < (rtt_us >> 3) || rtt_us == 0)
		return;

	if (msk->rcvq_space.copied <= msk->rcvq_space.space)
		goto new_measure;

	if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf &&
	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
		int rcvmem, rcvbuf;
		u64 rcvwin, grow;

		rcvwin = ((u64)msk->rcvq_space.copied << 1) + 16 * advmss;

		grow = rcvwin * (msk->rcvq_space.copied - msk->rcvq_space.space);

		do_div(grow, msk->rcvq_space.space);
		rcvwin += (grow << 1);

		rcvmem = SKB_TRUESIZE(advmss + MAX_TCP_HEADER);
		while (tcp_win_from_space(sk, rcvmem) < advmss)
			rcvmem += 128;

		do_div(rcvwin, advmss);
		rcvbuf = min_t(u64, rcvwin * rcvmem,
			       sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);

		if (rcvbuf > sk->sk_rcvbuf) {
			u32 window_clamp;

			window_clamp = tcp_win_from_space(sk, rcvbuf);
			WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);

			/* Make subflows follow along.  If we do not do this, we
			 * get drops at subflow level if skbs can't be moved to
			 * the mptcp rx queue fast enough (announced rcv_win can
			 * exceed ssk->sk_rcvbuf).
			 */
			mptcp_for_each_subflow(msk, subflow) {
				struct sock *ssk;

				ssk = mptcp_subflow_tcp_sock(subflow);
				WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf);
				tcp_sk(ssk)->window_clamp = window_clamp;
			}
		}
	}

	msk->rcvq_space.space = msk->rcvq_space.copied;
new_measure:
	msk->rcvq_space.copied = 0;
	msk->rcvq_space.time = mstamp;
}

static bool __mptcp_move_skbs(struct mptcp_sock *msk)
{
	unsigned int moved = 0;
	bool done;

	do {
		struct sock *ssk = mptcp_subflow_recv_lookup(msk);

		if (!ssk)
			break;

		lock_sock(ssk);
		done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
		release_sock(ssk);
	} while (!done);

	return moved > 0;
}

static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			 int nonblock, int flags, int *addr_len)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	int copied = 0;
	int target;
	long timeo;

	if (msg->msg_flags & ~(MSG_WAITALL | MSG_DONTWAIT))
		return -EOPNOTSUPP;

	lock_sock(sk);
	timeo = sock_rcvtimeo(sk, nonblock);

	len = min_t(size_t, len, INT_MAX);
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	__mptcp_flush_join_list(msk);

	while (len > (size_t)copied) {
		int bytes_read;

		bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied);
		if (unlikely(bytes_read < 0)) {
			if (!copied)
				copied = bytes_read;
			goto out_err;
		}

		copied += bytes_read;

		if (skb_queue_empty(&sk->sk_receive_queue) &&
		    __mptcp_move_skbs(msk))
			continue;

		/* only the master socket status is relevant here. The exit
		 * conditions mirror closely tcp_recvmsg()
		 */
		if (copied >= target)
			break;

		if (copied) {
			if (sk->sk_err ||
			    sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    !timeo ||
			    signal_pending(current))
				break;
		} else {
			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}

			if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
				mptcp_check_for_eof(msk);

			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			if (sk->sk_state == TCP_CLOSE) {
				copied = -ENOTCONN;
				break;
			}

			if (!timeo) {
				copied = -EAGAIN;
				break;
			}

			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}

		pr_debug("block timeout %ld", timeo);
		mptcp_wait_data(sk, &timeo);
	}

	if (skb_queue_empty(&sk->sk_receive_queue)) {
		/* entire backlog drained, clear DATA_READY. */
		clear_bit(MPTCP_DATA_READY, &msk->flags);

		/* .. race-breaker: ssk might have gotten new data
		 * after last __mptcp_move_skbs() returned false.
		 */
		if (unlikely(__mptcp_move_skbs(msk)))
			set_bit(MPTCP_DATA_READY, &msk->flags);
	} else if (unlikely(!test_bit(MPTCP_DATA_READY, &msk->flags))) {
		/* data to read but mptcp_wait_data() cleared DATA_READY */
		set_bit(MPTCP_DATA_READY, &msk->flags);
	}
out_err:
	mptcp_rcv_space_adjust(msk, copied);

	release_sock(sk);
	return copied;
}

static void mptcp_retransmit_handler(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (atomic64_read(&msk->snd_una) == msk->write_seq) {
		mptcp_stop_timer(sk);
	} else {
		set_bit(MPTCP_WORK_RTX, &msk->flags);
		if (schedule_work(&msk->work))
			sock_hold(sk);
	}
}

static void mptcp_retransmit_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk = from_timer(icsk, t,
						       icsk_retransmit_timer);
	struct sock *sk = &icsk->icsk_inet.sk;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		mptcp_retransmit_handler(sk);
	} else {
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED,
				      &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

/* Find an idle subflow.  Return NULL if there is unacked data at tcp
 * level.
 *
 * A backup subflow is returned only if that is the only kind available.
 */
static struct sock *mptcp_subflow_get_retrans(const struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *backup = NULL;

	sock_owned_by_me((const struct sock *)msk);

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		/* still data outstanding at TCP level?  Don't retransmit. */
		if (!tcp_write_queue_empty(ssk))
			return NULL;

		if (subflow->backup) {
			if (!backup)
				backup = ssk;
			continue;
		}

		return ssk;
	}

	return backup;
}

/* subflow sockets can be either outgoing (connect) or incoming
 * (accept).
 *
 * Outgoing subflows use in-kernel sockets.
 * Incoming subflows do not have their own 'struct socket' allocated,
 * so we need to use tcp_close() after detaching them from the mptcp
 * parent socket.
 */
static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
			      struct mptcp_subflow_context *subflow,
			      long timeout)
{
	struct socket *sock = READ_ONCE(ssk->sk_socket);

	list_del(&subflow->node);

	if (sock && sock != sk->sk_socket) {
		/* outgoing subflow */
		sock_release(sock);
	} else {
		/* incoming subflow */
		tcp_close(ssk, timeout);
	}
}

static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
{
	return 0;
}

static void pm_work(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;

	spin_lock_bh(&msk->pm.lock);

	pr_debug("msk=%p status=%x", msk, pm->status);
	if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) {
		pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
		mptcp_pm_nl_add_addr_received(msk);
	}
	if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) {
		pm->status &= ~BIT(MPTCP_PM_ESTABLISHED);
		mptcp_pm_nl_fully_established(msk);
	}
	if (pm->status & BIT(MPTCP_PM_SUBFLOW_ESTABLISHED)) {
		pm->status &= ~BIT(MPTCP_PM_SUBFLOW_ESTABLISHED);
		mptcp_pm_nl_subflow_established(msk);
	}

	spin_unlock_bh(&msk->pm.lock);
}

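/* workqueue handler: runs with the msk socket lock held and performs the
 * deferred work: cleaning acked data, flushing the join list, moving skbs
 * from the subflows, path-manager events and MPTCP-level retransmissions.
 */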
static void mptcp_worker(struct work_struct *work)
{
	struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
	struct sock *ssk, *sk = &msk->sk.icsk_inet.sk;
	int orig_len, orig_offset, mss_now = 0, size_goal = 0;
	struct mptcp_data_frag *dfrag;
	u64 orig_write_seq;
	size_t copied = 0;
	struct msghdr msg;
	long timeo = 0;

	lock_sock(sk);
	mptcp_clean_una(sk);
	__mptcp_flush_join_list(msk);
	__mptcp_move_skbs(msk);

	if (msk->pm.status)
		pm_work(msk);

	if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
		mptcp_check_for_eof(msk);

	if (!test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
		goto unlock;

	dfrag = mptcp_rtx_head(sk);
	if (!dfrag)
		goto unlock;

	if (!mptcp_ext_cache_refill(msk))
		goto reset_unlock;

	ssk = mptcp_subflow_get_retrans(msk);
	if (!ssk)
		goto reset_unlock;

	lock_sock(ssk);

	msg.msg_flags = MSG_DONTWAIT;
	orig_len = dfrag->data_len;
	orig_offset = dfrag->offset;
	orig_write_seq = dfrag->data_seq;
	while (dfrag->data_len > 0) {
		int ret = mptcp_sendmsg_frag(sk, ssk, &msg, dfrag, &timeo,
					     &mss_now, &size_goal);
		if (ret < 0)
			break;

		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RETRANSSEGS);
		copied += ret;
		dfrag->data_len -= ret;
		dfrag->offset += ret;

		if (!mptcp_ext_cache_refill(msk))
			break;
	}
	if (copied)
		tcp_push(ssk, msg.msg_flags, mss_now, tcp_sk(ssk)->nonagle,
			 size_goal);

	dfrag->data_seq = orig_write_seq;
	dfrag->offset = orig_offset;
	dfrag->data_len = orig_len;

	mptcp_set_timeout(sk, ssk);
	release_sock(ssk);

reset_unlock:
	if (!mptcp_timer_pending(sk))
		mptcp_reset_timer(sk);

unlock:
	release_sock(sk);
	sock_put(sk);
}

static int __mptcp_init_sock(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	spin_lock_init(&msk->join_list_lock);

	INIT_LIST_HEAD(&msk->conn_list);
	INIT_LIST_HEAD(&msk->join_list);
	INIT_LIST_HEAD(&msk->rtx_queue);
	__set_bit(MPTCP_SEND_SPACE, &msk->flags);
	INIT_WORK(&msk->work, mptcp_worker);

	msk->first = NULL;
	inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;

	mptcp_pm_data_init(msk);

	/* re-use the csk retrans timer for MPTCP-level retrans */
	timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0);

	return 0;
}

static int mptcp_init_sock(struct sock *sk)
{
	struct net *net = sock_net(sk);
	int ret;

	if (!mptcp_is_enabled(net))
		return -ENOPROTOOPT;

	if (unlikely(!net->mib.mptcp_statistics) && !mptcp_mib_alloc(net))
		return -ENOMEM;

	ret = __mptcp_init_sock(sk);
	if (ret)
		return ret;

	ret = __mptcp_socket_create(mptcp_sk(sk));
	if (ret)
		return ret;

	sk_sockets_allocated_inc(sk);
	sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];
	sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[2];

	return 0;
}

static void __mptcp_clear_xmit(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_data_frag *dtmp, *dfrag;

	sk_stop_timer(sk, &msk->sk.icsk_retransmit_timer);

	list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list)
		dfrag_clear(sk, dfrag);
}

static void mptcp_cancel_work(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (cancel_work_sync(&msk->work))
		sock_put(sk);
}

static void mptcp_subflow_shutdown(struct sock *ssk, int how,
				   bool data_fin_tx_enable, u64 data_fin_tx_seq)
{
	lock_sock(ssk);

	switch (ssk->sk_state) {
	case TCP_LISTEN:
		if (!(how & RCV_SHUTDOWN))
			break;
		/* fall through */
	case TCP_SYN_SENT:
		tcp_disconnect(ssk, O_NONBLOCK);
		break;
	default:
		if (data_fin_tx_enable) {
			struct mptcp_subflow_context *subflow;

			subflow = mptcp_subflow_ctx(ssk);
			subflow->data_fin_tx_seq = data_fin_tx_seq;
			subflow->data_fin_tx_enable = 1;
		}

		ssk->sk_shutdown |= how;
		tcp_shutdown(ssk, how);
		break;
	}

	release_sock(ssk);
}

/* Called with msk lock held, releases such lock before returning */
static void mptcp_close(struct sock *sk, long timeout)
{
	struct mptcp_subflow_context *subflow, *tmp;
	struct mptcp_sock *msk = mptcp_sk(sk);
	LIST_HEAD(conn_list);
	u64 data_fin_tx_seq;

	lock_sock(sk);

	inet_sk_state_store(sk, TCP_CLOSE);

	/* be sure to always acquire the join list lock, to sync vs
	 * mptcp_finish_join().
	 */
	spin_lock_bh(&msk->join_list_lock);
	list_splice_tail_init(&msk->join_list, &msk->conn_list);
	spin_unlock_bh(&msk->join_list_lock);
	list_splice_init(&msk->conn_list, &conn_list);

	data_fin_tx_seq = msk->write_seq;

	__mptcp_clear_xmit(sk);

	release_sock(sk);

	list_for_each_entry_safe(subflow, tmp, &conn_list, node) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		subflow->data_fin_tx_seq = data_fin_tx_seq;
		subflow->data_fin_tx_enable = 1;
		__mptcp_close_ssk(sk, ssk, subflow, timeout);
	}

	mptcp_cancel_work(sk);

	__skb_queue_purge(&sk->sk_receive_queue);

	sk_common_release(sk);
}

static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	const struct ipv6_pinfo *ssk6 = inet6_sk(ssk);
	struct ipv6_pinfo *msk6 = inet6_sk(msk);

	msk->sk_v6_daddr = ssk->sk_v6_daddr;
	msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr;

	if (msk6 && ssk6) {
		msk6->saddr = ssk6->saddr;
		msk6->flow_label = ssk6->flow_label;
	}
#endif

	inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num;
	inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport;
	inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport;
	inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr;
	inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr;
	inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr;
}

static int mptcp_disconnect(struct sock *sk, int flags)
{
	/* Should never be called.
	 * inet_stream_connect() calls ->disconnect, but that
	 * refers to the subflow socket, not the mptcp one.
	 */
	WARN_ON_ONCE(1);
	return 0;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
{
	unsigned int offset = sizeof(struct mptcp6_sock) - sizeof(struct ipv6_pinfo);

	return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
}
#endif

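/* clone the listener msk for a passively established connection and
 * initialise it from the subflow request socket and the received options.
 */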
struct sock *mptcp_sk_clone(const struct sock *sk,
			    const struct mptcp_options_received *mp_opt,
			    struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC);
	struct mptcp_sock *msk;
	u64 ack_seq;

	if (!nsk)
		return NULL;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (nsk->sk_family == AF_INET6)
		inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk);
#endif

	__mptcp_init_sock(nsk);

	msk = mptcp_sk(nsk);
	msk->local_key = subflow_req->local_key;
	msk->token = subflow_req->token;
	msk->subflow = NULL;
	WRITE_ONCE(msk->fully_established, false);

	msk->write_seq = subflow_req->idsn + 1;
	atomic64_set(&msk->snd_una, msk->write_seq);
	if (mp_opt->mp_capable) {
		msk->can_ack = true;
		msk->remote_key = mp_opt->sndr_key;
		mptcp_crypto_key_sha(msk->remote_key, NULL, &ack_seq);
		ack_seq++;
		msk->ack_seq = ack_seq;
	}

	sock_reset_flag(nsk, SOCK_RCU_FREE);
	/* will be fully established after successful MPC subflow creation */
	inet_sk_state_store(nsk, TCP_SYN_RECV);
	bh_unlock_sock(nsk);

	/* keep a single reference */
	__sock_put(nsk);
	return nsk;
}

void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
{
	const struct tcp_sock *tp = tcp_sk(ssk);

	msk->rcvq_space.copied = 0;
	msk->rcvq_space.rtt_us = 0;

	msk->rcvq_space.time = tp->tcp_mstamp;

	/* initial rcv_space offering made to peer */
	msk->rcvq_space.space = min_t(u32, tp->rcv_wnd,
				      TCP_INIT_CWND * tp->advmss);
	if (msk->rcvq_space.space == 0)
		msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT;
}

static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
				 bool kern)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *listener;
	struct sock *newsk;

	listener = __mptcp_nmpc_socket(msk);
	if (WARN_ON_ONCE(!listener)) {
		*err = -EINVAL;
		return NULL;
	}

	pr_debug("msk=%p, listener=%p", msk, mptcp_subflow_ctx(listener->sk));
	newsk = inet_csk_accept(listener->sk, flags, err, kern);
	if (!newsk)
		return NULL;

	pr_debug("msk=%p, subflow is mptcp=%d", msk, sk_is_mptcp(newsk));
	if (sk_is_mptcp(newsk)) {
		struct mptcp_subflow_context *subflow;
		struct sock *new_mptcp_sock;
		struct sock *ssk = newsk;

		subflow = mptcp_subflow_ctx(newsk);
		new_mptcp_sock = subflow->conn;

		/* is_mptcp should be false if subflow->conn is missing, see
		 * subflow_syn_recv_sock()
		 */
		if (WARN_ON_ONCE(!new_mptcp_sock)) {
			tcp_sk(newsk)->is_mptcp = 0;
			return newsk;
		}

		/* acquire the 2nd reference for the owning socket */
		sock_hold(new_mptcp_sock);

		local_bh_disable();
		bh_lock_sock(new_mptcp_sock);
		msk = mptcp_sk(new_mptcp_sock);
		msk->first = newsk;

		newsk = new_mptcp_sock;
		mptcp_copy_inaddrs(newsk, ssk);
		list_add(&subflow->node, &msk->conn_list);

		mptcp_rcv_space_init(msk, ssk);
		bh_unlock_sock(new_mptcp_sock);

		__MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
		local_bh_enable();
	} else {
		MPTCP_INC_STATS(sock_net(sk),
				MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);
	}

	return newsk;
}

static void mptcp_destroy(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	mptcp_token_destroy(msk);
	if (msk->cached_ext)
		__skb_ext_put(msk->cached_ext);

	sk_sockets_allocated_dec(sk);
}

static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname,
				       sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = (struct sock *)msk;
	struct socket *ssock;
	int ret;

	switch (optname) {
	case SO_REUSEPORT:
	case SO_REUSEADDR:
		lock_sock(sk);
		ssock = __mptcp_nmpc_socket(msk);
		if (!ssock) {
			release_sock(sk);
			return -EINVAL;
		}

		ret = sock_setsockopt(ssock, SOL_SOCKET, optname, optval, optlen);
		if (ret == 0) {
			if (optname == SO_REUSEPORT)
				sk->sk_reuseport = ssock->sk->sk_reuseport;
			else if (optname == SO_REUSEADDR)
				sk->sk_reuse = ssock->sk->sk_reuse;
		}
		release_sock(sk);
		return ret;
	}

	return sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname, optval, optlen);
}

static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname,
			       sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = (struct sock *)msk;
	int ret = -EOPNOTSUPP;
	struct socket *ssock;

	switch (optname) {
	case IPV6_V6ONLY:
		lock_sock(sk);
		ssock = __mptcp_nmpc_socket(msk);
		if (!ssock) {
			release_sock(sk);
			return -EINVAL;
		}

		ret = tcp_setsockopt(ssock->sk, SOL_IPV6, optname, optval, optlen);
		if (ret == 0)
			sk->sk_ipv6only = ssock->sk->sk_ipv6only;

		release_sock(sk);
		break;
	}

	return ret;
}

static int mptcp_setsockopt(struct sock *sk, int level, int optname,
			    sockptr_t optval, unsigned int optlen)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct sock *ssk;

	pr_debug("msk=%p", msk);

	if (level == SOL_SOCKET)
		return mptcp_setsockopt_sol_socket(msk, optname, optval, optlen);

	/* @@ the meaning of setsockopt() when the socket is connected and
	 * there are multiple subflows is not yet defined. It is up to the
	 * MPTCP-level socket to configure the subflows until the subflow
	 * is in TCP fallback, when TCP socket options are passed through
	 * to the one remaining subflow.
	 */
	lock_sock(sk);
	ssk = __mptcp_tcp_fallback(msk);
	release_sock(sk);
	if (ssk)
		return tcp_setsockopt(ssk, level, optname, optval, optlen);

	if (level == SOL_IPV6)
		return mptcp_setsockopt_v6(msk, optname, optval, optlen);

	return -EOPNOTSUPP;
}

static int mptcp_getsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int __user *option)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct sock *ssk;

	pr_debug("msk=%p", msk);

	/* @@ the meaning of setsockopt() when the socket is connected and
	 * there are multiple subflows is not yet defined. It is up to the
	 * MPTCP-level socket to configure the subflows until the subflow
	 * is in TCP fallback, when socket options are passed through
	 * to the one remaining subflow.
	 */
	lock_sock(sk);
	ssk = __mptcp_tcp_fallback(msk);
	release_sock(sk);
	if (ssk)
		return tcp_getsockopt(ssk, level, optname, optval, option);

	return -EOPNOTSUPP;
}

#define MPTCP_DEFERRED_ALL (TCPF_DELACK_TIMER_DEFERRED | \
			    TCPF_WRITE_TIMER_DEFERRED)

/* this is very alike tcp_release_cb() but we must handle differently a
 * different set of events
 */
static void mptcp_release_cb(struct sock *sk)
{
	unsigned long flags, nflags;

	do {
		flags = sk->sk_tsq_flags;
		if (!(flags & MPTCP_DEFERRED_ALL))
			return;
		nflags = flags & ~MPTCP_DEFERRED_ALL;
	} while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags);

	sock_release_ownership(sk);

	if (flags & TCPF_DELACK_TIMER_DEFERRED) {
		struct mptcp_sock *msk = mptcp_sk(sk);
		struct sock *ssk;

		ssk = mptcp_subflow_recv_lookup(msk);
		if (!ssk || !schedule_work(&msk->work))
			__sock_put(sk);
	}

	if (flags & TCPF_WRITE_TIMER_DEFERRED) {
		mptcp_retransmit_handler(sk);
		__sock_put(sk);
	}
}

static int mptcp_hash(struct sock *sk)
{
	/* should never be called,
	 * we hash the TCP subflows not the master socket
	 */
	WARN_ON_ONCE(1);
	return 0;
}

static void mptcp_unhash(struct sock *sk)
{
	/* called from sk_common_release(), but nothing to do here */
}

static int mptcp_get_port(struct sock *sk, unsigned short snum)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *ssock;

	ssock = __mptcp_nmpc_socket(msk);
	pr_debug("msk=%p, subflow=%p", msk, ssock);
	if (WARN_ON_ONCE(!ssock))
		return -EINVAL;

	return inet_csk_get_port(ssock->sk, snum);
}

void mptcp_finish_connect(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk;
	struct sock *sk;
	u64 ack_seq;

	subflow = mptcp_subflow_ctx(ssk);
	sk = subflow->conn;
	msk = mptcp_sk(sk);

	pr_debug("msk=%p, token=%u", sk, subflow->token);

	mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq);
	ack_seq++;
	subflow->map_seq = ack_seq;
	subflow->map_subflow_seq = 1;

	/* the socket is not connected yet, no msk/subflow ops can access/race
	 * accessing the field below
	 */
	WRITE_ONCE(msk->remote_key, subflow->remote_key);
	WRITE_ONCE(msk->local_key, subflow->local_key);
	WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
	WRITE_ONCE(msk->ack_seq, ack_seq);
	WRITE_ONCE(msk->can_ack, 1);
	atomic64_set(&msk->snd_una, msk->write_seq);

	mptcp_pm_new_connection(msk, 0);

	mptcp_rcv_space_init(msk, ssk);
}

static void mptcp_sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	rcu_assign_pointer(sk->sk_wq, &parent->wq);
	sk_set_socket(sk, parent);
	sk->sk_uid = SOCK_INODE(parent)->i_uid;
	write_unlock_bh(&sk->sk_callback_lock);
}

bool mptcp_finish_join(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	struct sock *parent = (void *)msk;
	struct socket *parent_sock;
	bool ret;

	pr_debug("msk=%p, subflow=%p", msk, subflow);

	/* mptcp socket already closing? */
	if (!mptcp_is_fully_established(parent))
		return false;

	if (!msk->pm.server_side)
		return true;

	if (!mptcp_pm_allow_new_subflow(msk))
		return false;

	/* active connections are already on conn_list, and we can't acquire
	 * msk lock here.
	 * use the join list lock as synchronization point and double-check
	 * msk status to avoid racing with mptcp_close()
	 */
	spin_lock_bh(&msk->join_list_lock);
	ret = inet_sk_state_load(parent) == TCP_ESTABLISHED;
	if (ret && !WARN_ON_ONCE(!list_empty(&subflow->node)))
		list_add_tail(&subflow->node, &msk->join_list);
	spin_unlock_bh(&msk->join_list_lock);
	if (!ret)
		return false;

	/* attach to msk socket only after we are sure it will deal with us
	 * at close time
	 */
	parent_sock = READ_ONCE(parent->sk_socket);
	if (parent_sock && !sk->sk_socket)
		mptcp_sock_graft(sk, parent_sock);
	subflow->map_seq = msk->ack_seq;
	return true;
}

static bool mptcp_memory_free(const struct sock *sk, int wake)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	return wake ? test_bit(MPTCP_SEND_SPACE, &msk->flags) : true;
}

static struct proto mptcp_prot = {
	.name		= "MPTCP",
	.owner		= THIS_MODULE,
	.init		= mptcp_init_sock,
	.disconnect	= mptcp_disconnect,
	.close		= mptcp_close,
	.accept		= mptcp_accept,
	.setsockopt	= mptcp_setsockopt,
	.getsockopt	= mptcp_getsockopt,
	.shutdown	= tcp_shutdown,
	.destroy	= mptcp_destroy,
	.sendmsg	= mptcp_sendmsg,
	.recvmsg	= mptcp_recvmsg,
	.release_cb	= mptcp_release_cb,
	.hash		= mptcp_hash,
	.unhash		= mptcp_unhash,
	.get_port	= mptcp_get_port,
	.sockets_allocated	= &mptcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.stream_memory_free	= mptcp_memory_free,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_mem	= sysctl_tcp_mem,
	.obj_size	= sizeof(struct mptcp_sock),
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
	.no_autobind	= true,
};

static int mptcp_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	lock_sock(sock->sk);
	ssock = __mptcp_nmpc_socket(msk);
	if (!ssock) {
		err = -EINVAL;
		goto unlock;
	}

	err = ssock->ops->bind(ssock, uaddr, addr_len);
1939 1940
	if (!err)
		mptcp_copy_inaddrs(sock->sk, ssock->sk);
1941 1942 1943 1944 1945 1946

unlock:
	release_sock(sock->sk);
	return err;
}

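/* Give up on MPTCP for this connection before the SYN is sent: clear the
 * MP_CAPABLE request on the subflow and mark the msk as fallen back.
 */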
static void mptcp_subflow_early_fallback(struct mptcp_sock *msk,
					 struct mptcp_subflow_context *subflow)
{
	subflow->request_mptcp = 0;
	__mptcp_do_fallback(msk);
}

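/* connect() runs on the initial subflow socket. MPTCP is disabled up front
 * (early fallback) when TCP-MD5 is in use or a token cannot be allocated,
 * since both prevent carrying MPTCP options on this connection.
 */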
static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
				int addr_len, int flags)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct mptcp_subflow_context *subflow;
	struct socket *ssock;
	int err;

	lock_sock(sock->sk);
	if (sock->state != SS_UNCONNECTED && msk->subflow) {
		/* pending connection or invalid state, let existing subflow
		 * cope with that
		 */
		ssock = msk->subflow;
		goto do_connect;
	}

	ssock = __mptcp_nmpc_socket(msk);
	if (!ssock) {
		err = -EINVAL;
		goto unlock;
	}

	mptcp_token_destroy(msk);
	inet_sk_state_store(sock->sk, TCP_SYN_SENT);
	subflow = mptcp_subflow_ctx(ssock->sk);
#ifdef CONFIG_TCP_MD5SIG
	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
	 * TCP option space.
	 */
	if (rcu_access_pointer(tcp_sk(ssock->sk)->md5sig_info))
		mptcp_subflow_early_fallback(msk, subflow);
#endif
	if (subflow->request_mptcp && mptcp_token_new_connect(ssock->sk))
		mptcp_subflow_early_fallback(msk, subflow);

do_connect:
	err = ssock->ops->connect(ssock, uaddr, addr_len, flags);
	sock->state = ssock->state;

	/* on successful connect, the msk state will be moved to established by
	 * subflow_finish_connect()
	 */
	if (!err || err == -EINPROGRESS)
		mptcp_copy_inaddrs(sock->sk, ssock->sk);
	else
		inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));

unlock:
	release_sock(sock->sk);
	return err;
}

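/* listen() is also delegated to the initial subflow socket; the MPTCP socket
 * mirrors the subflow's state so poll()/accept() see TCP_LISTEN.
 */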
static int mptcp_listen(struct socket *sock, int backlog)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	pr_debug("msk=%p", msk);

	lock_sock(sock->sk);
	ssock = __mptcp_nmpc_socket(msk);
	if (!ssock) {
		err = -EINVAL;
		goto unlock;
	}

	mptcp_token_destroy(msk);
	inet_sk_state_store(sock->sk, TCP_LISTEN);
	sock_set_flag(sock->sk, SOCK_RCU_FREE);

	err = ssock->ops->listen(ssock, backlog);
	inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
	if (!err)
		mptcp_copy_inaddrs(sock->sk, ssock->sk);

unlock:
	release_sock(sock->sk);
	return err;
}

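/* accept() on the listener subflow; for MP_CAPABLE connections, graft every
 * already-established subflow onto the newly created MPTCP socket.
 */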
static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
			       int flags, bool kern)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	pr_debug("msk=%p", msk);

	lock_sock(sock->sk);
	if (sock->sk->sk_state != TCP_LISTEN)
		goto unlock_fail;

	ssock = __mptcp_nmpc_socket(msk);
	if (!ssock)
		goto unlock_fail;

	clear_bit(MPTCP_DATA_READY, &msk->flags);
	sock_hold(ssock->sk);
	release_sock(sock->sk);

	err = ssock->ops->accept(sock, newsock, flags, kern);
	if (err == 0 && !mptcp_is_tcpsk(newsock->sk)) {
		struct mptcp_sock *msk = mptcp_sk(newsock->sk);
		struct mptcp_subflow_context *subflow;

		/* set ssk->sk_socket of accept()ed flows to mptcp socket.
		 * This is needed so NOSPACE flag can be set from tcp stack.
		 */
		__mptcp_flush_join_list(msk);
		list_for_each_entry(subflow, &msk->conn_list, node) {
			struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

			if (!ssk->sk_socket)
				mptcp_sock_graft(ssk, newsock);
		}
	}

	if (inet_csk_listen_poll(ssock->sk))
		set_bit(MPTCP_DATA_READY, &msk->flags);
	sock_put(ssock->sk);
	return err;

unlock_fail:
	release_sock(sock->sk);
	return -EINVAL;
}

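/* MPTCP_DATA_READY is set when data or a pending connection is available;
 * report it as EPOLLIN at the MPTCP level.
 */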
static __poll_t mptcp_check_readable(struct mptcp_sock *msk)
{
	return test_bit(MPTCP_DATA_READY, &msk->flags) ? EPOLLIN | EPOLLRDNORM :
	       0;
}

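/* poll() is answered at the MPTCP level only: readability comes from the msk
 * receive/accept state, writeability from the MPTCP_SEND_SPACE flag.
 */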
static __poll_t mptcp_poll(struct file *file, struct socket *sock,
			   struct poll_table_struct *wait)
{
	struct sock *sk = sock->sk;
	struct mptcp_sock *msk;
	__poll_t mask = 0;
	int state;

	msk = mptcp_sk(sk);
	sock_poll_wait(file, sock, wait);

	state = inet_sk_state_load(sk);
	if (state == TCP_LISTEN)
		return mptcp_check_readable(msk);

	if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) {
		mask |= mptcp_check_readable(msk);
		if (sk_stream_is_writeable(sk) &&
		    test_bit(MPTCP_SEND_SPACE, &msk->flags))
			mask |= EPOLLOUT | EPOLLWRNORM;
	}
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;

	return mask;
}

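/* shutdown() at the MPTCP level: move the msk towards TCP_FIN_WAIT1 on write
 * shutdown and propagate the request to every subflow.
 */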
static int mptcp_shutdown(struct socket *sock, int how)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct mptcp_subflow_context *subflow;
	int ret = 0;

	pr_debug("msk=%p, how=%d", msk, how);

	lock_sock(sock->sk);
	if (how == SHUT_WR || how == SHUT_RDWR)
		inet_sk_state_store(sock->sk, TCP_FIN_WAIT1);

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (sock->state == SS_CONNECTING) {
		if ((1 << sock->sk->sk_state) &
		    (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
			sock->state = SS_DISCONNECTING;
		else
			sock->state = SS_CONNECTED;
	}

	__mptcp_flush_join_list(msk);
	mptcp_for_each_subflow(msk, subflow) {
		struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);

		mptcp_subflow_shutdown(tcp_sk, how, 1, msk->write_seq);
	}

	/* Wake up anyone sleeping in poll. */
	sock->sk->sk_state_change(sock->sk);

out_unlock:
	release_sock(sock->sk);

	return ret;
}

static const struct proto_ops mptcp_stream_ops = {
	.family		   = PF_INET,
	.owner		   = THIS_MODULE,
	.release	   = inet_release,
	.bind		   = mptcp_bind,
	.connect	   = mptcp_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = mptcp_stream_accept,
	.getname	   = inet_getname,
	.poll		   = mptcp_poll,
	.ioctl		   = inet_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = mptcp_listen,
	.shutdown	   = mptcp_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = inet_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = inet_sendpage,
};

static struct inet_protosw mptcp_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_MPTCP,
	.prot		= &mptcp_prot,
	.ops		= &mptcp_stream_ops,
	.flags		= INET_PROTOSW_ICSK,
};

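/* Once the protosw above is registered, userspace reaches this code through
 * the standard socket() call, e.g. (illustrative sketch only, error handling
 * omitted):
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
 *
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * From there on, mptcp_stream_ops drives all socket operations.
 */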
void __init mptcp_proto_init(void)
{
	mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo;

	if (percpu_counter_init(&mptcp_sockets_allocated, 0, GFP_KERNEL))
		panic("Failed to allocate MPTCP pcpu counter\n");

	mptcp_subflow_init();
	mptcp_pm_init();
	mptcp_token_init();

	if (proto_register(&mptcp_prot, 1) != 0)
		panic("Failed to register MPTCP proto.\n");

	inet_register_protosw(&mptcp_protosw);

	BUILD_BUG_ON(sizeof(struct mptcp_skb_cb) > sizeof_field(struct sk_buff, cb));
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static const struct proto_ops mptcp_v6_stream_ops = {
	.family		   = PF_INET6,
	.owner		   = THIS_MODULE,
	.release	   = inet6_release,
	.bind		   = mptcp_bind,
	.connect	   = mptcp_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = mptcp_stream_accept,
	.getname	   = inet6_getname,
	.poll		   = mptcp_poll,
	.ioctl		   = inet6_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = mptcp_listen,
	.shutdown	   = mptcp_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet6_sendmsg,
	.recvmsg	   = inet6_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = inet_sendpage,
#ifdef CONFIG_COMPAT
	.compat_ioctl	   = inet6_compat_ioctl,
#endif
};

static struct proto mptcp_v6_prot;

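/* The v6 destroy hook must also release the IPv6-specific socket state
 * carried alongside the msk in struct mptcp6_sock.
 */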
static void mptcp_v6_destroy(struct sock *sk)
{
	mptcp_destroy(sk);
	inet6_destroy_sock(sk);
}

static struct inet_protosw mptcp_v6_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_MPTCP,
	.prot		= &mptcp_v6_prot,
	.ops		= &mptcp_v6_stream_ops,
	.flags		= INET_PROTOSW_ICSK,
};

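/* The IPv6 proto is a copy of the IPv4 one with its own slab (obj_size must
 * cover struct mptcp6_sock) and a destroy hook that tears down IPv6 state.
 */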
int __init mptcp_proto_v6_init(void)
{
	int err;

	mptcp_v6_prot = mptcp_prot;
	strcpy(mptcp_v6_prot.name, "MPTCPv6");
	mptcp_v6_prot.slab = NULL;
	mptcp_v6_prot.destroy = mptcp_v6_destroy;
	mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock);

	err = proto_register(&mptcp_v6_prot, 1);
	if (err)
		return err;

	err = inet6_register_protosw(&mptcp_v6_protosw);
	if (err)
		proto_unregister(&mptcp_v6_prot);

	return err;
}
#endif