// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/atomic.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <net/tcp_states.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
#include "protocol.h"
#include "mib.h"

#define MPTCP_SAME_STATE TCP_MAX_STATES

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
struct mptcp6_sock {
	struct mptcp_sock msk;
	struct ipv6_pinfo np;
};
#endif

struct mptcp_skb_cb {
	u32 offset;
};

#define MPTCP_SKB_CB(__skb)	((struct mptcp_skb_cb *)&((__skb)->cb[0]))

static struct percpu_counter mptcp_sockets_allocated;

/* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not
 * completed yet or has failed, return the subflow socket.
 * Otherwise return NULL.
 */
static struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk)
{
	if (!msk->subflow || READ_ONCE(msk->can_ack))
		return NULL;

	return msk->subflow;
}

static bool mptcp_is_tcpsk(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;

	if (unlikely(sk->sk_prot == &tcp_prot)) {
		/* we are being invoked after mptcp_accept() has
		 * accepted a non-mp-capable flow: sk is a tcp_sk,
		 * not an mptcp one.
		 *
		 * Hand the socket over to tcp so all further socket ops
		 * bypass mptcp.
		 */
		sock->ops = &inet_stream_ops;
		return true;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	} else if (unlikely(sk->sk_prot == &tcpv6_prot)) {
		sock->ops = &inet6_stream_ops;
		return true;
#endif
	}

	return false;
}

static struct sock *__mptcp_tcp_fallback(struct mptcp_sock *msk)
{
	sock_owned_by_me((const struct sock *)msk);

	if (likely(!__mptcp_check_fallback(msk)))
		return NULL;

	return msk->first;
}

static int __mptcp_socket_create(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	struct socket *ssock;
	int err;

	err = mptcp_subflow_create_socket(sk, &ssock);
	if (err)
		return err;

	msk->first = ssock->sk;
	msk->subflow = ssock;
	subflow = mptcp_subflow_ctx(ssock->sk);
	list_add(&subflow->node, &msk->conn_list);
	subflow->request_mptcp = 1;

	/* accept() will wait on first subflow sk_wq, and we always wake up
	 * via msk->sk_socket
	 */
	RCU_INIT_POINTER(msk->first->sk_wq, &sk->sk_socket->wq);

	return 0;
}

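/* Move an skb from the subflow receive queue to the msk receive queue:
 * unlink it from the ssk, advance the msk-level ack_seq by the copied
 * length and coalesce with the tail skb when possible, so that memory
 * accounting is transferred to the msk.
 */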
static void __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
			     struct sk_buff *skb,
			     unsigned int offset, size_t copy_len)
{
	struct sock *sk = (struct sock *)msk;
	struct sk_buff *tail;

	__skb_unlink(skb, &ssk->sk_receive_queue);

	skb_ext_reset(skb);
	skb_orphan(skb);
	WRITE_ONCE(msk->ack_seq, msk->ack_seq + copy_len);

	tail = skb_peek_tail(&sk->sk_receive_queue);
	if (offset == 0 && tail) {
		bool fragstolen;
		int delta;

		if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
			kfree_skb_partial(skb, fragstolen);
			atomic_add(delta, &sk->sk_rmem_alloc);
			sk_mem_charge(sk, delta);
			return;
		}
	}

	skb_set_owner_r(skb, sk);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	MPTCP_SKB_CB(skb)->offset = offset;
}

static void mptcp_stop_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	mptcp_sk(sk)->timer_ival = 0;
}

/* both sockets must be locked */
static bool mptcp_subflow_dsn_valid(const struct mptcp_sock *msk,
				    struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u64 dsn = mptcp_subflow_get_mapped_dsn(subflow);

	/* revalidate data sequence number.
	 *
	 * mptcp_subflow_data_available() is usually called
	 * without msk lock.  It's unlikely (but possible)
	 * that msk->ack_seq has been advanced since the last
	 * call found in-sequence data.
	 */
	if (likely(dsn == msk->ack_seq))
		return true;

	subflow->data_avail = 0;
	return mptcp_subflow_data_available(ssk);
}

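/* Progress the msk state machine once the peer has acked our DATA_FIN,
 * i.e. snd_una has caught up with write_seq while in a closing state.
 */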
static void mptcp_check_data_fin_ack(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (__mptcp_check_fallback(msk))
		return;

	/* Look for an acknowledged DATA_FIN */
	if (((1 << sk->sk_state) &
	     (TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK)) &&
	    msk->write_seq == atomic64_read(&msk->snd_una)) {
		mptcp_stop_timer(sk);

		WRITE_ONCE(msk->snd_data_fin_enable, 0);

		switch (sk->sk_state) {
		case TCP_FIN_WAIT1:
			inet_sk_state_store(sk, TCP_FIN_WAIT2);
			sk->sk_state_change(sk);
			break;
		case TCP_CLOSING:
		case TCP_LAST_ACK:
			inet_sk_state_store(sk, TCP_CLOSE);
			sk->sk_state_change(sk);
			break;
		}

		if (sk->sk_shutdown == SHUTDOWN_MASK ||
		    sk->sk_state == TCP_CLOSE)
			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
		else
			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	}
}

static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (READ_ONCE(msk->rcv_data_fin) &&
	    ((1 << sk->sk_state) &
	     (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
		u64 rcv_data_fin_seq = READ_ONCE(msk->rcv_data_fin_seq);

		if (msk->ack_seq == rcv_data_fin_seq) {
			if (seq)
				*seq = rcv_data_fin_seq;

			return true;
		}
	}

	return false;
}

static void mptcp_set_timeout(const struct sock *sk, const struct sock *ssk)
{
	long tout = ssk && inet_csk(ssk)->icsk_pending ?
				      inet_csk(ssk)->icsk_timeout - jiffies : 0;

	if (tout <= 0)
		tout = mptcp_sk(sk)->timer_ival;
	mptcp_sk(sk)->timer_ival = tout > 0 ? tout : TCP_RTO_MIN;
}

static void mptcp_check_data_fin(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	u64 rcv_data_fin_seq;

	if (__mptcp_check_fallback(msk) || !msk->first)
		return;

	/* Need to ack a DATA_FIN received from a peer while this side
	 * of the connection is in ESTABLISHED, FIN_WAIT1, or FIN_WAIT2.
	 * msk->rcv_data_fin was set when parsing the incoming options
	 * at the subflow level and the msk lock was not held, so this
	 * is the first opportunity to act on the DATA_FIN and change
	 * the msk state.
	 *
	 * If we are caught up to the sequence number of the incoming
	 * DATA_FIN, send the DATA_ACK now and do state transition.  If
	 * not caught up, do nothing and let the recv code send DATA_ACK
	 * when catching up.
	 */

	if (mptcp_pending_data_fin(sk, &rcv_data_fin_seq)) {
		struct mptcp_subflow_context *subflow;

		WRITE_ONCE(msk->ack_seq, msk->ack_seq + 1);
		WRITE_ONCE(msk->rcv_data_fin, 0);

		sk->sk_shutdown |= RCV_SHUTDOWN;
		smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
		set_bit(MPTCP_DATA_READY, &msk->flags);

		switch (sk->sk_state) {
		case TCP_ESTABLISHED:
			inet_sk_state_store(sk, TCP_CLOSE_WAIT);
			break;
		case TCP_FIN_WAIT1:
			inet_sk_state_store(sk, TCP_CLOSING);
			break;
		case TCP_FIN_WAIT2:
			inet_sk_state_store(sk, TCP_CLOSE);
			// @@ Close subflows now?
			break;
		default:
			/* Other states not expected */
			WARN_ON_ONCE(1);
			break;
		}

		mptcp_set_timeout(sk, NULL);
		mptcp_for_each_subflow(msk, subflow) {
			struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

			lock_sock(ssk);
			tcp_send_ack(ssk);
			release_sock(ssk);
		}

		sk->sk_state_change(sk);

		if (sk->sk_shutdown == SHUTDOWN_MASK ||
		    sk->sk_state == TCP_CLOSE)
			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
		else
			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	}
}

static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
					   struct sock *ssk,
					   unsigned int *bytes)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = (struct sock *)msk;
	unsigned int moved = 0;
	bool more_data_avail;
	struct tcp_sock *tp;
	bool done = false;

	if (!mptcp_subflow_dsn_valid(msk, ssk)) {
		*bytes = 0;
		return false;
	}

	tp = tcp_sk(ssk);
	do {
		u32 map_remaining, offset;
		u32 seq = tp->copied_seq;
		struct sk_buff *skb;
		bool fin;

		/* try to move as much data as available */
		map_remaining = subflow->map_data_len -
				mptcp_subflow_get_map_offset(subflow);

		skb = skb_peek(&ssk->sk_receive_queue);
		if (!skb)
			break;

		if (__mptcp_check_fallback(msk)) {
			/* if we are running under the workqueue, TCP could have
			 * collapsed skbs between dummy map creation and now,
			 * so be sure to adjust the size
			 */
			map_remaining = skb->len;
			subflow->map_data_len = skb->len;
		}

		offset = seq - TCP_SKB_CB(skb)->seq;
		fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
		if (fin) {
			done = true;
			seq++;
		}

		if (offset < skb->len) {
			size_t len = skb->len - offset;

			if (tp->urg_data)
				done = true;

			__mptcp_move_skb(msk, ssk, skb, offset, len);
			seq += len;
			moved += len;

			if (WARN_ON_ONCE(map_remaining < len))
				break;
		} else {
			WARN_ON_ONCE(!fin);
			sk_eat_skb(ssk, skb);
			done = true;
		}

		WRITE_ONCE(tp->copied_seq, seq);
		more_data_avail = mptcp_subflow_data_available(ssk);

		if (atomic_read(&sk->sk_rmem_alloc) > READ_ONCE(sk->sk_rcvbuf)) {
			done = true;
			break;
		}
	} while (more_data_avail);

	*bytes = moved;

	/* If the moves have caught up with the DATA_FIN sequence number
	 * it's time to ack the DATA_FIN and change socket state, but
	 * this is not a good place to change state. Let the workqueue
	 * do it.
	 */
	if (mptcp_pending_data_fin(sk, NULL) &&
	    schedule_work(&msk->work))
		sock_hold(sk);

	return done;
}

/* In most cases we will be able to lock the mptcp socket.  If it's already
 * owned, we need to defer to the work queue to avoid ABBA deadlock.
 */
static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
{
	struct sock *sk = (struct sock *)msk;
	unsigned int moved = 0;

	if (READ_ONCE(sk->sk_lock.owned))
		return false;

	if (unlikely(!spin_trylock_bh(&sk->sk_lock.slock)))
		return false;

	/* must re-check after taking the lock */
	if (!READ_ONCE(sk->sk_lock.owned))
		__mptcp_move_skbs_from_subflow(msk, ssk, &moved);

	spin_unlock_bh(&sk->sk_lock.slock);

	return moved > 0;
}

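/* Called from subflow context when new data is available: try to move
 * skbs to the msk right away, or defer that to release_cb()/the worker
 * when the msk is owned or already over its receive buffer limit.
 */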
void mptcp_data_ready(struct sock *sk, struct sock *ssk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	set_bit(MPTCP_DATA_READY, &msk->flags);

	if (atomic_read(&sk->sk_rmem_alloc) < READ_ONCE(sk->sk_rcvbuf) &&
	    move_skbs_to_msk(msk, ssk))
		goto wake;

	/* don't schedule if mptcp sk is (still) over limit */
	if (atomic_read(&sk->sk_rmem_alloc) > READ_ONCE(sk->sk_rcvbuf))
		goto wake;

	/* mptcp socket is owned, release_cb should retry */
	if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
			      &sk->sk_tsq_flags)) {
		sock_hold(sk);

		/* need to try again, it's possible release_cb() has already
		 * been called after the test_and_set_bit() above.
		 */
		move_skbs_to_msk(msk, ssk);
	}

wake:
	sk->sk_data_ready(sk);
}

static void __mptcp_flush_join_list(struct mptcp_sock *msk)
{
	if (likely(list_empty(&msk->join_list)))
		return;

	spin_lock_bh(&msk->join_list_lock);
	list_splice_tail_init(&msk->join_list, &msk->conn_list);
	spin_unlock_bh(&msk->join_list_lock);
}

static bool mptcp_timer_pending(struct sock *sk)
{
	return timer_pending(&inet_csk(sk)->icsk_retransmit_timer);
}

static void mptcp_reset_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	unsigned long tout;

	/* should never be called with mptcp level timer cleared */
	tout = READ_ONCE(mptcp_sk(sk)->timer_ival);
	if (WARN_ON_ONCE(!tout))
		tout = TCP_RTO_MIN;
	sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + tout);
}

void mptcp_data_acked(struct sock *sk)
{
	mptcp_reset_timer(sk);

	if ((!sk_stream_is_writeable(sk) ||
	     (inet_sk_state_load(sk) != TCP_ESTABLISHED)) &&
	    schedule_work(&mptcp_sk(sk)->work))
		sock_hold(sk);
}

void mptcp_subflow_eof(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (!test_and_set_bit(MPTCP_WORK_EOF, &msk->flags) &&
	    schedule_work(&msk->work))
		sock_hold(sk);
}

static void mptcp_check_for_eof(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	int receivers = 0;

	mptcp_for_each_subflow(msk, subflow)
		receivers += !subflow->rx_eof;

	if (!receivers && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		/* hopefully temporary hack: propagate shutdown status
		 * to msk, when all subflows agree on it
		 */
		sk->sk_shutdown |= RCV_SHUTDOWN;

		smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
		set_bit(MPTCP_DATA_READY, &msk->flags);
		sk->sk_data_ready(sk);
	}
}

static bool mptcp_ext_cache_refill(struct mptcp_sock *msk)
{
	const struct sock *sk = (const struct sock *)msk;

	if (!msk->cached_ext)
		msk->cached_ext = __skb_ext_alloc(sk->sk_allocation);

	return !!msk->cached_ext;
}

static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;

	sock_owned_by_me(sk);

	mptcp_for_each_subflow(msk, subflow) {
		if (subflow->data_avail)
			return mptcp_subflow_tcp_sock(subflow);
	}

	return NULL;
}

static bool mptcp_skb_can_collapse_to(u64 write_seq,
				      const struct sk_buff *skb,
				      const struct mptcp_ext *mpext)
{
	if (!tcp_skb_can_collapse_to(skb))
		return false;

	/* can collapse only if MPTCP level sequence is in order */
	return mpext && mpext->data_seq + mpext->data_len == write_seq;
}

static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
				       const struct page_frag *pfrag,
				       const struct mptcp_data_frag *df)
{
	return df && pfrag->page == df->page &&
		df->data_seq + df->data_len == msk->write_seq;
}

static void dfrag_uncharge(struct sock *sk, int len)
{
	sk_mem_uncharge(sk, len);
	sk_wmem_queued_add(sk, -len);
}

static void dfrag_clear(struct sock *sk, struct mptcp_data_frag *dfrag)
{
	int len = dfrag->data_len + dfrag->overhead;

	list_del(&dfrag->list);
	dfrag_uncharge(sk, len);
	put_page(dfrag->page);
}

static void mptcp_clean_una(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_data_frag *dtmp, *dfrag;
	bool cleaned = false;
	u64 snd_una;

	/* on fallback we just need to ignore snd_una, as this is really
	 * plain TCP
	 */
	if (__mptcp_check_fallback(msk))
		atomic64_set(&msk->snd_una, msk->write_seq);
	snd_una = atomic64_read(&msk->snd_una);

	list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) {
		if (after64(dfrag->data_seq + dfrag->data_len, snd_una))
			break;

		dfrag_clear(sk, dfrag);
		cleaned = true;
	}

	dfrag = mptcp_rtx_head(sk);
	if (dfrag && after64(snd_una, dfrag->data_seq)) {
		u64 delta = snd_una - dfrag->data_seq;

		if (WARN_ON_ONCE(delta > dfrag->data_len))
			goto out;

		dfrag->data_seq += delta;
		dfrag->offset += delta;
		dfrag->data_len -= delta;

		dfrag_uncharge(sk, delta);
		cleaned = true;
	}

out:
	if (cleaned) {
		sk_mem_reclaim_partial(sk);

		/* Only wake up writers if a subflow is ready */
		if (test_bit(MPTCP_SEND_SPACE, &msk->flags))
			sk_stream_write_space(sk);
	}
}

/* ensure we get enough memory for the frag hdr, beyond some minimal amount of
 * data
 */
static bool mptcp_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
{
	if (likely(skb_page_frag_refill(32U + sizeof(struct mptcp_data_frag),
					pfrag, sk->sk_allocation)))
		return true;

	sk->sk_prot->enter_memory_pressure(sk);
	sk_stream_moderate_sndbuf(sk);
	return false;
}

static struct mptcp_data_frag *
mptcp_carve_data_frag(const struct mptcp_sock *msk, struct page_frag *pfrag,
		      int orig_offset)
{
	int offset = ALIGN(orig_offset, sizeof(long));
	struct mptcp_data_frag *dfrag;

	dfrag = (struct mptcp_data_frag *)(page_to_virt(pfrag->page) + offset);
	dfrag->data_len = 0;
	dfrag->data_seq = msk->write_seq;
	dfrag->overhead = offset - orig_offset + sizeof(struct mptcp_data_frag);
	dfrag->offset = offset + sizeof(struct mptcp_data_frag);
	dfrag->page = pfrag->page;

	return dfrag;
}

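/* Transmit one chunk on the given subflow: either fresh data from the
 * msg iterator or a previously queued dfrag on retransmission, and
 * attach or extend the MPTCP DSS mapping covering it.
 */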
static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
			      struct msghdr *msg, struct mptcp_data_frag *dfrag,
			      long *timeo, int *pmss_now,
			      int *ps_goal)
{
	int mss_now, avail_size, size_goal, offset, ret, frag_truesize = 0;
	bool dfrag_collapsed, can_collapse = false;
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_ext *mpext = NULL;
	bool retransmission = !!dfrag;
	struct sk_buff *skb, *tail;
	struct page_frag *pfrag;
	struct page *page;
	u64 *write_seq;
	size_t psize;

	/* use the mptcp page cache so that we can easily move the data
	 * from one substream to another, but do per subflow memory accounting
	 * Note: pfrag is used only when !retransmission, but the compiler is
	 * fooled into a warning if we don't init here
	 */
	pfrag = sk_page_frag(sk);
	if (!retransmission) {
		write_seq = &msk->write_seq;
		page = pfrag->page;
	} else {
		write_seq = &dfrag->data_seq;
		page = dfrag->page;
	}

	/* compute copy limit */
	mss_now = tcp_send_mss(ssk, &size_goal, msg->msg_flags);
	*pmss_now = mss_now;
	*ps_goal = size_goal;
	avail_size = size_goal;
	skb = tcp_write_queue_tail(ssk);
	if (skb) {
		mpext = skb_ext_find(skb, SKB_EXT_MPTCP);

		/* Limit the write to the size available in the
		 * current skb, if any, so that we create at most a new skb.
		 * Explicitly tells TCP internals to avoid collapsing on later
		 * queue management operation, to avoid breaking the ext <->
		 * SSN association set here
		 */
		can_collapse = (size_goal - skb->len > 0) &&
			      mptcp_skb_can_collapse_to(*write_seq, skb, mpext);
		if (!can_collapse)
			TCP_SKB_CB(skb)->eor = 1;
		else
			avail_size = size_goal - skb->len;
	}

	if (!retransmission) {
		/* reuse tail pfrag, if possible, or carve a new one from the
		 * page allocator
		 */
		dfrag = mptcp_rtx_tail(sk);
		offset = pfrag->offset;
		dfrag_collapsed = mptcp_frag_can_collapse_to(msk, pfrag, dfrag);
		if (!dfrag_collapsed) {
			dfrag = mptcp_carve_data_frag(msk, pfrag, offset);
			offset = dfrag->offset;
			frag_truesize = dfrag->overhead;
		}
		psize = min_t(size_t, pfrag->size - offset, avail_size);

		/* Copy to page */
		pr_debug("left=%zu", msg_data_left(msg));
		psize = copy_page_from_iter(pfrag->page, offset,
					    min_t(size_t, msg_data_left(msg),
						  psize),
					    &msg->msg_iter);
		pr_debug("left=%zu", msg_data_left(msg));
		if (!psize)
			return -EINVAL;

		if (!sk_wmem_schedule(sk, psize + dfrag->overhead)) {
			iov_iter_revert(&msg->msg_iter, psize);
			return -ENOMEM;
		}
	} else {
		offset = dfrag->offset;
		psize = min_t(size_t, dfrag->data_len, avail_size);
	}

	/* tell the TCP stack to delay the push so that we can safely
	 * access the skb after the sendpages call
	 */
	ret = do_tcp_sendpages(ssk, page, offset, psize,
			       msg->msg_flags | MSG_SENDPAGE_NOTLAST | MSG_DONTWAIT);
	if (ret <= 0) {
		if (!retransmission)
			iov_iter_revert(&msg->msg_iter, psize);
		return ret;
	}

	frag_truesize += ret;
	if (!retransmission) {
		if (unlikely(ret < psize))
			iov_iter_revert(&msg->msg_iter, psize - ret);

		/* send successful, keep track of sent data for mptcp-level
		 * retransmission
		 */
		dfrag->data_len += ret;
		if (!dfrag_collapsed) {
			get_page(dfrag->page);
			list_add_tail(&dfrag->list, &msk->rtx_queue);
			sk_wmem_queued_add(sk, frag_truesize);
		} else {
			sk_wmem_queued_add(sk, ret);
		}

		/* charge data on mptcp rtx queue to the master socket
		 * Note: we charge such data both to sk and ssk
		 */
		sk->sk_forward_alloc -= frag_truesize;
	}

	/* if the tail skb extension is still the cached one, collapsing
	 * really happened. Note: we can't check for 'same skb' as the sk_buff
	 * hdr on tail can be transmitted, freed and re-allocated by the
	 * do_tcp_sendpages() call
	 */
	tail = tcp_write_queue_tail(ssk);
	if (mpext && tail && mpext == skb_ext_find(tail, SKB_EXT_MPTCP)) {
		WARN_ON_ONCE(!can_collapse);
		mpext->data_len += ret;
		goto out;
	}

	skb = tcp_write_queue_tail(ssk);
	mpext = __skb_ext_set(skb, SKB_EXT_MPTCP, msk->cached_ext);
	msk->cached_ext = NULL;

	memset(mpext, 0, sizeof(*mpext));
	mpext->data_seq = *write_seq;
	mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq;
	mpext->data_len = ret;
	mpext->use_map = 1;
	mpext->dsn64 = 1;

	pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d",
		 mpext->data_seq, mpext->subflow_seq, mpext->data_len,
		 mpext->dsn64);

out:
	if (!retransmission)
		pfrag->offset += frag_truesize;
	WRITE_ONCE(*write_seq, *write_seq + ret);
	mptcp_subflow_ctx(ssk)->rel_write_seq += ret;

	return ret;
}

static void mptcp_nospace(struct mptcp_sock *msk, struct socket *sock)
{
	clear_bit(MPTCP_SEND_SPACE, &msk->flags);
	smp_mb__after_atomic(); /* msk->flags is changed by write_space cb */

	/* enables sk->write_space() callbacks */
	set_bit(SOCK_NOSPACE, &sock->flags);
}

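/* Select the subflow to send on: the first non-backup subflow with free
 * send memory, or a backup one if nothing else is available. Returns
 * NULL (flagging the msk as out of space) when a subflow has no memory
 * left or the extension cache cannot be refilled.
 */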
static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *backup = NULL;

	sock_owned_by_me((const struct sock *)msk);

	if (!mptcp_ext_cache_refill(msk))
		return NULL;

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		if (!sk_stream_memory_free(ssk)) {
			struct socket *sock = ssk->sk_socket;

			if (sock)
				mptcp_nospace(msk, sock);

			return NULL;
		}

		if (subflow->backup) {
			if (!backup)
				backup = ssk;

			continue;
		}

		return ssk;
	}

	return backup;
}

static void ssk_check_wmem(struct mptcp_sock *msk, struct sock *ssk)
{
	struct socket *sock;

	if (likely(sk_stream_is_writeable(ssk)))
		return;

	sock = READ_ONCE(ssk->sk_socket);
	if (sock)
		mptcp_nospace(msk, sock);
}

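/* Sendmsg entry point: data is carved into dfrags backed by the msk
 * page cache and pushed, chunk by chunk, onto the subflow chosen by
 * mptcp_subflow_get_send().
 */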
static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	int mss_now = 0, size_goal = 0, ret = 0;
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct page_frag *pfrag;
	size_t copied = 0;
	struct sock *ssk;
	bool tx_ok;
	long timeo;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -EOPNOTSUPP;

	lock_sock(sk);

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
		ret = sk_stream_wait_connect(sk, &timeo);
		if (ret)
			goto out;
	}

	pfrag = sk_page_frag(sk);
restart:
	mptcp_clean_una(sk);

	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) {
		ret = -EPIPE;
		goto out;
	}

	__mptcp_flush_join_list(msk);
	ssk = mptcp_subflow_get_send(msk);
	while (!sk_stream_memory_free(sk) ||
	       !ssk ||
	       !mptcp_page_frag_refill(ssk, pfrag)) {
		if (ssk) {
			/* make sure retransmit timer is
			 * running before we wait for memory.
			 *
			 * The retransmit timer might be needed
			 * to make the peer send an up-to-date
			 * MPTCP Ack.
			 */
			mptcp_set_timeout(sk, ssk);
			if (!mptcp_timer_pending(sk))
				mptcp_reset_timer(sk);
		}

		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret)
			goto out;

		mptcp_clean_una(sk);

		ssk = mptcp_subflow_get_send(msk);
		if (list_empty(&msk->conn_list)) {
			ret = -ENOTCONN;
			goto out;
		}
	}

	pr_debug("conn_list->subflow=%p", ssk);

	lock_sock(ssk);
	tx_ok = msg_data_left(msg);
	while (tx_ok) {
		ret = mptcp_sendmsg_frag(sk, ssk, msg, NULL, &timeo, &mss_now,
					 &size_goal);
		if (ret < 0) {
			if (ret == -EAGAIN && timeo > 0) {
				mptcp_set_timeout(sk, ssk);
				release_sock(ssk);
				goto restart;
			}
			break;
		}

		copied += ret;

		tx_ok = msg_data_left(msg);
		if (!tx_ok)
			break;

		if (!sk_stream_memory_free(ssk) ||
		    !mptcp_page_frag_refill(ssk, pfrag) ||
		    !mptcp_ext_cache_refill(msk)) {
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			tcp_push(ssk, msg->msg_flags, mss_now,
				 tcp_sk(ssk)->nonagle, size_goal);
			mptcp_set_timeout(sk, ssk);
			release_sock(ssk);
			goto restart;
		}

		/* memory is charged to mptcp level socket as well, i.e.
		 * if msg is very large, mptcp socket may run out of buffer
		 * space.  mptcp_clean_una() will release data that has
		 * been acked at mptcp level in the meantime, so there is
		 * a good chance we can continue sending data right away.
		 *
		 * Normally, when the tcp subflow can accept more data, then
		 * so can the MPTCP socket.  However, we need to cope with
		 * peers that might lag behind in their MPTCP-level
		 * acknowledgements, i.e.  data might have been acked at
		 * tcp level only.  So, we must also check the MPTCP socket
		 * limits before we send more data.
		 */
		if (unlikely(!sk_stream_memory_free(sk))) {
			tcp_push(ssk, msg->msg_flags, mss_now,
				 tcp_sk(ssk)->nonagle, size_goal);
			mptcp_clean_una(sk);
			if (!sk_stream_memory_free(sk)) {
				/* can't send more for now, need to wait for
				 * MPTCP-level ACKs from peer.
				 *
				 * Wakeup will happen via mptcp_clean_una().
				 */
				mptcp_set_timeout(sk, ssk);
				release_sock(ssk);
				goto restart;
			}
		}
	}

	mptcp_set_timeout(sk, ssk);
	if (copied) {
		tcp_push(ssk, msg->msg_flags, mss_now, tcp_sk(ssk)->nonagle,
			 size_goal);

		/* start the timer, if it's not pending */
		if (!mptcp_timer_pending(sk))
			mptcp_reset_timer(sk);
	}

	ssk_check_wmem(msk, ssk);
	release_sock(ssk);
out:
	release_sock(sk);
	return copied ? : ret;
}

static void mptcp_wait_data(struct sock *sk, long *timeo)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct mptcp_sock *msk = mptcp_sk(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	sk_wait_event(sk, timeo,
		      test_and_clear_bit(MPTCP_DATA_READY, &msk->flags), &wait);

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	remove_wait_queue(sk_sleep(sk), &wait);
}

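/* Copy up to @len bytes from the msk receive queue into @msg, freeing
 * fully consumed skbs and remembering the partial offset otherwise.
 */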
static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
				struct msghdr *msg,
				size_t len)
{
	struct sock *sk = (struct sock *)msk;
	struct sk_buff *skb;
	int copied = 0;

	while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
		u32 offset = MPTCP_SKB_CB(skb)->offset;
		u32 data_len = skb->len - offset;
		u32 count = min_t(size_t, len - copied, data_len);
		int err;

		err = skb_copy_datagram_msg(skb, offset, msg, count);
		if (unlikely(err < 0)) {
			if (!copied)
				return err;
			break;
		}

		copied += count;

		if (count < data_len) {
			MPTCP_SKB_CB(skb)->offset += count;
			break;
		}

		__skb_unlink(skb, &sk->sk_receive_queue);
		__kfree_skb(skb);

		if (copied >= len)
			break;
	}

	return copied;
}

/* receive buffer autotuning.  See tcp_rcv_space_adjust for more information.
 *
 * Only difference: Use highest rtt estimate of the subflows in use.
 */
static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	u32 time, advmss = 1;
	u64 rtt_us, mstamp;

	sock_owned_by_me(sk);

	if (copied <= 0)
		return;

	msk->rcvq_space.copied += copied;

	mstamp = div_u64(tcp_clock_ns(), NSEC_PER_USEC);
	time = tcp_stamp_us_delta(mstamp, msk->rcvq_space.time);

	rtt_us = msk->rcvq_space.rtt_us;
	if (rtt_us && time < (rtt_us >> 3))
		return;

	rtt_us = 0;
	mptcp_for_each_subflow(msk, subflow) {
		const struct tcp_sock *tp;
		u64 sf_rtt_us;
		u32 sf_advmss;

		tp = tcp_sk(mptcp_subflow_tcp_sock(subflow));

		sf_rtt_us = READ_ONCE(tp->rcv_rtt_est.rtt_us);
		sf_advmss = READ_ONCE(tp->advmss);

		rtt_us = max(sf_rtt_us, rtt_us);
		advmss = max(sf_advmss, advmss);
	}

	msk->rcvq_space.rtt_us = rtt_us;
	if (time < (rtt_us >> 3) || rtt_us == 0)
		return;

	if (msk->rcvq_space.copied <= msk->rcvq_space.space)
		goto new_measure;

	if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf &&
	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
		int rcvmem, rcvbuf;
		u64 rcvwin, grow;

		rcvwin = ((u64)msk->rcvq_space.copied << 1) + 16 * advmss;

		grow = rcvwin * (msk->rcvq_space.copied - msk->rcvq_space.space);

		do_div(grow, msk->rcvq_space.space);
		rcvwin += (grow << 1);

		rcvmem = SKB_TRUESIZE(advmss + MAX_TCP_HEADER);
		while (tcp_win_from_space(sk, rcvmem) < advmss)
			rcvmem += 128;

		do_div(rcvwin, advmss);
		rcvbuf = min_t(u64, rcvwin * rcvmem,
			       sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);

		if (rcvbuf > sk->sk_rcvbuf) {
			u32 window_clamp;

			window_clamp = tcp_win_from_space(sk, rcvbuf);
			WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);

			/* Make subflows follow along.  If we do not do this, we
			 * get drops at subflow level if skbs can't be moved to
			 * the mptcp rx queue fast enough (announced rcv_win can
			 * exceed ssk->sk_rcvbuf).
			 */
			mptcp_for_each_subflow(msk, subflow) {
				struct sock *ssk;

				ssk = mptcp_subflow_tcp_sock(subflow);
				WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf);
				tcp_sk(ssk)->window_clamp = window_clamp;
			}
		}
	}

	msk->rcvq_space.space = msk->rcvq_space.copied;
new_measure:
	msk->rcvq_space.copied = 0;
	msk->rcvq_space.time = mstamp;
}

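/* Drain every subflow that currently has data available into the msk
 * receive queue.
 */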
static bool __mptcp_move_skbs(struct mptcp_sock *msk)
{
	unsigned int moved = 0;
	bool done;

	do {
		struct sock *ssk = mptcp_subflow_recv_lookup(msk);

		if (!ssk)
			break;

		lock_sock(ssk);
		done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
		release_sock(ssk);
	} while (!done);

	return moved > 0;
}

static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			 int nonblock, int flags, int *addr_len)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	int copied = 0;
	int target;
	long timeo;

	if (msg->msg_flags & ~(MSG_WAITALL | MSG_DONTWAIT))
		return -EOPNOTSUPP;

	lock_sock(sk);
	timeo = sock_rcvtimeo(sk, nonblock);

	len = min_t(size_t, len, INT_MAX);
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	__mptcp_flush_join_list(msk);

	while (len > (size_t)copied) {
		int bytes_read;

		bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied);
		if (unlikely(bytes_read < 0)) {
			if (!copied)
				copied = bytes_read;
			goto out_err;
		}

		copied += bytes_read;

		if (skb_queue_empty(&sk->sk_receive_queue) &&
		    __mptcp_move_skbs(msk))
			continue;

		/* only the master socket status is relevant here. The exit
		 * conditions mirror closely tcp_recvmsg()
		 */
		if (copied >= target)
			break;

		if (copied) {
			if (sk->sk_err ||
			    sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    !timeo ||
			    signal_pending(current))
				break;
		} else {
			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}

			if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
				mptcp_check_for_eof(msk);

			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			if (sk->sk_state == TCP_CLOSE) {
				copied = -ENOTCONN;
				break;
			}

			if (!timeo) {
				copied = -EAGAIN;
				break;
			}

			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}

		pr_debug("block timeout %ld", timeo);
		mptcp_wait_data(sk, &timeo);
	}

	if (skb_queue_empty(&sk->sk_receive_queue)) {
		/* entire backlog drained, clear DATA_READY. */
		clear_bit(MPTCP_DATA_READY, &msk->flags);

		/* .. race-breaker: ssk might have gotten new data
		 * after last __mptcp_move_skbs() returned false.
		 */
		if (unlikely(__mptcp_move_skbs(msk)))
			set_bit(MPTCP_DATA_READY, &msk->flags);
	} else if (unlikely(!test_bit(MPTCP_DATA_READY, &msk->flags))) {
		/* data to read but mptcp_wait_data() cleared DATA_READY */
		set_bit(MPTCP_DATA_READY, &msk->flags);
	}
out_err:
	mptcp_rcv_space_adjust(msk, copied);

	release_sock(sk);
	return copied;
}

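/* MPTCP-level retransmit timer fired: stop the timer if all data is
 * acked, otherwise let the worker perform the actual retransmission.
 */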
static void mptcp_retransmit_handler(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (atomic64_read(&msk->snd_una) == READ_ONCE(msk->write_seq)) {
		mptcp_stop_timer(sk);
	} else {
		set_bit(MPTCP_WORK_RTX, &msk->flags);
		if (schedule_work(&msk->work))
			sock_hold(sk);
	}
}

static void mptcp_retransmit_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk = from_timer(icsk, t,
						       icsk_retransmit_timer);
	struct sock *sk = &icsk->icsk_inet.sk;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		mptcp_retransmit_handler(sk);
	} else {
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED,
				      &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

/* Find an idle subflow.  Return NULL if there is unacked data at tcp
 * level.
 *
 * A backup subflow is returned only if that is the only kind available.
 */
static struct sock *mptcp_subflow_get_retrans(const struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *backup = NULL;

	sock_owned_by_me((const struct sock *)msk);

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		/* still data outstanding at TCP level?  Don't retransmit. */
		if (!tcp_write_queue_empty(ssk))
			return NULL;

		if (subflow->backup) {
			if (!backup)
				backup = ssk;
			continue;
		}

		return ssk;
	}

	return backup;
}

/* subflow sockets can be either outgoing (connect) or incoming
 * (accept).
 *
 * Outgoing subflows use in-kernel sockets.
 * Incoming subflows do not have their own 'struct socket' allocated,
 * so we need to use tcp_close() after detaching them from the mptcp
 * parent socket.
 */
static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
			      struct mptcp_subflow_context *subflow,
			      long timeout)
{
	struct socket *sock = READ_ONCE(ssk->sk_socket);

	list_del(&subflow->node);

	if (sock && sock != sk->sk_socket) {
		/* outgoing subflow */
		sock_release(sock);
	} else {
		/* incoming subflow */
		tcp_close(ssk, timeout);
	}
}

static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
{
	return 0;
}

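/* Run the path-manager actions flagged on this msk (ADD_ADDR received,
 * connection or subflow established), under the PM lock.
 */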
static void pm_work(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;

	spin_lock_bh(&msk->pm.lock);

	pr_debug("msk=%p status=%x", msk, pm->status);
	if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) {
		pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
		mptcp_pm_nl_add_addr_received(msk);
	}
	if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) {
		pm->status &= ~BIT(MPTCP_PM_ESTABLISHED);
		mptcp_pm_nl_fully_established(msk);
	}
	if (pm->status & BIT(MPTCP_PM_SUBFLOW_ESTABLISHED)) {
		pm->status &= ~BIT(MPTCP_PM_SUBFLOW_ESTABLISHED);
		mptcp_pm_nl_subflow_established(msk);
	}

	spin_unlock_bh(&msk->pm.lock);
}

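/* Workqueue handler: flushes the join list, moves pending data to the
 * msk, runs PM and DATA_FIN housekeeping and, if needed, retransmits
 * the rtx queue head on a freshly picked subflow.
 */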
static void mptcp_worker(struct work_struct *work)
{
	struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
	struct sock *ssk, *sk = &msk->sk.icsk_inet.sk;
	int orig_len, orig_offset, mss_now = 0, size_goal = 0;
	struct mptcp_data_frag *dfrag;
	u64 orig_write_seq;
	size_t copied = 0;
	struct msghdr msg = {
		.msg_flags = MSG_DONTWAIT,
	};
	long timeo = 0;

	lock_sock(sk);
	mptcp_clean_una(sk);
	mptcp_check_data_fin_ack(sk);
	__mptcp_flush_join_list(msk);
	__mptcp_move_skbs(msk);

	if (msk->pm.status)
		pm_work(msk);

	if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
		mptcp_check_for_eof(msk);

	mptcp_check_data_fin(sk);

	if (!test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
		goto unlock;

	dfrag = mptcp_rtx_head(sk);
	if (!dfrag)
		goto unlock;

	if (!mptcp_ext_cache_refill(msk))
		goto reset_unlock;

	ssk = mptcp_subflow_get_retrans(msk);
	if (!ssk)
		goto reset_unlock;

	lock_sock(ssk);

	orig_len = dfrag->data_len;
	orig_offset = dfrag->offset;
	orig_write_seq = dfrag->data_seq;
	while (dfrag->data_len > 0) {
		int ret = mptcp_sendmsg_frag(sk, ssk, &msg, dfrag, &timeo,
					     &mss_now, &size_goal);
		if (ret < 0)
			break;

		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RETRANSSEGS);
		copied += ret;
		dfrag->data_len -= ret;
		dfrag->offset += ret;

		if (!mptcp_ext_cache_refill(msk))
			break;
	}
	if (copied)
		tcp_push(ssk, msg.msg_flags, mss_now, tcp_sk(ssk)->nonagle,
			 size_goal);

	dfrag->data_seq = orig_write_seq;
	dfrag->offset = orig_offset;
	dfrag->data_len = orig_len;

	mptcp_set_timeout(sk, ssk);
	release_sock(ssk);

reset_unlock:
	if (!mptcp_timer_pending(sk))
		mptcp_reset_timer(sk);

unlock:
	release_sock(sk);
	sock_put(sk);
}

static int __mptcp_init_sock(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	spin_lock_init(&msk->join_list_lock);

	INIT_LIST_HEAD(&msk->conn_list);
	INIT_LIST_HEAD(&msk->join_list);
	INIT_LIST_HEAD(&msk->rtx_queue);
	__set_bit(MPTCP_SEND_SPACE, &msk->flags);
	INIT_WORK(&msk->work, mptcp_worker);

	msk->first = NULL;
	inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;

	mptcp_pm_data_init(msk);

	/* re-use the csk retrans timer for MPTCP-level retrans */
	timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0);

	return 0;
}

static int mptcp_init_sock(struct sock *sk)
{
	struct net *net = sock_net(sk);
	int ret;

	if (!mptcp_is_enabled(net))
		return -ENOPROTOOPT;

	if (unlikely(!net->mib.mptcp_statistics) && !mptcp_mib_alloc(net))
		return -ENOMEM;

	ret = __mptcp_init_sock(sk);
	if (ret)
		return ret;

	ret = __mptcp_socket_create(mptcp_sk(sk));
	if (ret)
		return ret;

	sk_sockets_allocated_inc(sk);
	sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];
	sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[2];

	return 0;
}

static void __mptcp_clear_xmit(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_data_frag *dtmp, *dfrag;

	sk_stop_timer(sk, &msk->sk.icsk_retransmit_timer);

	list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list)
		dfrag_clear(sk, dfrag);
}

static void mptcp_cancel_work(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (cancel_work_sync(&msk->work))
		sock_put(sk);
}

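/* Shut down a single subflow: listener and SYN_SENT sockets are simply
 * disconnected; established ones get a plain TCP shutdown on fallback,
 * or have a DATA_FIN signalled via an ack otherwise.
 */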
static void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
{
	lock_sock(ssk);

	switch (ssk->sk_state) {
	case TCP_LISTEN:
		if (!(how & RCV_SHUTDOWN))
			break;
		fallthrough;
	case TCP_SYN_SENT:
		tcp_disconnect(ssk, O_NONBLOCK);
		break;
	default:
		if (__mptcp_check_fallback(mptcp_sk(sk))) {
			pr_debug("Fallback");
			ssk->sk_shutdown |= how;
			tcp_shutdown(ssk, how);
		} else {
			pr_debug("Sending DATA_FIN on subflow %p", ssk);
			mptcp_set_timeout(sk, ssk);
			tcp_send_ack(ssk);
		}
		break;
	}

	release_sock(ssk);
}

static const unsigned char new_state[16] = {
	/* current state:     new state:      action:	*/
	[0 /* (Invalid) */] = TCP_CLOSE,
	[TCP_ESTABLISHED]   = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
	[TCP_SYN_SENT]      = TCP_CLOSE,
	[TCP_SYN_RECV]      = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
	[TCP_FIN_WAIT1]     = TCP_FIN_WAIT1,
	[TCP_FIN_WAIT2]     = TCP_FIN_WAIT2,
	[TCP_TIME_WAIT]     = TCP_CLOSE,	/* should not happen ! */
	[TCP_CLOSE]         = TCP_CLOSE,
	[TCP_CLOSE_WAIT]    = TCP_LAST_ACK  | TCP_ACTION_FIN,
	[TCP_LAST_ACK]      = TCP_LAST_ACK,
	[TCP_LISTEN]        = TCP_CLOSE,
	[TCP_CLOSING]       = TCP_CLOSING,
	[TCP_NEW_SYN_RECV]  = TCP_CLOSE,	/* should not happen ! */
};

static int mptcp_close_state(struct sock *sk)
{
	int next = (int)new_state[sk->sk_state];
	int ns = next & TCP_STATE_MASK;

	inet_sk_state_store(sk, ns);

	return next & TCP_ACTION_FIN;
}

static void mptcp_close(struct sock *sk, long timeout)
{
	struct mptcp_subflow_context *subflow, *tmp;
	struct mptcp_sock *msk = mptcp_sk(sk);
	LIST_HEAD(conn_list);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == TCP_LISTEN) {
		inet_sk_state_store(sk, TCP_CLOSE);
		goto cleanup;
	} else if (sk->sk_state == TCP_CLOSE) {
		goto cleanup;
	}

	if (__mptcp_check_fallback(msk)) {
		goto update_state;
	} else if (mptcp_close_state(sk)) {
		pr_debug("Sending DATA_FIN sk=%p", sk);
		WRITE_ONCE(msk->write_seq, msk->write_seq + 1);
		WRITE_ONCE(msk->snd_data_fin_enable, 1);

		mptcp_for_each_subflow(msk, subflow) {
			struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);

			mptcp_subflow_shutdown(sk, tcp_sk, SHUTDOWN_MASK);
		}
	}

	sk_stream_wait_close(sk, timeout);

update_state:
	inet_sk_state_store(sk, TCP_CLOSE);

cleanup:
	/* be sure to always acquire the join list lock, to sync vs
	 * mptcp_finish_join().
	 */
	spin_lock_bh(&msk->join_list_lock);
	list_splice_tail_init(&msk->join_list, &msk->conn_list);
	spin_unlock_bh(&msk->join_list_lock);
	list_splice_init(&msk->conn_list, &conn_list);

	__mptcp_clear_xmit(sk);

	release_sock(sk);

	list_for_each_entry_safe(subflow, tmp, &conn_list, node) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		__mptcp_close_ssk(sk, ssk, subflow, timeout);
	}

	mptcp_cancel_work(sk);

	__skb_queue_purge(&sk->sk_receive_queue);

	sk_common_release(sk);
}

static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	const struct ipv6_pinfo *ssk6 = inet6_sk(ssk);
	struct ipv6_pinfo *msk6 = inet6_sk(msk);

	msk->sk_v6_daddr = ssk->sk_v6_daddr;
	msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr;

	if (msk6 && ssk6) {
		msk6->saddr = ssk6->saddr;
		msk6->flow_label = ssk6->flow_label;
	}
#endif

	inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num;
	inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport;
	inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport;
	inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr;
	inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr;
	inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr;
}

static int mptcp_disconnect(struct sock *sk, int flags)
{
	/* Should never be called.
	 * inet_stream_connect() calls ->disconnect, but that
	 * refers to the subflow socket, not the mptcp one.
	 */
	WARN_ON_ONCE(1);
	return 0;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
{
	unsigned int offset = sizeof(struct mptcp6_sock) - sizeof(struct ipv6_pinfo);

	return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
}
#endif

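/* Create the msk for a passively established connection: clone the
 * listener socket and seed keys and sequence numbers from the subflow
 * request and the received MPTCP options.
 */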
struct sock *mptcp_sk_clone(const struct sock *sk,
			    const struct mptcp_options_received *mp_opt,
			    struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC);
	struct mptcp_sock *msk;
	u64 ack_seq;

	if (!nsk)
		return NULL;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (nsk->sk_family == AF_INET6)
		inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk);
#endif

	__mptcp_init_sock(nsk);

	msk = mptcp_sk(nsk);
	msk->local_key = subflow_req->local_key;
	msk->token = subflow_req->token;
	msk->subflow = NULL;
	WRITE_ONCE(msk->fully_established, false);

	msk->write_seq = subflow_req->idsn + 1;
	atomic64_set(&msk->snd_una, msk->write_seq);
	if (mp_opt->mp_capable) {
		msk->can_ack = true;
		msk->remote_key = mp_opt->sndr_key;
		mptcp_crypto_key_sha(msk->remote_key, NULL, &ack_seq);
		ack_seq++;
		WRITE_ONCE(msk->ack_seq, ack_seq);
	}

	sock_reset_flag(nsk, SOCK_RCU_FREE);
	/* will be fully established after successful MPC subflow creation */
	inet_sk_state_store(nsk, TCP_SYN_RECV);
	bh_unlock_sock(nsk);

	/* keep a single reference */
	__sock_put(nsk);
	return nsk;
}

void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
{
	const struct tcp_sock *tp = tcp_sk(ssk);

	msk->rcvq_space.copied = 0;
	msk->rcvq_space.rtt_us = 0;

	msk->rcvq_space.time = tp->tcp_mstamp;

	/* initial rcv_space offering made to peer */
	msk->rcvq_space.space = min_t(u32, tp->rcv_wnd,
				      TCP_INIT_CWND * tp->advmss);
	if (msk->rcvq_space.space == 0)
		msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT;
}

static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
				 bool kern)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *listener;
	struct sock *newsk;

	listener = __mptcp_nmpc_socket(msk);
	if (WARN_ON_ONCE(!listener)) {
		*err = -EINVAL;
		return NULL;
	}

	pr_debug("msk=%p, listener=%p", msk, mptcp_subflow_ctx(listener->sk));
	newsk = inet_csk_accept(listener->sk, flags, err, kern);
	if (!newsk)
		return NULL;

	pr_debug("msk=%p, subflow is mptcp=%d", msk, sk_is_mptcp(newsk));
	if (sk_is_mptcp(newsk)) {
		struct mptcp_subflow_context *subflow;
		struct sock *new_mptcp_sock;
		struct sock *ssk = newsk;

		subflow = mptcp_subflow_ctx(newsk);
		new_mptcp_sock = subflow->conn;

		/* is_mptcp should be false if subflow->conn is missing, see
		 * subflow_syn_recv_sock()
		 */
		if (WARN_ON_ONCE(!new_mptcp_sock)) {
			tcp_sk(newsk)->is_mptcp = 0;
			return newsk;
		}

		/* acquire the 2nd reference for the owning socket */
		sock_hold(new_mptcp_sock);

		local_bh_disable();
		bh_lock_sock(new_mptcp_sock);
		msk = mptcp_sk(new_mptcp_sock);
		msk->first = newsk;

		newsk = new_mptcp_sock;
		mptcp_copy_inaddrs(newsk, ssk);
		list_add(&subflow->node, &msk->conn_list);

		mptcp_rcv_space_init(msk, ssk);
		bh_unlock_sock(new_mptcp_sock);

		__MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
		local_bh_enable();
	} else {
		MPTCP_INC_STATS(sock_net(sk),
				MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);
	}

	return newsk;
}

static void mptcp_destroy(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	mptcp_token_destroy(msk);
	if (msk->cached_ext)
		__skb_ext_put(msk->cached_ext);

	sk_sockets_allocated_dec(sk);
}

static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname,
				       sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = (struct sock *)msk;
	struct socket *ssock;
	int ret;

	switch (optname) {
	case SO_REUSEPORT:
	case SO_REUSEADDR:
		lock_sock(sk);
		ssock = __mptcp_nmpc_socket(msk);
		if (!ssock) {
			release_sock(sk);
			return -EINVAL;
		}

		ret = sock_setsockopt(ssock, SOL_SOCKET, optname, optval, optlen);
		if (ret == 0) {
			if (optname == SO_REUSEPORT)
				sk->sk_reuseport = ssock->sk->sk_reuseport;
			else if (optname == SO_REUSEADDR)
				sk->sk_reuse = ssock->sk->sk_reuse;
		}
		release_sock(sk);
		return ret;
	}

	return sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname, optval, optlen);
}

static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname,
			       sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = (struct sock *)msk;
	int ret = -EOPNOTSUPP;
	struct socket *ssock;

	switch (optname) {
	case IPV6_V6ONLY:
		lock_sock(sk);
		ssock = __mptcp_nmpc_socket(msk);
		if (!ssock) {
			release_sock(sk);
			return -EINVAL;
		}

		ret = tcp_setsockopt(ssock->sk, SOL_IPV6, optname, optval, optlen);
		if (ret == 0)
			sk->sk_ipv6only = ssock->sk->sk_ipv6only;

		release_sock(sk);
		break;
	}

	return ret;
}

static int mptcp_setsockopt(struct sock *sk, int level, int optname,
			    sockptr_t optval, unsigned int optlen)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct sock *ssk;

	pr_debug("msk=%p", msk);

	if (level == SOL_SOCKET)
		return mptcp_setsockopt_sol_socket(msk, optname, optval, optlen);

	/* @@ the meaning of setsockopt() when the socket is connected and
	 * there are multiple subflows is not yet defined. It is up to the
	 * MPTCP-level socket to configure the subflows until the subflow
	 * is in TCP fallback, when TCP socket options are passed through
	 * to the one remaining subflow.
	 */
	lock_sock(sk);
	ssk = __mptcp_tcp_fallback(msk);
	release_sock(sk);
	if (ssk)
		return tcp_setsockopt(ssk, level, optname, optval, optlen);

	if (level == SOL_IPV6)
		return mptcp_setsockopt_v6(msk, optname, optval, optlen);

	return -EOPNOTSUPP;
}

static int mptcp_getsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int __user *option)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct sock *ssk;

	pr_debug("msk=%p", msk);

	/* @@ the meaning of getsockopt() when the socket is connected and
	 * there are multiple subflows is not yet defined. It is up to the
	 * MPTCP-level socket to configure the subflows until the subflow
	 * is in TCP fallback, when socket options are passed through
	 * to the one remaining subflow.
	 */
	lock_sock(sk);
	ssk = __mptcp_tcp_fallback(msk);
	release_sock(sk);
	if (ssk)
		return tcp_getsockopt(ssk, level, optname, optval, option);

	return -EOPNOTSUPP;
}

#define MPTCP_DEFERRED_ALL (TCPF_DELACK_TIMER_DEFERRED | \
			    TCPF_WRITE_TIMER_DEFERRED)

/* This is very similar to tcp_release_cb(), but we must handle a
 * different set of deferred events.
 */
static void mptcp_release_cb(struct sock *sk)
{
	unsigned long flags, nflags;

	do {
		flags = sk->sk_tsq_flags;
		if (!(flags & MPTCP_DEFERRED_ALL))
			return;
		nflags = flags & ~MPTCP_DEFERRED_ALL;
	} while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags);

	sock_release_ownership(sk);

	if (flags & TCPF_DELACK_TIMER_DEFERRED) {
		struct mptcp_sock *msk = mptcp_sk(sk);
		struct sock *ssk;

		ssk = mptcp_subflow_recv_lookup(msk);
		if (!ssk || !schedule_work(&msk->work))
			__sock_put(sk);
	}

	if (flags & TCPF_WRITE_TIMER_DEFERRED) {
		mptcp_retransmit_handler(sk);
		__sock_put(sk);
	}
}

static int mptcp_hash(struct sock *sk)
{
	/* should never be called,
	 * we hash the TCP subflows not the master socket
	 */
	WARN_ON_ONCE(1);
	return 0;
}

static void mptcp_unhash(struct sock *sk)
{
	/* called from sk_common_release(), but nothing to do here */
}

static int mptcp_get_port(struct sock *sk, unsigned short snum)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *ssock;

	ssock = __mptcp_nmpc_socket(msk);
	pr_debug("msk=%p, subflow=%p", msk, ssock);
	if (WARN_ON_ONCE(!ssock))
		return -EINVAL;

	return inet_csk_get_port(ssock->sk, snum);
}

void mptcp_finish_connect(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk;
	struct sock *sk;
	u64 ack_seq;

	subflow = mptcp_subflow_ctx(ssk);
	sk = subflow->conn;
	msk = mptcp_sk(sk);

	pr_debug("msk=%p, token=%u", sk, subflow->token);

	mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq);
	ack_seq++;
	subflow->map_seq = ack_seq;
	subflow->map_subflow_seq = 1;

	/* the socket is not connected yet, so no msk/subflow ops can access
	 * or race on the fields below
	 */
	WRITE_ONCE(msk->remote_key, subflow->remote_key);
	WRITE_ONCE(msk->local_key, subflow->local_key);
	WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
	WRITE_ONCE(msk->ack_seq, ack_seq);
	WRITE_ONCE(msk->can_ack, 1);
	atomic64_set(&msk->snd_una, msk->write_seq);

	mptcp_pm_new_connection(msk, 0);

	mptcp_rcv_space_init(msk, ssk);
}

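/* Attach a subflow socket to the parent struct socket: point its wait queue
 * at the parent and inherit the owning inode's uid, so wakeups for the
 * subflow are delivered through the MPTCP-level socket.
 */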
static void mptcp_sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	rcu_assign_pointer(sk->sk_wq, &parent->wq);
	sk_set_socket(sk, parent);
	sk->sk_uid = SOCK_INODE(parent)->i_uid;
	write_unlock_bh(&sk->sk_callback_lock);
}

bool mptcp_finish_join(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	struct sock *parent = (void *)msk;
	struct socket *parent_sock;
	bool ret;

	pr_debug("msk=%p, subflow=%p", msk, subflow);

	/* mptcp socket already closing? */
	if (!mptcp_is_fully_established(parent))
		return false;

	if (!msk->pm.server_side)
		return true;

	if (!mptcp_pm_allow_new_subflow(msk))
		return false;

	/* active connections are already on conn_list, and we can't acquire
	 * msk lock here.
	 * use the join list lock as synchronization point and double-check
	 * msk status to avoid racing with mptcp_close()
	 */
	spin_lock_bh(&msk->join_list_lock);
	ret = inet_sk_state_load(parent) == TCP_ESTABLISHED;
	if (ret && !WARN_ON_ONCE(!list_empty(&subflow->node)))
		list_add_tail(&subflow->node, &msk->join_list);
	spin_unlock_bh(&msk->join_list_lock);
	if (!ret)
		return false;

	/* attach to msk socket only after we are sure it will deal with us
	 * at close time
	 */
	parent_sock = READ_ONCE(parent->sk_socket);
	if (parent_sock && !sk->sk_socket)
		mptcp_sock_graft(sk, parent_sock);
	subflow->map_seq = READ_ONCE(msk->ack_seq);
	return true;
}

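/* ->stream_memory_free() for the MPTCP socket: when called for a wakeup
 * decision, report write space only if the MPTCP-level MPTCP_SEND_SPACE
 * flag is set; otherwise always report free space.
 */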
static bool mptcp_memory_free(const struct sock *sk, int wake)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	return wake ? test_bit(MPTCP_SEND_SPACE, &msk->flags) : true;
}

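/* proto hooks for the MPTCP master socket. Memory accounting and the
 * sysctl limits are shared with plain TCP; the hashinfo is filled in by
 * mptcp_proto_init() below. SLAB_TYPESAFE_BY_RCU is likely needed so that
 * lockless lookups (e.g. by MPTCP token) can tolerate msk reuse.
 */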
static struct proto mptcp_prot = {
	.name		= "MPTCP",
	.owner		= THIS_MODULE,
	.init		= mptcp_init_sock,
	.disconnect	= mptcp_disconnect,
	.close		= mptcp_close,
	.accept		= mptcp_accept,
	.setsockopt	= mptcp_setsockopt,
	.getsockopt	= mptcp_getsockopt,
	.shutdown	= tcp_shutdown,
	.destroy	= mptcp_destroy,
	.sendmsg	= mptcp_sendmsg,
	.recvmsg	= mptcp_recvmsg,
	.release_cb	= mptcp_release_cb,
	.hash		= mptcp_hash,
	.unhash		= mptcp_unhash,
	.get_port	= mptcp_get_port,
	.sockets_allocated	= &mptcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.stream_memory_free	= mptcp_memory_free,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_mem	= sysctl_tcp_mem,
	.obj_size	= sizeof(struct mptcp_sock),
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
	.no_autobind	= true,
};

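/* bind() is delegated to the initial (MP_CAPABLE) subflow socket; on
 * success the bound address is copied back to the MPTCP socket.
 */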
static int mptcp_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	lock_sock(sock->sk);
	ssock = __mptcp_nmpc_socket(msk);
	if (!ssock) {
		err = -EINVAL;
		goto unlock;
	}

	err = ssock->ops->bind(ssock, uaddr, addr_len);
	if (!err)
		mptcp_copy_inaddrs(sock->sk, ssock->sk);

unlock:
	release_sock(sock->sk);
	return err;
}

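/* Give up on MPTCP before the SYN is sent: clear the MP_CAPABLE request on
 * the initial subflow and mark the msk as fallen back to plain TCP.
 */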
static void mptcp_subflow_early_fallback(struct mptcp_sock *msk,
					 struct mptcp_subflow_context *subflow)
{
	subflow->request_mptcp = 0;
	__mptcp_do_fallback(msk);
}

static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
				int addr_len, int flags)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct mptcp_subflow_context *subflow;
	struct socket *ssock;
	int err;

	lock_sock(sock->sk);
	if (sock->state != SS_UNCONNECTED && msk->subflow) {
		/* pending connection or invalid state, let existing subflow
		 * cope with that
		 */
		ssock = msk->subflow;
		goto do_connect;
	}

	ssock = __mptcp_nmpc_socket(msk);
	if (!ssock) {
		err = -EINVAL;
		goto unlock;
	}

	mptcp_token_destroy(msk);
	inet_sk_state_store(sock->sk, TCP_SYN_SENT);
	subflow = mptcp_subflow_ctx(ssock->sk);
#ifdef CONFIG_TCP_MD5SIG
	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
	 * TCP option space.
	 */
	if (rcu_access_pointer(tcp_sk(ssock->sk)->md5sig_info))
		mptcp_subflow_early_fallback(msk, subflow);
#endif
	if (subflow->request_mptcp && mptcp_token_new_connect(ssock->sk))
		mptcp_subflow_early_fallback(msk, subflow);

do_connect:
	err = ssock->ops->connect(ssock, uaddr, addr_len, flags);
	sock->state = ssock->state;

	/* on successful connect, the msk state will be moved to established by
	 * subflow_finish_connect()
	 */
	if (!err || err == -EINPROGRESS)
		mptcp_copy_inaddrs(sock->sk, ssock->sk);
	else
		inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));

unlock:
	release_sock(sock->sk);
	return err;
}

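/* listen() is delegated to the initial subflow socket; the MPTCP socket
 * mirrors the resulting TCP state and the listening address.
 */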
static int mptcp_listen(struct socket *sock, int backlog)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	pr_debug("msk=%p", msk);

	lock_sock(sock->sk);
	ssock = __mptcp_nmpc_socket(msk);
	if (!ssock) {
		err = -EINVAL;
		goto unlock;
	}

	mptcp_token_destroy(msk);
	inet_sk_state_store(sock->sk, TCP_LISTEN);
	sock_set_flag(sock->sk, SOCK_RCU_FREE);

	err = ssock->ops->listen(ssock, backlog);
	inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
	if (!err)
		mptcp_copy_inaddrs(sock->sk, ssock->sk);

unlock:
	release_sock(sock->sk);
	return err;
}

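/* accept() is performed on the listener subflow. For MPTCP sockets the
 * already established subflows are grafted onto the new struct socket, so
 * the TCP stack can reach the MPTCP-level wait queue; fallback (plain TCP)
 * children are returned as-is.
 */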
static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
			       int flags, bool kern)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	pr_debug("msk=%p", msk);

	lock_sock(sock->sk);
	if (sock->sk->sk_state != TCP_LISTEN)
		goto unlock_fail;

	ssock = __mptcp_nmpc_socket(msk);
	if (!ssock)
		goto unlock_fail;

	clear_bit(MPTCP_DATA_READY, &msk->flags);
	sock_hold(ssock->sk);
	release_sock(sock->sk);

	err = ssock->ops->accept(sock, newsock, flags, kern);
	if (err == 0 && !mptcp_is_tcpsk(newsock->sk)) {
		struct mptcp_sock *msk = mptcp_sk(newsock->sk);
		struct mptcp_subflow_context *subflow;

		/* set ssk->sk_socket of accept()ed flows to mptcp socket.
		 * This is needed so NOSPACE flag can be set from tcp stack.
		 */
		__mptcp_flush_join_list(msk);
		mptcp_for_each_subflow(msk, subflow) {
			struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

			if (!ssk->sk_socket)
				mptcp_sock_graft(ssk, newsock);
		}
	}

	if (inet_csk_listen_poll(ssock->sk))
		set_bit(MPTCP_DATA_READY, &msk->flags);
	sock_put(ssock->sk);
	return err;

unlock_fail:
	release_sock(sock->sk);
	return -EINVAL;
}

static __poll_t mptcp_check_readable(struct mptcp_sock *msk)
{
	return test_bit(MPTCP_DATA_READY, &msk->flags) ? EPOLLIN | EPOLLRDNORM :
	       0;
}

static __poll_t mptcp_poll(struct file *file, struct socket *sock,
			   struct poll_table_struct *wait)
{
	struct sock *sk = sock->sk;
	struct mptcp_sock *msk;
	__poll_t mask = 0;
	int state;

	msk = mptcp_sk(sk);
	sock_poll_wait(file, sock, wait);

	state = inet_sk_state_load(sk);
	if (state == TCP_LISTEN)
		return mptcp_check_readable(msk);

	if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) {
		mask |= mptcp_check_readable(msk);
		if (sk_stream_is_writeable(sk) &&
		    test_bit(MPTCP_SEND_SPACE, &msk->flags))
			mask |= EPOLLOUT | EPOLLWRNORM;
	}
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;

	return mask;
}

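/* shutdown() is propagated to every subflow. For non-fallback sockets a
 * DATA_FIN is announced by advancing write_seq and setting
 * snd_data_fin_enable before shutting down the subflows.
 */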
static int mptcp_shutdown(struct socket *sock, int how)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct mptcp_subflow_context *subflow;
	int ret = 0;

	pr_debug("msk=%p, how=%d", msk, how);

	lock_sock(sock->sk);

	how++;
	if ((how & ~SHUTDOWN_MASK) || !how) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (sock->state == SS_CONNECTING) {
		if ((1 << sock->sk->sk_state) &
		    (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
			sock->state = SS_DISCONNECTING;
		else
			sock->state = SS_CONNECTED;
	}

	/* If we've already sent a FIN, or it's a closed state, skip this. */
	if (__mptcp_check_fallback(msk)) {
		if (how == SHUT_WR || how == SHUT_RDWR)
			inet_sk_state_store(sock->sk, TCP_FIN_WAIT1);

		mptcp_for_each_subflow(msk, subflow) {
			struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);

			mptcp_subflow_shutdown(sock->sk, tcp_sk, how);
		}
	} else if ((how & SEND_SHUTDOWN) &&
		   ((1 << sock->sk->sk_state) &
		    (TCPF_ESTABLISHED | TCPF_SYN_SENT |
		     TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) &&
		   mptcp_close_state(sock->sk)) {
		__mptcp_flush_join_list(msk);

		WRITE_ONCE(msk->write_seq, msk->write_seq + 1);
		WRITE_ONCE(msk->snd_data_fin_enable, 1);

		mptcp_for_each_subflow(msk, subflow) {
			struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);

			mptcp_subflow_shutdown(sock->sk, tcp_sk, how);
		}
	}

	/* Wake up anyone sleeping in poll. */
	sock->sk->sk_state_change(sock->sk);

out_unlock:
	release_sock(sock->sk);

	return ret;
}

static const struct proto_ops mptcp_stream_ops = {
	.family		   = PF_INET,
	.owner		   = THIS_MODULE,
	.release	   = inet_release,
	.bind		   = mptcp_bind,
	.connect	   = mptcp_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = mptcp_stream_accept,
	.getname	   = inet_getname,
	.poll		   = mptcp_poll,
	.ioctl		   = inet_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = mptcp_listen,
	.shutdown	   = mptcp_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = inet_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = inet_sendpage,
};

static struct inet_protosw mptcp_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_MPTCP,
	.prot		= &mptcp_prot,
	.ops		= &mptcp_stream_ops,
	.flags		= INET_PROTOSW_ICSK,
};

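/* Register MPTCP with the IPv4 stack: share TCP's hashinfo, allocate the
 * per-cpu socket counter, initialize the subflow, path-manager and token
 * code, then register mptcp_prot and the SOCK_STREAM/IPPROTO_MPTCP protosw.
 *
 * Illustrative userspace usage only (a sketch, not part of the kernel
 * build; assumes the uapi headers define IPPROTO_MPTCP, value 262, and
 * uses a loopback peer purely as an example):
 *
 *	struct sockaddr_in addr = {
 *		.sin_family = AF_INET,
 *		.sin_port = htons(8080),
 *		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
 *	};
 *	int one = 1;
 *	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * SO_REUSEADDR set this way is forwarded to the initial subflow by
 * mptcp_setsockopt_sol_socket(), see above.
 */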
void __init mptcp_proto_init(void)
{
	mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo;

	if (percpu_counter_init(&mptcp_sockets_allocated, 0, GFP_KERNEL))
		panic("Failed to allocate MPTCP pcpu counter\n");

	mptcp_subflow_init();
	mptcp_pm_init();
	mptcp_token_init();

	if (proto_register(&mptcp_prot, 1) != 0)
		panic("Failed to register MPTCP proto.\n");

	inet_register_protosw(&mptcp_protosw);

	BUILD_BUG_ON(sizeof(struct mptcp_skb_cb) > sizeof_field(struct sk_buff, cb));
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static const struct proto_ops mptcp_v6_stream_ops = {
	.family		   = PF_INET6,
	.owner		   = THIS_MODULE,
	.release	   = inet6_release,
	.bind		   = mptcp_bind,
	.connect	   = mptcp_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = mptcp_stream_accept,
	.getname	   = inet6_getname,
	.poll		   = mptcp_poll,
	.ioctl		   = inet6_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = mptcp_listen,
	.shutdown	   = mptcp_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet6_sendmsg,
	.recvmsg	   = inet6_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = inet_sendpage,
#ifdef CONFIG_COMPAT
	.compat_ioctl	   = inet6_compat_ioctl,
#endif
};

static struct proto mptcp_v6_prot;

static void mptcp_v6_destroy(struct sock *sk)
{
	mptcp_destroy(sk);
	inet6_destroy_sock(sk);
}

static struct inet_protosw mptcp_v6_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_MPTCP,
	.prot		= &mptcp_v6_prot,
	.ops		= &mptcp_v6_stream_ops,
	.flags		= INET_PROTOSW_ICSK,
};

int __init mptcp_proto_v6_init(void)
M
	int err;

	mptcp_v6_prot = mptcp_prot;
	strcpy(mptcp_v6_prot.name, "MPTCPv6");
	mptcp_v6_prot.slab = NULL;
	mptcp_v6_prot.destroy = mptcp_v6_destroy;
	mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock);

	err = proto_register(&mptcp_v6_prot, 1);
	if (err)
		return err;

	err = inet6_register_protosw(&mptcp_v6_protosw);
	if (err)
		proto_unregister(&mptcp_v6_prot);

	return err;
}
#endif