protocol.c 89.2 KB
Newer Older
M
Mat Martineau 已提交
1 2 3 4 5 6 7 8 9 10 11
// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
12 13
#include <linux/sched/signal.h>
#include <linux/atomic.h>
M
Mat Martineau 已提交
14 15 16 17 18
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
19
#include <net/tcp_states.h>
20 21 22
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/transp_v6.h>
#endif
M
Mat Martineau 已提交
23
#include <net/mptcp.h>
P
Paolo Abeni 已提交
24
#include <net/xfrm.h>
M
Mat Martineau 已提交
25
#include "protocol.h"
26
#include "mib.h"
M
Mat Martineau 已提交
27

28 29 30
#define CREATE_TRACE_POINTS
#include <trace/events/mptcp.h>

31 32 33 34 35 36 37
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
struct mptcp6_sock {
	struct mptcp_sock msk;
	struct ipv6_pinfo np;
};
#endif

38
struct mptcp_skb_cb {
39 40
	u64 map_seq;
	u64 end_seq;
41
	u32 offset;
42
	u8  has_rxtstamp:1;
43 44 45 46
};

#define MPTCP_SKB_CB(__skb)	((struct mptcp_skb_cb *)&((__skb)->cb[0]))

47 48 49 50
enum {
	MPTCP_CMSG_TS = BIT(0),
};

51 52
static struct percpu_counter mptcp_sockets_allocated;

P
Paolo Abeni 已提交
53
static void __mptcp_destroy_sock(struct sock *sk);
54
static void __mptcp_check_send_data_fin(struct sock *sk);
P
Paolo Abeni 已提交
55

P
Paolo Abeni 已提交
56 57 58
DEFINE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions);
static struct net_device mptcp_napi_dev;

59 60 61 62
/* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not
 * completed yet or has failed, return the subflow socket.
 * Otherwise return NULL.
 */
63
struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk)
64
{
65
	if (!msk->subflow || READ_ONCE(msk->can_ack))
66 67 68 69 70
		return NULL;

	return msk->subflow;
}

71 72 73
/* Returns end sequence number of the receiver's advertised window */
static u64 mptcp_wnd_end(const struct mptcp_sock *msk)
{
74
	return READ_ONCE(msk->wnd_end);
75 76
}

77
static bool mptcp_is_tcpsk(struct sock *sk)
F
Florian Westphal 已提交
78 79 80 81 82 83 84 85 86 87 88 89
{
	struct socket *sock = sk->sk_socket;

	if (unlikely(sk->sk_prot == &tcp_prot)) {
		/* we are being invoked after mptcp_accept() has
		 * accepted a non-mp-capable flow: sk is a tcp_sk,
		 * not an mptcp one.
		 *
		 * Hand the socket over to tcp so all further socket ops
		 * bypass mptcp.
		 */
		sock->ops = &inet_stream_ops;
90
		return true;
F
Florian Westphal 已提交
91 92 93
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	} else if (unlikely(sk->sk_prot == &tcpv6_prot)) {
		sock->ops = &inet6_stream_ops;
94
		return true;
F
Florian Westphal 已提交
95 96 97
#endif
	}

98
	return false;
F
Florian Westphal 已提交
99 100
}

101
static int __mptcp_socket_create(struct mptcp_sock *msk)
102 103 104 105 106 107 108 109
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	struct socket *ssock;
	int err;

	err = mptcp_subflow_create_socket(sk, &ssock);
	if (err)
110
		return err;
111

112
	msk->first = ssock->sk;
113 114
	msk->subflow = ssock;
	subflow = mptcp_subflow_ctx(ssock->sk);
115
	list_add(&subflow->node, &msk->conn_list);
P
Paolo Abeni 已提交
116
	sock_hold(ssock->sk);
117
	subflow->request_mptcp = 1;
118
	mptcp_sock_graft(msk->first, sk->sk_socket);
119

120
	return 0;
121 122
}

123 124 125 126 127 128
static void mptcp_drop(struct sock *sk, struct sk_buff *skb)
{
	sk_drops_add(sk, skb);
	__kfree_skb(skb);
}

129 130 131 132 133 134 135 136 137 138
static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
			       struct sk_buff *from)
{
	bool fragstolen;
	int delta;

	if (MPTCP_SKB_CB(from)->offset ||
	    !skb_try_coalesce(to, from, &fragstolen, &delta))
		return false;

P
Paolo Abeni 已提交
139 140 141
	pr_debug("colesced seq %llx into %llx new len %d new end seq %llx",
		 MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq,
		 to->len, MPTCP_SKB_CB(from)->end_seq);
142
	MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq;
143 144 145 146 147 148
	kfree_skb_partial(from, fragstolen);
	atomic_add(delta, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, delta);
	return true;
}

149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170
static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to,
				   struct sk_buff *from)
{
	if (MPTCP_SKB_CB(from)->map_seq != MPTCP_SKB_CB(to)->end_seq)
		return false;

	return mptcp_try_coalesce((struct sock *)msk, to, from);
}

/* "inspired" by tcp_data_queue_ofo(), main differences:
 * - use mptcp seqs
 * - don't cope with sacks
 */
static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb)
{
	struct sock *sk = (struct sock *)msk;
	struct rb_node **p, *parent;
	u64 seq, end_seq, max_seq;
	struct sk_buff *skb1;

	seq = MPTCP_SKB_CB(skb)->map_seq;
	end_seq = MPTCP_SKB_CB(skb)->end_seq;
171
	max_seq = READ_ONCE(msk->rcv_wnd_sent);
172

P
Paolo Abeni 已提交
173 174
	pr_debug("msk=%p seq=%llx limit=%llx empty=%d", msk, seq, max_seq,
		 RB_EMPTY_ROOT(&msk->out_of_order_queue));
175
	if (after64(end_seq, max_seq)) {
176 177
		/* out of window */
		mptcp_drop(sk, skb);
178 179 180
		pr_debug("oow by %lld, rcv_wnd_sent %llu\n",
			 (unsigned long long)end_seq - (unsigned long)max_seq,
			 (unsigned long long)msk->rcv_wnd_sent);
P
Paolo Abeni 已提交
181
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_NODSSWINDOW);
182 183 184 185
		return;
	}

	p = &msk->out_of_order_queue.rb_node;
P
Paolo Abeni 已提交
186
	MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUE);
187 188 189 190 191 192 193 194 195 196
	if (RB_EMPTY_ROOT(&msk->out_of_order_queue)) {
		rb_link_node(&skb->rbnode, NULL, p);
		rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);
		msk->ooo_last_skb = skb;
		goto end;
	}

	/* with 2 subflows, adding at end of ooo queue is quite likely
	 * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
	 */
P
Paolo Abeni 已提交
197 198 199
	if (mptcp_ooo_try_coalesce(msk, msk->ooo_last_skb, skb)) {
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE);
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL);
200
		return;
P
Paolo Abeni 已提交
201
	}
202 203 204

	/* Can avoid an rbtree lookup if we are adding skb after ooo_last_skb */
	if (!before64(seq, MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq)) {
P
Paolo Abeni 已提交
205
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL);
206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223
		parent = &msk->ooo_last_skb->rbnode;
		p = &parent->rb_right;
		goto insert;
	}

	/* Find place to insert this segment. Handle overlaps on the way. */
	parent = NULL;
	while (*p) {
		parent = *p;
		skb1 = rb_to_skb(parent);
		if (before64(seq, MPTCP_SKB_CB(skb1)->map_seq)) {
			p = &parent->rb_left;
			continue;
		}
		if (before64(seq, MPTCP_SKB_CB(skb1)->end_seq)) {
			if (!after64(end_seq, MPTCP_SKB_CB(skb1)->end_seq)) {
				/* All the bits are present. Drop. */
				mptcp_drop(sk, skb);
P
Paolo Abeni 已提交
224
				MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
225 226 227 228 229 230 231 232 233 234 235 236 237 238 239
				return;
			}
			if (after64(seq, MPTCP_SKB_CB(skb1)->map_seq)) {
				/* partial overlap:
				 *     |     skb      |
				 *  |     skb1    |
				 * continue traversing
				 */
			} else {
				/* skb's seq == skb1's seq and skb covers skb1.
				 * Replace skb1 with skb.
				 */
				rb_replace_node(&skb1->rbnode, &skb->rbnode,
						&msk->out_of_order_queue);
				mptcp_drop(sk, skb1);
P
Paolo Abeni 已提交
240
				MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
241 242 243
				goto merge_right;
			}
		} else if (mptcp_ooo_try_coalesce(msk, skb1, skb)) {
P
Paolo Abeni 已提交
244
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE);
245 246 247 248
			return;
		}
		p = &parent->rb_right;
	}
P
Paolo Abeni 已提交
249

250 251 252 253 254 255 256 257 258 259 260 261
insert:
	/* Insert segment into RB tree. */
	rb_link_node(&skb->rbnode, parent, p);
	rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);

merge_right:
	/* Remove other segments covered by skb. */
	while ((skb1 = skb_rb_next(skb)) != NULL) {
		if (before64(end_seq, MPTCP_SKB_CB(skb1)->end_seq))
			break;
		rb_erase(&skb1->rbnode, &msk->out_of_order_queue);
		mptcp_drop(sk, skb1);
P
Paolo Abeni 已提交
262
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
263 264 265 266 267 268 269 270 271 272 273 274 275
	}
	/* If there is no skb after us, we are the last_skb ! */
	if (!skb1)
		msk->ooo_last_skb = skb;

end:
	skb_condense(skb);
	skb_set_owner_r(skb, sk);
}

static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
			     struct sk_buff *skb, unsigned int offset,
			     size_t copy_len)
276
{
277
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
278
	struct sock *sk = (struct sock *)msk;
279
	struct sk_buff *tail;
280
	bool has_rxtstamp;
281 282 283

	__skb_unlink(skb, &ssk->sk_receive_queue);

284 285
	skb_ext_reset(skb);
	skb_orphan(skb);
286

287 288
	/* try to fetch required memory from subflow */
	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
289 290 291
		int amount = sk_mem_pages(skb->truesize) << SK_MEM_QUANTUM_SHIFT;

		if (ssk->sk_forward_alloc < amount)
292
			goto drop;
293 294 295

		ssk->sk_forward_alloc -= amount;
		sk->sk_forward_alloc += amount;
296 297
	}

298 299
	has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp;

300 301 302 303 304 305
	/* the skb map_seq accounts for the skb offset:
	 * mptcp_subflow_get_mapped_dsn() is based on the current tp->copied_seq
	 * value
	 */
	MPTCP_SKB_CB(skb)->map_seq = mptcp_subflow_get_mapped_dsn(subflow);
	MPTCP_SKB_CB(skb)->end_seq = MPTCP_SKB_CB(skb)->map_seq + copy_len;
306
	MPTCP_SKB_CB(skb)->offset = offset;
307
	MPTCP_SKB_CB(skb)->has_rxtstamp = has_rxtstamp;
308

309 310
	if (MPTCP_SKB_CB(skb)->map_seq == msk->ack_seq) {
		/* in sequence */
311
		WRITE_ONCE(msk->ack_seq, msk->ack_seq + copy_len);
312 313 314
		tail = skb_peek_tail(&sk->sk_receive_queue);
		if (tail && mptcp_try_coalesce(sk, tail, skb))
			return true;
315

316 317 318 319 320 321 322 323 324 325 326
		skb_set_owner_r(skb, sk);
		__skb_queue_tail(&sk->sk_receive_queue, skb);
		return true;
	} else if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) {
		mptcp_data_queue_ofo(msk, skb);
		return false;
	}

	/* old data, keep it simple and drop the whole pkt, sender
	 * will retransmit as needed, if needed.
	 */
P
Paolo Abeni 已提交
327
	MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
328
drop:
329 330
	mptcp_drop(sk, skb);
	return false;
331 332
}

333 334 335 336 337 338 339 340
static void mptcp_stop_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	mptcp_sk(sk)->timer_ival = 0;
}

P
Paolo Abeni 已提交
341 342 343 344 345 346 347 348 349 350 351 352 353
static void mptcp_close_wake_up(struct sock *sk)
{
	if (sock_flag(sk, SOCK_DEAD))
		return;

	sk->sk_state_change(sk);
	if (sk->sk_shutdown == SHUTDOWN_MASK ||
	    sk->sk_state == TCP_CLOSE)
		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
	else
		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
}

354
static bool mptcp_pending_data_fin_ack(struct sock *sk)
355 356 357
{
	struct mptcp_sock *msk = mptcp_sk(sk);

358 359 360 361 362 363 364 365 366
	return !__mptcp_check_fallback(msk) &&
	       ((1 << sk->sk_state) &
		(TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK)) &&
	       msk->write_seq == READ_ONCE(msk->snd_una);
}

static void mptcp_check_data_fin_ack(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
367 368

	/* Look for an acknowledged DATA_FIN */
369
	if (mptcp_pending_data_fin_ack(sk)) {
370 371 372 373 374 375 376 377 378 379 380 381
		WRITE_ONCE(msk->snd_data_fin_enable, 0);

		switch (sk->sk_state) {
		case TCP_FIN_WAIT1:
			inet_sk_state_store(sk, TCP_FIN_WAIT2);
			break;
		case TCP_CLOSING:
		case TCP_LAST_ACK:
			inet_sk_state_store(sk, TCP_CLOSE);
			break;
		}

P
Paolo Abeni 已提交
382
		mptcp_close_wake_up(sk);
383 384 385
	}
}

386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405
static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (READ_ONCE(msk->rcv_data_fin) &&
	    ((1 << sk->sk_state) &
	     (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
		u64 rcv_data_fin_seq = READ_ONCE(msk->rcv_data_fin_seq);

		if (msk->ack_seq == rcv_data_fin_seq) {
			if (seq)
				*seq = rcv_data_fin_seq;

			return true;
		}
	}

	return false;
}

M
Mat Martineau 已提交
406 407 408 409 410 411 412 413
static void mptcp_set_datafin_timeout(const struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	mptcp_sk(sk)->timer_ival = min(TCP_RTO_MAX,
				       TCP_RTO_MIN << icsk->icsk_retransmits);
}

P
Paolo Abeni 已提交
414
static void __mptcp_set_timeout(struct sock *sk, long tout)
415 416 417 418
{
	mptcp_sk(sk)->timer_ival = tout > 0 ? tout : TCP_RTO_MIN;
}

P
Paolo Abeni 已提交
419 420 421 422
static long mptcp_timeout_from_subflow(const struct mptcp_subflow_context *subflow)
{
	const struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

423 424
	return inet_csk(ssk)->icsk_pending && !subflow->stale_count ?
	       inet_csk(ssk)->icsk_timeout - jiffies : 0;
P
Paolo Abeni 已提交
425 426 427 428 429 430 431 432 433 434 435 436
}

static void mptcp_set_timeout(struct sock *sk)
{
	struct mptcp_subflow_context *subflow;
	long tout = 0;

	mptcp_for_each_subflow(mptcp_sk(sk), subflow)
		tout = max(tout, mptcp_timeout_from_subflow(subflow));
	__mptcp_set_timeout(sk, tout);
}

437 438 439
static bool tcp_can_send_ack(const struct sock *ssk)
{
	return !((1 << inet_sk_state_load(ssk)) &
440
	       (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_TIME_WAIT | TCPF_CLOSE | TCPF_LISTEN));
441 442 443
}

static void mptcp_send_ack(struct mptcp_sock *msk)
444 445 446 447 448
{
	struct mptcp_subflow_context *subflow;

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
449
		bool slow;
450

451
		slow = lock_sock_fast(ssk);
452
		if (tcp_can_send_ack(ssk))
453
			tcp_send_ack(ssk);
454
		unlock_sock_fast(ssk, slow);
455
	}
456 457
}

P
Paolo Abeni 已提交
458
static void mptcp_subflow_cleanup_rbuf(struct sock *ssk)
459
{
460
	bool slow;
461

462
	slow = lock_sock_fast(ssk);
P
Paolo Abeni 已提交
463
	if (tcp_can_send_ack(ssk))
464
		tcp_cleanup_rbuf(ssk, 1);
465
	unlock_sock_fast(ssk, slow);
P
Paolo Abeni 已提交
466 467 468 469 470
}

static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty)
{
	const struct inet_connection_sock *icsk = inet_csk(ssk);
471
	u8 ack_pending = READ_ONCE(icsk->icsk_ack.pending);
P
Paolo Abeni 已提交
472 473 474 475 476 477 478
	const struct tcp_sock *tp = tcp_sk(ssk);

	return (ack_pending & ICSK_ACK_SCHED) &&
		((READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->rcv_wup) >
		  READ_ONCE(icsk->icsk_ack.rcv_mss)) ||
		 (rx_empty && ack_pending &
			      (ICSK_ACK_PUSHED2 | ICSK_ACK_PUSHED)));
479 480 481 482
}

static void mptcp_cleanup_rbuf(struct mptcp_sock *msk)
{
483
	int old_space = READ_ONCE(msk->old_wspace);
484
	struct mptcp_subflow_context *subflow;
485
	struct sock *sk = (struct sock *)msk;
P
Paolo Abeni 已提交
486 487
	int space =  __mptcp_space(sk);
	bool cleanup, rx_empty;
488

P
Paolo Abeni 已提交
489
	cleanup = (space > 0) && (space >= (old_space << 1));
490
	rx_empty = !__mptcp_rmem(sk);
491

P
Paolo Abeni 已提交
492 493
	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
494

P
Paolo Abeni 已提交
495 496
		if (cleanup || mptcp_subflow_could_cleanup(ssk, rx_empty))
			mptcp_subflow_cleanup_rbuf(ssk);
497 498 499 500
	}
}

static bool mptcp_check_data_fin(struct sock *sk)
501 502 503
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	u64 rcv_data_fin_seq;
504
	bool ret = false;
505

506
	if (__mptcp_check_fallback(msk))
507
		return ret;
508 509 510 511 512 513 514 515 516 517 518 519 520 521 522

	/* Need to ack a DATA_FIN received from a peer while this side
	 * of the connection is in ESTABLISHED, FIN_WAIT1, or FIN_WAIT2.
	 * msk->rcv_data_fin was set when parsing the incoming options
	 * at the subflow level and the msk lock was not held, so this
	 * is the first opportunity to act on the DATA_FIN and change
	 * the msk state.
	 *
	 * If we are caught up to the sequence number of the incoming
	 * DATA_FIN, send the DATA_ACK now and do state transition.  If
	 * not caught up, do nothing and let the recv code send DATA_ACK
	 * when catching up.
	 */

	if (mptcp_pending_data_fin(sk, &rcv_data_fin_seq)) {
523
		WRITE_ONCE(msk->ack_seq, msk->ack_seq + 1);
524 525 526
		WRITE_ONCE(msk->rcv_data_fin, 0);

		sk->sk_shutdown |= RCV_SHUTDOWN;
527 528
		smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
		set_bit(MPTCP_DATA_READY, &msk->flags);
529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545

		switch (sk->sk_state) {
		case TCP_ESTABLISHED:
			inet_sk_state_store(sk, TCP_CLOSE_WAIT);
			break;
		case TCP_FIN_WAIT1:
			inet_sk_state_store(sk, TCP_CLOSING);
			break;
		case TCP_FIN_WAIT2:
			inet_sk_state_store(sk, TCP_CLOSE);
			break;
		default:
			/* Other states not expected */
			WARN_ON_ONCE(1);
			break;
		}

546
		ret = true;
547
		mptcp_send_ack(msk);
P
Paolo Abeni 已提交
548
		mptcp_close_wake_up(sk);
549
	}
550
	return ret;
551 552
}

553 554 555 556 557
static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
					   struct sock *ssk,
					   unsigned int *bytes)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
558
	struct sock *sk = (struct sock *)msk;
559 560 561 562
	unsigned int moved = 0;
	bool more_data_avail;
	struct tcp_sock *tp;
	bool done = false;
563 564 565 566 567 568 569 570 571 572 573 574
	int sk_rbuf;

	sk_rbuf = READ_ONCE(sk->sk_rcvbuf);

	if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
		int ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf);

		if (unlikely(ssk_rbuf > sk_rbuf)) {
			WRITE_ONCE(sk->sk_rcvbuf, ssk_rbuf);
			sk_rbuf = ssk_rbuf;
		}
	}
575

576
	pr_debug("msk=%p ssk=%p", msk, ssk);
577 578 579 580 581 582 583 584 585 586 587 588
	tp = tcp_sk(ssk);
	do {
		u32 map_remaining, offset;
		u32 seq = tp->copied_seq;
		struct sk_buff *skb;
		bool fin;

		/* try to move as much data as available */
		map_remaining = subflow->map_data_len -
				mptcp_subflow_get_map_offset(subflow);

		skb = skb_peek(&ssk->sk_receive_queue);
589 590 591 592 593 594 595
		if (!skb) {
			/* if no data is found, a racing workqueue/recvmsg
			 * already processed the new data, stop here or we
			 * can enter an infinite loop
			 */
			if (!moved)
				done = true;
596
			break;
597
		}
598

599 600 601 602 603 604 605 606 607
		if (__mptcp_check_fallback(msk)) {
			/* if we are running under the workqueue, TCP could have
			 * collapsed skbs between dummy map creation and now
			 * be sure to adjust the size
			 */
			map_remaining = skb->len;
			subflow->map_data_len = skb->len;
		}

608 609 610 611 612 613 614 615 616 617 618 619 620
		offset = seq - TCP_SKB_CB(skb)->seq;
		fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
		if (fin) {
			done = true;
			seq++;
		}

		if (offset < skb->len) {
			size_t len = skb->len - offset;

			if (tp->urg_data)
				done = true;

621 622
			if (__mptcp_move_skb(msk, ssk, skb, offset, len))
				moved += len;
623 624 625 626 627 628 629 630 631 632 633 634
			seq += len;

			if (WARN_ON_ONCE(map_remaining < len))
				break;
		} else {
			WARN_ON_ONCE(!fin);
			sk_eat_skb(ssk, skb);
			done = true;
		}

		WRITE_ONCE(tp->copied_seq, seq);
		more_data_avail = mptcp_subflow_data_available(ssk);
635

636
		if (atomic_read(&sk->sk_rmem_alloc) > sk_rbuf) {
637 638 639
			done = true;
			break;
		}
640 641
	} while (more_data_avail);

642
	*bytes += moved;
643 644 645
	return done;
}

646
static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
647 648 649 650 651 652 653 654
{
	struct sock *sk = (struct sock *)msk;
	struct sk_buff *skb, *tail;
	bool moved = false;
	struct rb_node *p;
	u64 end_seq;

	p = rb_first(&msk->out_of_order_queue);
P
Paolo Abeni 已提交
655
	pr_debug("msk=%p empty=%d", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue));
656 657 658 659 660 661 662 663 664 665 666
	while (p) {
		skb = rb_to_skb(p);
		if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq))
			break;

		p = rb_next(p);
		rb_erase(&skb->rbnode, &msk->out_of_order_queue);

		if (unlikely(!after64(MPTCP_SKB_CB(skb)->end_seq,
				      msk->ack_seq))) {
			mptcp_drop(sk, skb);
P
Paolo Abeni 已提交
667
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
668 669 670 671 672 673 674 675 676
			continue;
		}

		end_seq = MPTCP_SKB_CB(skb)->end_seq;
		tail = skb_peek_tail(&sk->sk_receive_queue);
		if (!tail || !mptcp_ooo_try_coalesce(msk, tail, skb)) {
			int delta = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq;

			/* skip overlapping data, if any */
P
Paolo Abeni 已提交
677 678 679
			pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d",
				 MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq,
				 delta);
680 681 682 683 684 685 686 687 688
			MPTCP_SKB_CB(skb)->offset += delta;
			__skb_queue_tail(&sk->sk_receive_queue, skb);
		}
		msk->ack_seq = end_seq;
		moved = true;
	}
	return moved;
}

689 690 691
/* In most cases we will be able to lock the mptcp socket.  If its already
 * owned, we need to defer to the work queue to avoid ABBA deadlock.
 */
692
static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
693 694 695 696
{
	struct sock *sk = (struct sock *)msk;
	unsigned int moved = 0;

697 698
	__mptcp_move_skbs_from_subflow(msk, ssk, &moved);
	__mptcp_ofo_queue(msk);
699 700 701 702 703 704
	if (unlikely(ssk->sk_err)) {
		if (!sock_owned_by_user(sk))
			__mptcp_error_report(sk);
		else
			set_bit(MPTCP_ERROR_REPORT,  &msk->flags);
	}
705

706 707 708 709 710 711 712
	/* If the moves have caught up with the DATA_FIN sequence number
	 * it's time to ack the DATA_FIN and change socket state, but
	 * this is not a good place to change state. Let the workqueue
	 * do it.
	 */
	if (mptcp_pending_data_fin(sk, NULL))
		mptcp_schedule_work(sk);
713
	return moved > 0;
714 715 716
}

void mptcp_data_ready(struct sock *sk, struct sock *ssk)
717
{
718
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
719
	struct mptcp_sock *msk = mptcp_sk(sk);
720
	int sk_rbuf, ssk_rbuf;
721

722 723 724 725 726 727 728
	/* The peer can send data while we are shutting down this
	 * subflow at msk destruction time, but we must avoid enqueuing
	 * more data to the msk receive queue
	 */
	if (unlikely(subflow->disposable))
		return;

729 730 731 732 733
	ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf);
	sk_rbuf = READ_ONCE(sk->sk_rcvbuf);
	if (unlikely(ssk_rbuf > sk_rbuf))
		sk_rbuf = ssk_rbuf;

734
	/* over limit? can't append more skbs to msk, Also, no need to wake-up*/
735 736
	if (__mptcp_rmem(sk) > sk_rbuf) {
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED);
737
		return;
738
	}
739

740 741 742 743
	/* Wake-up the reader only for in-sequence data */
	mptcp_data_lock(sk);
	if (move_skbs_to_msk(msk, ssk)) {
		set_bit(MPTCP_DATA_READY, &msk->flags);
744
		sk->sk_data_ready(sk);
745 746
	}
	mptcp_data_unlock(sk);
747 748
}

749
static bool mptcp_do_flush_join_list(struct mptcp_sock *msk)
750
{
P
Paolo Abeni 已提交
751
	struct mptcp_subflow_context *subflow;
752
	bool ret = false;
P
Paolo Abeni 已提交
753

754
	if (likely(list_empty(&msk->join_list)))
755
		return false;
756 757

	spin_lock_bh(&msk->join_list_lock);
758 759
	list_for_each_entry(subflow, &msk->join_list, node) {
		u32 sseq = READ_ONCE(subflow->setsockopt_seq);
760

761 762 763 764
		mptcp_propagate_sndbuf((struct sock *)msk, mptcp_subflow_tcp_sock(subflow));
		if (READ_ONCE(msk->setsockopt_seq) != sseq)
			ret = true;
	}
765 766
	list_splice_tail_init(&msk->join_list, &msk->conn_list);
	spin_unlock_bh(&msk->join_list_lock);
767

768
	return ret;
769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789
}

void __mptcp_flush_join_list(struct mptcp_sock *msk)
{
	if (likely(!mptcp_do_flush_join_list(msk)))
		return;

	if (!test_and_set_bit(MPTCP_WORK_SYNC_SETSOCKOPT, &msk->flags))
		mptcp_schedule_work((struct sock *)msk);
}

static void mptcp_flush_join_list(struct mptcp_sock *msk)
{
	bool sync_needed = test_and_clear_bit(MPTCP_WORK_SYNC_SETSOCKOPT, &msk->flags);

	might_sleep();

	if (!mptcp_do_flush_join_list(msk) && !sync_needed)
		return;

	mptcp_sockopt_sync_all(msk);
790 791
}

792 793 794 795 796 797 798 799 800 801
static bool mptcp_timer_pending(struct sock *sk)
{
	return timer_pending(&inet_csk(sk)->icsk_retransmit_timer);
}

static void mptcp_reset_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	unsigned long tout;

P
Paolo Abeni 已提交
802 803 804 805
	/* prevent rescheduling on close */
	if (unlikely(inet_sk_state_load(sk) == TCP_CLOSE))
		return;

P
Paolo Abeni 已提交
806
	tout = mptcp_sk(sk)->timer_ival;
807 808 809
	sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + tout);
}

P
Paolo Abeni 已提交
810 811 812 813 814 815 816 817 818 819 820 821 822
bool mptcp_schedule_work(struct sock *sk)
{
	if (inet_sk_state_load(sk) != TCP_CLOSE &&
	    schedule_work(&mptcp_sk(sk)->work)) {
		/* each subflow already holds a reference to the sk, and the
		 * workqueue is invoked by a subflow, so sk can't go away here.
		 */
		sock_hold(sk);
		return true;
	}
	return false;
}

823 824
void mptcp_subflow_eof(struct sock *sk)
{
P
Paolo Abeni 已提交
825 826
	if (!test_and_set_bit(MPTCP_WORK_EOF, &mptcp_sk(sk)->flags))
		mptcp_schedule_work(sk);
827 828
}

829 830 831 832 833 834 835 836
static void mptcp_check_for_eof(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	int receivers = 0;

	mptcp_for_each_subflow(msk, subflow)
		receivers += !subflow->rx_eof;
P
Paolo Abeni 已提交
837 838
	if (receivers)
		return;
839

P
Paolo Abeni 已提交
840
	if (!(sk->sk_shutdown & RCV_SHUTDOWN)) {
841 842 843 844 845 846 847 848 849
		/* hopefully temporary hack: propagate shutdown status
		 * to msk, when all subflows agree on it
		 */
		sk->sk_shutdown |= RCV_SHUTDOWN;

		smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
		set_bit(MPTCP_DATA_READY, &msk->flags);
		sk->sk_data_ready(sk);
	}
P
Paolo Abeni 已提交
850 851 852 853 854 855

	switch (sk->sk_state) {
	case TCP_ESTABLISHED:
		inet_sk_state_store(sk, TCP_CLOSE_WAIT);
		break;
	case TCP_FIN_WAIT1:
856 857 858
		inet_sk_state_store(sk, TCP_CLOSING);
		break;
	case TCP_FIN_WAIT2:
P
Paolo Abeni 已提交
859 860 861 862 863 864
		inet_sk_state_store(sk, TCP_CLOSE);
		break;
	default:
		return;
	}
	mptcp_close_wake_up(sk);
865 866
}

867 868 869 870 871 872 873 874
static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;

	sock_owned_by_me(sk);

	mptcp_for_each_subflow(msk, subflow) {
875
		if (READ_ONCE(subflow->data_avail))
876 877 878 879 880 881
			return mptcp_subflow_tcp_sock(subflow);
	}

	return NULL;
}

882 883 884
static bool mptcp_skb_can_collapse_to(u64 write_seq,
				      const struct sk_buff *skb,
				      const struct mptcp_ext *mpext)
885 886 887 888
{
	if (!tcp_skb_can_collapse_to(skb))
		return false;

889 890 891 892 893
	/* can collapse only if MPTCP level sequence is in order and this
	 * mapping has not been xmitted yet
	 */
	return mpext && mpext->data_seq + mpext->data_len == write_seq &&
	       !mpext->frozen;
894 895
}

P
Paolo Abeni 已提交
896 897 898 899 900
/* we can append data to the given data frag if:
 * - there is space available in the backing page_frag
 * - the data frag tail matches the current page_frag free offset
 * - the data frag end sequence number matches the current write seq
 */
901 902 903 904 905
static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
				       const struct page_frag *pfrag,
				       const struct mptcp_data_frag *df)
{
	return df && pfrag->page == df->page &&
906
		pfrag->size - pfrag->offset > 0 &&
P
Paolo Abeni 已提交
907
		pfrag->offset == (df->offset + df->data_len) &&
908 909 910
		df->data_seq + df->data_len == msk->write_seq;
}

P
Paolo Abeni 已提交
911
static int mptcp_wmem_with_overhead(int size)
P
Paolo Abeni 已提交
912
{
P
Paolo Abeni 已提交
913
	return size + ((sizeof(struct mptcp_data_frag) * size) >> PAGE_SHIFT);
P
Paolo Abeni 已提交
914 915 916 917
}

static void __mptcp_wmem_reserve(struct sock *sk, int size)
{
P
Paolo Abeni 已提交
918
	int amount = mptcp_wmem_with_overhead(size);
P
Paolo Abeni 已提交
919 920 921
	struct mptcp_sock *msk = mptcp_sk(sk);

	WARN_ON_ONCE(msk->wmem_reserved);
922 923 924
	if (WARN_ON_ONCE(amount < 0))
		amount = 0;

P
Paolo Abeni 已提交
925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955
	if (amount <= sk->sk_forward_alloc)
		goto reserve;

	/* under memory pressure try to reserve at most a single page
	 * otherwise try to reserve the full estimate and fallback
	 * to a single page before entering the error path
	 */
	if ((tcp_under_memory_pressure(sk) && amount > PAGE_SIZE) ||
	    !sk_wmem_schedule(sk, amount)) {
		if (amount <= PAGE_SIZE)
			goto nomem;

		amount = PAGE_SIZE;
		if (!sk_wmem_schedule(sk, amount))
			goto nomem;
	}

reserve:
	msk->wmem_reserved = amount;
	sk->sk_forward_alloc -= amount;
	return;

nomem:
	/* we will wait for memory on next allocation */
	msk->wmem_reserved = -1;
}

static void __mptcp_update_wmem(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

956 957 958 959
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(!lockdep_is_held(&sk->sk_lock.slock));
#endif

P
Paolo Abeni 已提交
960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981
	if (!msk->wmem_reserved)
		return;

	if (msk->wmem_reserved < 0)
		msk->wmem_reserved = 0;
	if (msk->wmem_reserved > 0) {
		sk->sk_forward_alloc += msk->wmem_reserved;
		msk->wmem_reserved = 0;
	}
}

static bool mptcp_wmem_alloc(struct sock *sk, int size)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	/* check for pre-existing error condition */
	if (msk->wmem_reserved < 0)
		return false;

	if (msk->wmem_reserved >= size)
		goto account;

982 983 984
	mptcp_data_lock(sk);
	if (!sk_wmem_schedule(sk, size)) {
		mptcp_data_unlock(sk);
P
Paolo Abeni 已提交
985
		return false;
986
	}
P
Paolo Abeni 已提交
987 988 989

	sk->sk_forward_alloc -= size;
	msk->wmem_reserved += size;
990
	mptcp_data_unlock(sk);
P
Paolo Abeni 已提交
991 992 993 994 995 996

account:
	msk->wmem_reserved -= size;
	return true;
}

997 998 999 1000 1001 1002 1003 1004 1005
static void mptcp_wmem_uncharge(struct sock *sk, int size)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (msk->wmem_reserved < 0)
		msk->wmem_reserved = 0;
	msk->wmem_reserved += size;
}

1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024
static void mptcp_mem_reclaim_partial(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	/* if we are experiencing a transint allocation error,
	 * the forward allocation memory has been already
	 * released
	 */
	if (msk->wmem_reserved < 0)
		return;

	mptcp_data_lock(sk);
	sk->sk_forward_alloc += msk->wmem_reserved;
	sk_mem_reclaim_partial(sk);
	msk->wmem_reserved = sk->sk_forward_alloc;
	sk->sk_forward_alloc = 0;
	mptcp_data_unlock(sk);
}

1025 1026 1027
static void dfrag_uncharge(struct sock *sk, int len)
{
	sk_mem_uncharge(sk, len);
1028
	sk_wmem_queued_add(sk, -len);
1029 1030 1031
}

static void dfrag_clear(struct sock *sk, struct mptcp_data_frag *dfrag)
1032
{
1033 1034
	int len = dfrag->data_len + dfrag->overhead;

1035
	list_del(&dfrag->list);
1036
	dfrag_uncharge(sk, len);
1037 1038 1039
	put_page(dfrag->page);
}

1040
static void __mptcp_clean_una(struct sock *sk)
1041 1042 1043
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_data_frag *dtmp, *dfrag;
1044
	bool cleaned = false;
1045 1046 1047 1048 1049 1050
	u64 snd_una;

	/* on fallback we just need to ignore snd_una, as this is really
	 * plain TCP
	 */
	if (__mptcp_check_fallback(msk))
1051
		msk->snd_una = READ_ONCE(msk->snd_nxt);
1052

1053
	snd_una = msk->snd_una;
1054 1055 1056 1057
	list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) {
		if (after64(dfrag->data_seq + dfrag->data_len, snd_una))
			break;

1058 1059 1060 1061 1062 1063 1064 1065
		if (unlikely(dfrag == msk->first_pending)) {
			/* in recovery mode can see ack after the current snd head */
			if (WARN_ON_ONCE(!msk->recovery))
				break;

			WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
		}

1066 1067 1068 1069
		dfrag_clear(sk, dfrag);
		cleaned = true;
	}

1070 1071
	dfrag = mptcp_rtx_head(sk);
	if (dfrag && after64(snd_una, dfrag->data_seq)) {
1072 1073
		u64 delta = snd_una - dfrag->data_seq;

1074 1075 1076 1077 1078 1079 1080 1081
		/* prevent wrap around in recovery mode */
		if (unlikely(delta > dfrag->already_sent)) {
			if (WARN_ON_ONCE(!msk->recovery))
				goto out;
			if (WARN_ON_ONCE(delta > dfrag->data_len))
				goto out;
			dfrag->already_sent += delta - dfrag->already_sent;
		}
1082 1083

		dfrag->data_seq += delta;
1084
		dfrag->offset += delta;
1085
		dfrag->data_len -= delta;
1086
		dfrag->already_sent -= delta;
1087 1088 1089 1090 1091

		dfrag_uncharge(sk, delta);
		cleaned = true;
	}

1092 1093 1094 1095
	/* all retransmitted data acked, recovery completed */
	if (unlikely(msk->recovery) && after64(msk->snd_una, msk->recovery_snd_nxt))
		msk->recovery = false;

1096
out:
1097 1098 1099 1100 1101 1102
	if (cleaned) {
		if (tcp_under_memory_pressure(sk)) {
			__mptcp_update_wmem(sk);
			sk_mem_reclaim_partial(sk);
		}
	}
1103

1104
	if (snd_una == READ_ONCE(msk->snd_nxt) && !msk->recovery) {
P
Paolo Abeni 已提交
1105
		if (mptcp_timer_pending(sk) && !mptcp_data_fin_enabled(msk))
1106 1107 1108
			mptcp_stop_timer(sk);
	} else {
		mptcp_reset_timer(sk);
1109 1110 1111
	}
}

P
Paolo Abeni 已提交
1112 1113
static void __mptcp_clean_una_wakeup(struct sock *sk)
{
1114 1115 1116
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(!lockdep_is_held(&sk->sk_lock.slock));
#endif
P
Paolo Abeni 已提交
1117 1118 1119 1120
	__mptcp_clean_una(sk);
	mptcp_write_space(sk);
}

1121 1122 1123 1124 1125 1126 1127
static void mptcp_clean_una_wakeup(struct sock *sk)
{
	mptcp_data_lock(sk);
	__mptcp_clean_una_wakeup(sk);
	mptcp_data_unlock(sk);
}

1128
static void mptcp_enter_memory_pressure(struct sock *sk)
1129
{
1130 1131 1132 1133
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk = mptcp_sk(sk);
	bool first = true;

1134
	sk_stream_moderate_sndbuf(sk);
1135 1136 1137 1138 1139 1140 1141 1142
	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		if (first)
			tcp_enter_memory_pressure(ssk);
		sk_stream_moderate_sndbuf(ssk);
		first = false;
	}
1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154
}

/* ensure we get enough memory for the frag hdr, beyond some minimal amount of
 * data
 */
static bool mptcp_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
{
	if (likely(skb_page_frag_refill(32U + sizeof(struct mptcp_data_frag),
					pfrag, sk->sk_allocation)))
		return true;

	mptcp_enter_memory_pressure(sk);
1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169
	return false;
}

static struct mptcp_data_frag *
mptcp_carve_data_frag(const struct mptcp_sock *msk, struct page_frag *pfrag,
		      int orig_offset)
{
	int offset = ALIGN(orig_offset, sizeof(long));
	struct mptcp_data_frag *dfrag;

	dfrag = (struct mptcp_data_frag *)(page_to_virt(pfrag->page) + offset);
	dfrag->data_len = 0;
	dfrag->data_seq = msk->write_seq;
	dfrag->overhead = offset - orig_offset + sizeof(struct mptcp_data_frag);
	dfrag->offset = offset + sizeof(struct mptcp_data_frag);
1170
	dfrag->already_sent = 0;
1171 1172 1173 1174 1175
	dfrag->page = pfrag->page;

	return dfrag;
}

1176 1177 1178
struct mptcp_sendmsg_info {
	int mss_now;
	int size_goal;
1179 1180 1181
	u16 limit;
	u16 sent;
	unsigned int flags;
1182 1183
};

1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200
static int mptcp_check_allowed_size(struct mptcp_sock *msk, u64 data_seq,
				    int avail_size)
{
	u64 window_end = mptcp_wnd_end(msk);

	if (__mptcp_check_fallback(msk))
		return avail_size;

	if (!before64(data_seq + avail_size, window_end)) {
		u64 allowed_size = window_end - data_seq;

		return min_t(unsigned int, allowed_size, avail_size);
	}

	return avail_size;
}

1201 1202 1203 1204 1205 1206 1207 1208 1209 1210
static bool __mptcp_add_ext(struct sk_buff *skb, gfp_t gfp)
{
	struct skb_ext *mpext = __skb_ext_alloc(gfp);

	if (!mpext)
		return false;
	__skb_ext_set(skb, SKB_EXT_MPTCP, mpext);
	return true;
}

1211
static struct sk_buff *__mptcp_do_alloc_tx_skb(struct sock *sk, gfp_t gfp)
1212 1213 1214
{
	struct sk_buff *skb;

1215
	skb = alloc_skb_fclone(MAX_TCP_HEADER, gfp);
1216
	if (likely(skb)) {
1217
		if (likely(__mptcp_add_ext(skb, gfp))) {
1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228
			skb_reserve(skb, MAX_TCP_HEADER);
			skb->reserved_tailroom = skb->end - skb->tail;
			return skb;
		}
		__kfree_skb(skb);
	} else {
		mptcp_enter_memory_pressure(sk);
	}
	return NULL;
}

1229
static bool __mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp)
1230 1231 1232 1233 1234 1235
{
	struct sk_buff *skb;

	if (ssk->sk_tx_skb_cache) {
		skb = ssk->sk_tx_skb_cache;
		if (unlikely(!skb_ext_find(skb, SKB_EXT_MPTCP) &&
1236
			     !__mptcp_add_ext(skb, gfp)))
1237 1238 1239 1240
			return false;
		return true;
	}

1241
	skb = __mptcp_do_alloc_tx_skb(sk, gfp);
1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262
	if (!skb)
		return false;

	if (likely(sk_wmem_schedule(ssk, skb->truesize))) {
		ssk->sk_tx_skb_cache = skb;
		return true;
	}
	kfree_skb(skb);
	return false;
}

static bool mptcp_must_reclaim_memory(struct sock *sk, struct sock *ssk)
{
	return !ssk->sk_tx_skb_cache &&
	       tcp_under_memory_pressure(sk);
}

static bool mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk)
{
	if (unlikely(mptcp_must_reclaim_memory(sk, ssk)))
		mptcp_mem_reclaim_partial(sk);
1263
	return __mptcp_alloc_tx_skb(sk, ssk, sk->sk_allocation);
1264 1265
}

G
Geliang Tang 已提交
1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277
/* note: this always recompute the csum on the whole skb, even
 * if we just appended a single frag. More status info needed
 */
static void mptcp_update_data_checksum(struct sk_buff *skb, int added)
{
	struct mptcp_ext *mpext = mptcp_get_ext(skb);
	__wsum csum = ~csum_unfold(mpext->csum);
	int offset = skb->len - added;

	mpext->csum = csum_fold(csum_block_add(csum, skb_checksum(skb, offset, added, 0), offset));
}

1278
static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
1279
			      struct mptcp_data_frag *dfrag,
1280
			      struct mptcp_sendmsg_info *info)
1281
{
1282
	u64 data_seq = dfrag->data_seq + info->sent;
1283
	struct mptcp_sock *msk = mptcp_sk(sk);
1284
	bool zero_window_probe = false;
1285
	struct mptcp_ext *mpext = NULL;
1286
	struct sk_buff *skb, *tail;
1287
	bool can_collapse = false;
1288
	int size_bias = 0;
1289
	int avail_size;
1290
	size_t ret = 0;
1291

1292
	pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u",
1293 1294 1295 1296
		 msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent);

	/* compute send limit */
	info->mss_now = tcp_send_mss(ssk, &info->size_goal, info->flags);
1297
	avail_size = info->size_goal;
1298 1299 1300 1301 1302 1303 1304 1305
	skb = tcp_write_queue_tail(ssk);
	if (skb) {
		/* Limit the write to the size available in the
		 * current skb, if any, so that we create at most a new skb.
		 * Explicitly tells TCP internals to avoid collapsing on later
		 * queue management operation, to avoid breaking the ext <->
		 * SSN association set here
		 */
1306
		mpext = skb_ext_find(skb, SKB_EXT_MPTCP);
1307
		can_collapse = (info->size_goal - skb->len > 0) &&
1308
			 mptcp_skb_can_collapse_to(data_seq, skb, mpext);
1309
		if (!can_collapse) {
1310
			TCP_SKB_CB(skb)->eor = 1;
1311 1312
		} else {
			size_bias = skb->len;
1313
			avail_size = info->size_goal - skb->len;
1314
		}
1315
	}
1316

1317 1318 1319
	/* Zero window and all data acked? Probe. */
	avail_size = mptcp_check_allowed_size(msk, data_seq, avail_size);
	if (avail_size == 0) {
1320 1321 1322
		u64 snd_una = READ_ONCE(msk->snd_una);

		if (skb || snd_una != msk->snd_nxt)
1323 1324
			return 0;
		zero_window_probe = true;
1325
		data_seq = snd_una - 1;
1326 1327 1328
		avail_size = 1;
	}

1329 1330 1331
	if (WARN_ON_ONCE(info->sent > info->limit ||
			 info->limit > dfrag->data_len))
		return 0;
1332

1333
	ret = info->limit - info->sent;
1334 1335
	tail = tcp_build_frag(ssk, avail_size + size_bias, info->flags,
			      dfrag->page, dfrag->offset + info->sent, &ret);
P
Paolo Abeni 已提交
1336 1337 1338
	if (!tail) {
		tcp_remove_empty_skb(sk, tcp_write_queue_tail(ssk));
		return -ENOMEM;
1339
	}
1340

P
Paolo Abeni 已提交
1341
	/* if the tail skb is still the cached one, collapsing really happened.
1342
	 */
P
Paolo Abeni 已提交
1343
	if (skb == tail) {
1344
		TCP_SKB_CB(tail)->tcp_flags &= ~TCPHDR_PSH;
1345
		mpext->data_len += ret;
1346
		WARN_ON_ONCE(!can_collapse);
1347
		WARN_ON_ONCE(zero_window_probe);
1348 1349 1350
		goto out;
	}

1351 1352 1353 1354 1355
	mpext = skb_ext_find(tail, SKB_EXT_MPTCP);
	if (WARN_ON_ONCE(!mpext)) {
		/* should never reach here, stream corrupted */
		return -EINVAL;
	}
1356 1357

	memset(mpext, 0, sizeof(*mpext));
1358
	mpext->data_seq = data_seq;
1359 1360 1361 1362 1363 1364 1365 1366 1367
	mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq;
	mpext->data_len = ret;
	mpext->use_map = 1;
	mpext->dsn64 = 1;

	pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d",
		 mpext->data_seq, mpext->subflow_seq, mpext->data_len,
		 mpext->dsn64);

1368 1369 1370
	if (zero_window_probe) {
		mptcp_subflow_ctx(ssk)->rel_write_seq += ret;
		mpext->frozen = 1;
G
Geliang Tang 已提交
1371 1372
		if (READ_ONCE(msk->csum_enabled))
			mptcp_update_data_checksum(tail, ret);
1373
		tcp_push_pending_frames(ssk);
G
Geliang Tang 已提交
1374
		return 0;
1375
	}
1376
out:
G
Geliang Tang 已提交
1377 1378
	if (READ_ONCE(msk->csum_enabled))
		mptcp_update_data_checksum(tail, ret);
1379 1380 1381 1382
	mptcp_subflow_ctx(ssk)->rel_write_seq += ret;
	return ret;
}

1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393
#define MPTCP_SEND_BURST_SIZE		((1 << 16) - \
					 sizeof(struct tcphdr) - \
					 MAX_TCP_OPTION_SPACE - \
					 sizeof(struct ipv6hdr) - \
					 sizeof(struct frag_hdr))

struct subflow_send_info {
	struct sock *ssk;
	u64 ratio;
};

1394 1395 1396 1397 1398 1399
void mptcp_subflow_set_active(struct mptcp_subflow_context *subflow)
{
	if (!subflow->stale)
		return;

	subflow->stale = 0;
1400
	MPTCP_INC_STATS(sock_net(mptcp_subflow_tcp_sock(subflow)), MPTCP_MIB_SUBFLOWRECOVER);
1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415
}

bool mptcp_subflow_active(struct mptcp_subflow_context *subflow)
{
	if (unlikely(subflow->stale)) {
		u32 rcv_tstamp = READ_ONCE(tcp_sk(mptcp_subflow_tcp_sock(subflow))->rcv_tstamp);

		if (subflow->stale_rcv_tstamp == rcv_tstamp)
			return false;

		mptcp_subflow_set_active(subflow);
	}
	return __mptcp_subflow_active(subflow);
}

P
Paolo Abeni 已提交
1416 1417 1418 1419
/* implement the mptcp packet scheduler;
 * returns the subflow that will transmit the next DSS
 * additionally updates the rtx timeout
 */
P
Paolo Abeni 已提交
1420
static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
1421
{
1422
	struct subflow_send_info send_info[2];
1423
	struct mptcp_subflow_context *subflow;
P
Paolo Abeni 已提交
1424
	struct sock *sk = (struct sock *)msk;
1425 1426
	int i, nr_active = 0;
	struct sock *ssk;
P
Paolo Abeni 已提交
1427
	long tout = 0;
1428 1429
	u64 ratio;
	u32 pace;
1430

P
Paolo Abeni 已提交
1431
	sock_owned_by_me(sk);
1432

1433 1434
	if (__mptcp_check_fallback(msk)) {
		if (!msk->first)
1435
			return NULL;
1436 1437 1438 1439 1440 1441
		return sk_stream_memory_free(msk->first) ? msk->first : NULL;
	}

	/* re-use last subflow, if the burst allow that */
	if (msk->last_snd && msk->snd_burst > 0 &&
	    sk_stream_memory_free(msk->last_snd) &&
P
Paolo Abeni 已提交
1442 1443
	    mptcp_subflow_active(mptcp_subflow_ctx(msk->last_snd))) {
		mptcp_set_timeout(sk);
1444
		return msk->last_snd;
P
Paolo Abeni 已提交
1445
	}
1446

1447 1448 1449 1450 1451 1452
	/* pick the subflow with the lower wmem/wspace ratio */
	for (i = 0; i < 2; ++i) {
		send_info[i].ssk = NULL;
		send_info[i].ratio = -1;
	}
	mptcp_for_each_subflow(msk, subflow) {
1453
		trace_mptcp_subflow_get_send(subflow);
1454 1455 1456 1457
		ssk =  mptcp_subflow_tcp_sock(subflow);
		if (!mptcp_subflow_active(subflow))
			continue;

P
Paolo Abeni 已提交
1458
		tout = max(tout, mptcp_timeout_from_subflow(subflow));
1459
		nr_active += !subflow->backup;
1460
		if (!sk_stream_memory_free(subflow->tcp_sock) || !tcp_sk(ssk)->snd_wnd)
1461
			continue;
1462

1463 1464
		pace = READ_ONCE(ssk->sk_pacing_rate);
		if (!pace)
1465 1466
			continue;

1467 1468 1469 1470 1471 1472
		ratio = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32,
				pace);
		if (ratio < send_info[subflow->backup].ratio) {
			send_info[subflow->backup].ssk = ssk;
			send_info[subflow->backup].ratio = ratio;
		}
1473
	}
P
Paolo Abeni 已提交
1474
	__mptcp_set_timeout(sk, tout);
1475

1476 1477 1478 1479 1480 1481 1482
	/* pick the best backup if no other subflow is active */
	if (!nr_active)
		send_info[0].ssk = send_info[1].ssk;

	if (send_info[0].ssk) {
		msk->last_snd = send_info[0].ssk;
		msk->snd_burst = min_t(int, MPTCP_SEND_BURST_SIZE,
1483
				       tcp_sk(msk->last_snd)->snd_wnd);
1484 1485
		return msk->last_snd;
	}
P
Paolo Abeni 已提交
1486

1487
	return NULL;
1488 1489
}

1490 1491 1492 1493 1494 1495 1496
static void mptcp_push_release(struct sock *sk, struct sock *ssk,
			       struct mptcp_sendmsg_info *info)
{
	tcp_push(ssk, 0, info->mss_now, tcp_sk(ssk)->nonagle, info->size_goal);
	release_sock(ssk);
}

1497
void __mptcp_push_pending(struct sock *sk, unsigned int flags)
M
Mat Martineau 已提交
1498
{
1499
	struct sock *prev_ssk = NULL, *ssk = NULL;
M
Mat Martineau 已提交
1500
	struct mptcp_sock *msk = mptcp_sk(sk);
1501
	struct mptcp_sendmsg_info info = {
1502
				.flags = flags,
1503
	};
1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514
	struct mptcp_data_frag *dfrag;
	int len, copied = 0;

	while ((dfrag = mptcp_send_head(sk))) {
		info.sent = dfrag->already_sent;
		info.limit = dfrag->data_len;
		len = dfrag->data_len - dfrag->already_sent;
		while (len > 0) {
			int ret = 0;

			prev_ssk = ssk;
1515
			mptcp_flush_join_list(msk);
P
Paolo Abeni 已提交
1516
			ssk = mptcp_subflow_get_send(msk);
1517

1518 1519
			/* First check. If the ssk has changed since
			 * the last round, release prev_ssk
1520 1521 1522 1523 1524 1525
			 */
			if (ssk != prev_ssk && prev_ssk)
				mptcp_push_release(sk, prev_ssk, &info);
			if (!ssk)
				goto out;

1526 1527 1528 1529 1530
			/* Need to lock the new subflow only if different
			 * from the previous one, otherwise we are still
			 * helding the relevant lock
			 */
			if (ssk != prev_ssk)
1531 1532
				lock_sock(ssk);

1533 1534 1535 1536 1537 1538 1539 1540 1541
			/* keep it simple and always provide a new skb for the
			 * subflow, even if we will not use it when collapsing
			 * on the pending one
			 */
			if (!mptcp_alloc_tx_skb(sk, ssk)) {
				mptcp_push_release(sk, ssk, &info);
				goto out;
			}

1542 1543 1544 1545 1546 1547 1548 1549 1550 1551
			ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
			if (ret <= 0) {
				mptcp_push_release(sk, ssk, &info);
				goto out;
			}

			info.sent += ret;
			dfrag->already_sent += ret;
			msk->snd_nxt += ret;
			msk->snd_burst -= ret;
1552
			msk->tx_pending_data -= ret;
1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563
			copied += ret;
			len -= ret;
		}
		WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
	}

	/* at this point we held the socket lock for the last subflow we used */
	if (ssk)
		mptcp_push_release(sk, ssk, &info);

out:
P
Paolo Abeni 已提交
1564 1565 1566 1567
	/* ensure the rtx timer is running */
	if (!mptcp_timer_pending(sk))
		mptcp_reset_timer(sk);
	if (copied)
1568 1569 1570
		__mptcp_check_send_data_fin(sk);
}

1571 1572 1573 1574 1575
static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_sendmsg_info info;
	struct mptcp_data_frag *dfrag;
P
Paolo Abeni 已提交
1576
	struct sock *xmit_ssk;
1577
	int len, copied = 0;
P
Paolo Abeni 已提交
1578
	bool first = true;
1579 1580 1581 1582 1583 1584 1585 1586 1587

	info.flags = 0;
	while ((dfrag = mptcp_send_head(sk))) {
		info.sent = dfrag->already_sent;
		info.limit = dfrag->data_len;
		len = dfrag->data_len - dfrag->already_sent;
		while (len > 0) {
			int ret = 0;

P
Paolo Abeni 已提交
1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599
			/* the caller already invoked the packet scheduler,
			 * check for a different subflow usage only after
			 * spooling the first chunk of data
			 */
			xmit_ssk = first ? ssk : mptcp_subflow_get_send(mptcp_sk(sk));
			if (!xmit_ssk)
				goto out;
			if (xmit_ssk != ssk) {
				mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk));
				goto out;
			}

1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617
			if (unlikely(mptcp_must_reclaim_memory(sk, ssk))) {
				__mptcp_update_wmem(sk);
				sk_mem_reclaim_partial(sk);
			}
			if (!__mptcp_alloc_tx_skb(sk, ssk, GFP_ATOMIC))
				goto out;

			ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
			if (ret <= 0)
				goto out;

			info.sent += ret;
			dfrag->already_sent += ret;
			msk->snd_nxt += ret;
			msk->snd_burst -= ret;
			msk->tx_pending_data -= ret;
			copied += ret;
			len -= ret;
P
Paolo Abeni 已提交
1618
			first = false;
1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630
		}
		WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
	}

out:
	/* __mptcp_alloc_tx_skb could have released some wmem and we are
	 * not going to flush it via release_sock()
	 */
	__mptcp_update_wmem(sk);
	if (copied) {
		tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
			 info.size_goal);
1631 1632 1633
		if (!mptcp_timer_pending(sk))
			mptcp_reset_timer(sk);

1634 1635 1636 1637 1638 1639
		if (msk->snd_data_fin_enable &&
		    msk->snd_nxt + 1 == msk->write_seq)
			mptcp_schedule_work(sk);
	}
}

P
Paolo Abeni 已提交
1640 1641 1642 1643 1644 1645 1646 1647 1648
static void mptcp_set_nospace(struct sock *sk)
{
	/* enable autotune */
	set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

	/* will be cleared on avail space */
	set_bit(MPTCP_NOSPACE, &mptcp_sk(sk)->flags);
}

1649 1650 1651
static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
1652
	struct page_frag *pfrag;
1653
	size_t copied = 0;
1654
	int ret = 0;
1655
	long timeo;
M
Mat Martineau 已提交
1656

1657 1658
	/* we don't support FASTOPEN yet */
	if (msg->msg_flags & MSG_FASTOPEN)
M
Mat Martineau 已提交
1659 1660
		return -EOPNOTSUPP;

1661 1662 1663
	/* silently ignore everything else */
	msg->msg_flags &= MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL;

1664
	mptcp_lock_sock(sk, __mptcp_wmem_reserve(sk, min_t(size_t, 1 << 20, len)));
1665 1666 1667 1668 1669 1670 1671 1672 1673

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
		ret = sk_stream_wait_connect(sk, &timeo);
		if (ret)
			goto out;
	}

1674
	pfrag = sk_page_frag(sk);
1675

1676
	while (msg_data_left(msg)) {
1677
		int total_ts, frag_truesize = 0;
1678 1679 1680
		struct mptcp_data_frag *dfrag;
		bool dfrag_collapsed;
		size_t psize, offset;
1681

1682 1683
		if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) {
			ret = -EPIPE;
1684 1685
			goto out;
		}
P
Paolo Abeni 已提交
1686

1687 1688 1689 1690 1691 1692
		/* reuse tail pfrag, if possible, or carve a new one from the
		 * page allocator
		 */
		dfrag = mptcp_pending_tail(sk);
		dfrag_collapsed = mptcp_frag_can_collapse_to(msk, pfrag, dfrag);
		if (!dfrag_collapsed) {
1693 1694 1695
			if (!sk_stream_memory_free(sk))
				goto wait_for_memory;

1696 1697 1698 1699 1700
			if (!mptcp_page_frag_refill(sk, pfrag))
				goto wait_for_memory;

			dfrag = mptcp_carve_data_frag(msk, pfrag, pfrag->offset);
			frag_truesize = dfrag->overhead;
1701
		}
1702

1703 1704 1705
		/* we do not bound vs wspace, to allow a single packet.
		 * memory accounting will prevent execessive memory usage
		 * anyway
1706
		 */
1707 1708 1709
		offset = dfrag->offset + dfrag->data_len;
		psize = pfrag->size - offset;
		psize = min_t(size_t, psize, msg_data_left(msg));
1710
		total_ts = psize + frag_truesize;
1711

P
Paolo Abeni 已提交
1712
		if (!mptcp_wmem_alloc(sk, total_ts))
1713 1714
			goto wait_for_memory;

1715 1716
		if (copy_page_from_iter(dfrag->page, offset, psize,
					&msg->msg_iter) != psize) {
1717
			mptcp_wmem_uncharge(sk, psize + frag_truesize);
1718 1719
			ret = -EFAULT;
			goto out;
1720 1721
		}

1722 1723 1724 1725 1726 1727
		/* data successfully copied into the write queue */
		copied += psize;
		dfrag->data_len += psize;
		frag_truesize += psize;
		pfrag->offset += frag_truesize;
		WRITE_ONCE(msk->write_seq, msk->write_seq + psize);
P
Paolo Abeni 已提交
1728
		msk->tx_pending_data += psize;
1729 1730 1731

		/* charge data on mptcp pending queue to the msk socket
		 * Note: we charge such data both to sk and ssk
1732
		 */
1733 1734 1735 1736 1737 1738
		sk_wmem_queued_add(sk, frag_truesize);
		if (!dfrag_collapsed) {
			get_page(dfrag->page);
			list_add_tail(&dfrag->list, &msk->rtx_queue);
			if (!msk->first_pending)
				WRITE_ONCE(msk->first_pending, dfrag);
1739
		}
1740
		pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d", msk,
1741 1742
			 dfrag->data_seq, dfrag->data_len, dfrag->already_sent,
			 !dfrag_collapsed);
1743

1744
		continue;
1745

1746
wait_for_memory:
P
Paolo Abeni 已提交
1747
		mptcp_set_nospace(sk);
P
Paolo Abeni 已提交
1748
		__mptcp_push_pending(sk, msg->msg_flags);
1749 1750 1751
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret)
			goto out;
1752
	}
1753

P
Paolo Abeni 已提交
1754
	if (copied)
P
Paolo Abeni 已提交
1755
		__mptcp_push_pending(sk, msg->msg_flags);
1756

1757
out:
1758
	release_sock(sk);
1759
	return copied ? : ret;
M
Mat Martineau 已提交
1760 1761
}

1762 1763 1764 1765 1766 1767 1768 1769 1770
static void mptcp_wait_data(struct sock *sk, long *timeo)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct mptcp_sock *msk = mptcp_sk(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	sk_wait_event(sk, timeo,
1771
		      test_bit(MPTCP_DATA_READY, &msk->flags), &wait);
1772 1773 1774 1775 1776

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	remove_wait_queue(sk_sleep(sk), &wait);
}

1777 1778
static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
				struct msghdr *msg,
1779 1780 1781
				size_t len, int flags,
				struct scm_timestamping_internal *tss,
				int *cmsg_flags)
1782
{
Y
Yonglong Li 已提交
1783
	struct sk_buff *skb, *tmp;
1784 1785
	int copied = 0;

Y
Yonglong Li 已提交
1786
	skb_queue_walk_safe(&msk->receive_queue, skb, tmp) {
1787 1788 1789 1790 1791
		u32 offset = MPTCP_SKB_CB(skb)->offset;
		u32 data_len = skb->len - offset;
		u32 count = min_t(size_t, len - copied, data_len);
		int err;

P
Paolo Abeni 已提交
1792 1793 1794 1795 1796 1797 1798
		if (!(flags & MSG_TRUNC)) {
			err = skb_copy_datagram_msg(skb, offset, msg, count);
			if (unlikely(err < 0)) {
				if (!copied)
					return err;
				break;
			}
1799 1800
		}

1801 1802 1803 1804 1805
		if (MPTCP_SKB_CB(skb)->has_rxtstamp) {
			tcp_update_recv_tstamps(skb, tss);
			*cmsg_flags |= MPTCP_CMSG_TS;
		}

1806 1807 1808
		copied += count;

		if (count < data_len) {
Y
Yonglong Li 已提交
1809 1810
			if (!(flags & MSG_PEEK))
				MPTCP_SKB_CB(skb)->offset += count;
1811 1812 1813
			break;
		}

Y
Yonglong Li 已提交
1814 1815 1816
		if (!(flags & MSG_PEEK)) {
			/* we will bulk release the skb memory later */
			skb->destructor = NULL;
1817
			WRITE_ONCE(msk->rmem_released, msk->rmem_released + skb->truesize);
Y
Yonglong Li 已提交
1818 1819 1820
			__skb_unlink(skb, &msk->receive_queue);
			__kfree_skb(skb);
		}
1821 1822 1823 1824 1825 1826 1827 1828

		if (copied >= len)
			break;
	}

	return copied;
}

/* receive buffer autotuning.  See tcp_rcv_space_adjust for more information.
 *
 * Only difference: Use highest rtt estimate of the subflows in use.
 */
static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	u32 time, advmss = 1;
	u64 rtt_us, mstamp;

	sock_owned_by_me(sk);

	if (copied <= 0)
		return;

	msk->rcvq_space.copied += copied;

	mstamp = div_u64(tcp_clock_ns(), NSEC_PER_USEC);
	time = tcp_stamp_us_delta(mstamp, msk->rcvq_space.time);

	rtt_us = msk->rcvq_space.rtt_us;
	if (rtt_us && time < (rtt_us >> 3))
		return;

	rtt_us = 0;
	mptcp_for_each_subflow(msk, subflow) {
		const struct tcp_sock *tp;
		u64 sf_rtt_us;
		u32 sf_advmss;

		tp = tcp_sk(mptcp_subflow_tcp_sock(subflow));

		sf_rtt_us = READ_ONCE(tp->rcv_rtt_est.rtt_us);
		sf_advmss = READ_ONCE(tp->advmss);

		rtt_us = max(sf_rtt_us, rtt_us);
		advmss = max(sf_advmss, advmss);
	}

	msk->rcvq_space.rtt_us = rtt_us;
	if (time < (rtt_us >> 3) || rtt_us == 0)
		return;

	if (msk->rcvq_space.copied <= msk->rcvq_space.space)
		goto new_measure;

	if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf &&
	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
		int rcvmem, rcvbuf;
		u64 rcvwin, grow;

		rcvwin = ((u64)msk->rcvq_space.copied << 1) + 16 * advmss;

		grow = rcvwin * (msk->rcvq_space.copied - msk->rcvq_space.space);

		do_div(grow, msk->rcvq_space.space);
		rcvwin += (grow << 1);

		rcvmem = SKB_TRUESIZE(advmss + MAX_TCP_HEADER);
		while (tcp_win_from_space(sk, rcvmem) < advmss)
			rcvmem += 128;

		do_div(rcvwin, advmss);
		rcvbuf = min_t(u64, rcvwin * rcvmem,
			       sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);

		if (rcvbuf > sk->sk_rcvbuf) {
			u32 window_clamp;

			window_clamp = tcp_win_from_space(sk, rcvbuf);
			WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);

			/* Make subflows follow along.  If we do not do this, we
			 * get drops at subflow level if skbs can't be moved to
			 * the mptcp rx queue fast enough (announced rcv_win can
			 * exceed ssk->sk_rcvbuf).
			 */
			mptcp_for_each_subflow(msk, subflow) {
				struct sock *ssk;
				bool slow;

				ssk = mptcp_subflow_tcp_sock(subflow);
				slow = lock_sock_fast(ssk);
				WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf);
				tcp_sk(ssk)->window_clamp = window_clamp;
				tcp_cleanup_rbuf(ssk, 1);
				unlock_sock_fast(ssk, slow);
			}
		}
	}

	msk->rcvq_space.space = msk->rcvq_space.copied;
new_measure:
	msk->rcvq_space.copied = 0;
	msk->rcvq_space.time = mstamp;
}

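/* Settle the receive-memory accounting deferred by __mptcp_recvmsg_mskq():
 * release the batched truesize from sk_rmem_alloc and uncharge it in one go.
 */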
static void __mptcp_update_rmem(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (!msk->rmem_released)
		return;

	atomic_sub(msk->rmem_released, &sk->sk_rmem_alloc);
	sk_mem_uncharge(sk, msk->rmem_released);
	WRITE_ONCE(msk->rmem_released, 0);
}

static void __mptcp_splice_receive_queue(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	skb_queue_splice_tail_init(&sk->sk_receive_queue, &msk->receive_queue);
}

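/* Drain data still queued at the subflows into the msk-level receive queue,
 * then splice in the out-of-order queue and sk_receive_queue as well;
 * returns true if the msk receive queue is non-empty afterwards.
 */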
static bool __mptcp_move_skbs(struct mptcp_sock *msk)
{
	struct sock *sk = (struct sock *)msk;
	unsigned int moved = 0;
	bool ret, done;

	mptcp_flush_join_list(msk);
	do {
		struct sock *ssk = mptcp_subflow_recv_lookup(msk);
		bool slowpath;

		/* we can have data pending in the subflows only if the msk
		 * receive buffer was full at subflow_data_ready() time,
		 * that is an unlikely slow path.
		 */
		if (likely(!ssk))
			break;

		slowpath = lock_sock_fast(ssk);
		mptcp_data_lock(sk);
		__mptcp_update_rmem(sk);
		done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
		mptcp_data_unlock(sk);

		if (unlikely(ssk->sk_err))
			__mptcp_error_report(sk);
		unlock_sock_fast(ssk, slowpath);
	} while (!done);

	/* acquire the data lock only if some input data is pending */
	ret = moved > 0;
	if (!RB_EMPTY_ROOT(&msk->out_of_order_queue) ||
	    !skb_queue_empty_lockless(&sk->sk_receive_queue)) {
		mptcp_data_lock(sk);
		__mptcp_update_rmem(sk);
		ret |= __mptcp_ofo_queue(msk);
		__mptcp_splice_receive_queue(sk);
		mptcp_data_unlock(sk);
	}
	if (ret)
		mptcp_check_data_fin((struct sock *)msk);
	return !skb_queue_empty(&msk->receive_queue);
}

static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			 int nonblock, int flags, int *addr_len)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct scm_timestamping_internal tss;
	int copied = 0, cmsg_flags = 0;
	int target;
	long timeo;

	/* MSG_ERRQUEUE is really a no-op till we support IP_RECVERR */
	if (unlikely(flags & MSG_ERRQUEUE))
		return inet_recv_error(sk, msg, len, addr_len);

	mptcp_lock_sock(sk, __mptcp_splice_receive_queue(sk));
	if (unlikely(sk->sk_state == TCP_LISTEN)) {
		copied = -ENOTCONN;
		goto out_err;
	}

	timeo = sock_rcvtimeo(sk, nonblock);

	len = min_t(size_t, len, INT_MAX);
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

	while (copied < len) {
		int bytes_read;

		bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied, flags, &tss, &cmsg_flags);
		if (unlikely(bytes_read < 0)) {
			if (!copied)
				copied = bytes_read;
			goto out_err;
		}

		copied += bytes_read;

		/* be sure to advertise window change */
		mptcp_cleanup_rbuf(msk);

		if (skb_queue_empty(&msk->receive_queue) && __mptcp_move_skbs(msk))
			continue;

		/* only the master socket status is relevant here. The exit
		 * conditions mirror closely tcp_recvmsg()
		 */
		if (copied >= target)
			break;

		if (copied) {
			if (sk->sk_err ||
			    sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    !timeo ||
			    signal_pending(current))
				break;
		} else {
			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}

			if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
				mptcp_check_for_eof(msk);

			if (sk->sk_shutdown & RCV_SHUTDOWN) {
				/* race breaker: the shutdown could be after the
				 * previous receive queue check
				 */
				if (__mptcp_move_skbs(msk))
					continue;
				break;
			}

			if (sk->sk_state == TCP_CLOSE) {
				copied = -ENOTCONN;
				break;
			}

			if (!timeo) {
				copied = -EAGAIN;
				break;
			}

			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}

		pr_debug("block timeout %ld", timeo);
		mptcp_wait_data(sk, &timeo);
	}

	if (skb_queue_empty_lockless(&sk->sk_receive_queue) &&
	    skb_queue_empty(&msk->receive_queue)) {
		/* entire backlog drained, clear DATA_READY. */
		clear_bit(MPTCP_DATA_READY, &msk->flags);

		/* .. race-breaker: ssk might have gotten new data
		 * after last __mptcp_move_skbs() returned false.
		 */
		if (unlikely(__mptcp_move_skbs(msk)))
			set_bit(MPTCP_DATA_READY, &msk->flags);
	}

out_err:
	if (cmsg_flags && copied >= 0) {
		if (cmsg_flags & MPTCP_CMSG_TS)
			tcp_recv_timestamp(msg, sk, &tss);
	}

	pr_debug("msk=%p data_ready=%d rx queue empty=%d copied=%d",
		 msk, test_bit(MPTCP_DATA_READY, &msk->flags),
		 skb_queue_empty_lockless(&sk->sk_receive_queue), copied);
	if (!(flags & MSG_PEEK))
		mptcp_rcv_space_adjust(msk, copied);

	release_sock(sk);
	return copied;
}

static void mptcp_retransmit_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk = from_timer(icsk, t,
						       icsk_retransmit_timer);
	struct sock *sk = &icsk->icsk_inet.sk;
	struct mptcp_sock *msk = mptcp_sk(sk);

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/* we need a process context to retransmit */
		if (!test_and_set_bit(MPTCP_WORK_RTX, &msk->flags))
			mptcp_schedule_work(sk);
	} else {
		/* delegate our work to tcp_release_cb() */
		set_bit(MPTCP_RETRANSMIT, &msk->flags);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

static void mptcp_timeout_timer(struct timer_list *t)
{
	struct sock *sk = from_timer(sk, t, sk_timer);

	mptcp_schedule_work(sk);
	sock_put(sk);
}

/* Find an idle subflow.  Return NULL if there is unacked data at tcp
 * level.
 *
 * A backup subflow is returned only if that is the only kind available.
 */
static struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk)
{
	struct sock *backup = NULL, *pick = NULL;
	struct mptcp_subflow_context *subflow;
	int min_stale_count = INT_MAX;

	sock_owned_by_me((const struct sock *)msk);

	if (__mptcp_check_fallback(msk))
		return NULL;

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		if (!__mptcp_subflow_active(subflow))
			continue;

		/* still data outstanding at TCP level? skip this */
		if (!tcp_rtx_and_write_queues_empty(ssk)) {
			mptcp_pm_subflow_chk_stale(msk, ssk);
			min_stale_count = min_t(int, min_stale_count, subflow->stale_count);
			continue;
		}

		if (subflow->backup) {
			if (!backup)
				backup = ssk;
			continue;
		}

		if (!pick)
			pick = ssk;
	}

	if (pick)
		return pick;

	/* use backup only if there is no progress anywhere */
	return min_stale_count > 1 ? backup : NULL;
}

static void mptcp_dispose_initial_subflow(struct mptcp_sock *msk)
{
	if (msk->subflow) {
		iput(SOCK_INODE(msk->subflow));
		msk->subflow = NULL;
	}
}

bool __mptcp_retransmit_pending_data(struct sock *sk)
{
	struct mptcp_data_frag *cur, *rtx_head;
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (__mptcp_check_fallback(mptcp_sk(sk)))
		return false;

	if (tcp_rtx_and_write_queues_empty(sk))
		return false;

	/* the closing socket has some data untransmitted and/or unacked:
	 * some data in the mptcp rtx queue has not really xmitted yet.
	 * keep it simple and re-inject the whole mptcp level rtx queue
	 */
	mptcp_data_lock(sk);
	__mptcp_clean_una_wakeup(sk);
	rtx_head = mptcp_rtx_head(sk);
	if (!rtx_head) {
		mptcp_data_unlock(sk);
		return false;
	}

	/* will accept ack for reinjected data before re-sending them */
	if (!msk->recovery || after64(msk->snd_nxt, msk->recovery_snd_nxt))
		msk->recovery_snd_nxt = msk->snd_nxt;
	msk->recovery = true;
	mptcp_data_unlock(sk);

	msk->first_pending = rtx_head;
	msk->tx_pending_data += msk->snd_nxt - rtx_head->data_seq;
	msk->snd_nxt = rtx_head->data_seq;
	msk->snd_burst = 0;

	/* be sure to clear the "sent status" on all re-injected fragments */
	list_for_each_entry(cur, &msk->rtx_queue, list) {
		if (!cur->already_sent)
			break;
		cur->already_sent = 0;
	}

	return true;
}

/* subflow sockets can be either outgoing (connect) or incoming
 * (accept).
 *
 * Outgoing subflows use in-kernel sockets.
 * Incoming subflows do not have their own 'struct socket' allocated,
 * so we need to use tcp_close() after detaching them from the mptcp
 * parent socket.
 */
static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
			      struct mptcp_subflow_context *subflow)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	bool need_push;

	list_del(&subflow->node);

	lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);

	/* if we are invoked by the msk cleanup code, the subflow is
	 * already orphaned
	 */
	if (ssk->sk_socket)
		sock_orphan(ssk);

	need_push = __mptcp_retransmit_pending_data(sk);
	subflow->disposable = 1;

	/* if ssk hit tcp_done(), tcp_cleanup_ulp() cleared the related ops
	 * the ssk has been already destroyed, we just need to release the
	 * reference owned by msk;
	 */
	if (!inet_csk(ssk)->icsk_ulp_ops) {
		kfree_rcu(subflow, rcu);
	} else {
		/* otherwise tcp will dispose of the ssk and subflow ctx */
		__tcp_close(ssk, 0);

		/* close acquired an extra ref */
		__sock_put(ssk);
	}
	release_sock(ssk);

	sock_put(ssk);

	if (ssk == msk->last_snd)
		msk->last_snd = NULL;

	if (ssk == msk->first)
		msk->first = NULL;

	if (msk->subflow && ssk == msk->subflow->sk)
		mptcp_dispose_initial_subflow(msk);

	if (need_push)
		__mptcp_push_pending(sk, 0);
}

void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
		     struct mptcp_subflow_context *subflow)
{
	if (sk->sk_state == TCP_ESTABLISHED)
		mptcp_event(MPTCP_EVENT_SUB_CLOSED, mptcp_sk(sk), ssk, GFP_KERNEL);
	__mptcp_close_ssk(sk, ssk, subflow);
}

static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
{
	return 0;
}

static void __mptcp_close_subflow(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow, *tmp;

	might_sleep();

	list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		if (inet_sk_state_load(ssk) != TCP_CLOSE)
			continue;

		/* 'subflow_data_ready' will re-sched once rx queue is empty */
		if (!skb_queue_empty_lockless(&ssk->sk_receive_queue))
			continue;

		mptcp_close_ssk((struct sock *)msk, ssk, subflow);
	}
}

static bool mptcp_check_close_timeout(const struct sock *sk)
{
	s32 delta = tcp_jiffies32 - inet_csk(sk)->icsk_mtup.probe_timestamp;
	struct mptcp_subflow_context *subflow;

	if (delta >= TCP_TIMEWAIT_LEN)
		return true;

	/* if all subflows are in closed status don't bother with additional
	 * timeout
	 */
	mptcp_for_each_subflow(mptcp_sk(sk), subflow) {
		if (inet_sk_state_load(mptcp_subflow_tcp_sock(subflow)) !=
		    TCP_CLOSE)
			return false;
	}
	return true;
}

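/* Handle an incoming MP_FASTCLOSE: reset every subflow still open and move
 * the msk straight to TCP_CLOSE, waking up any waiter.
 */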
static void mptcp_check_fastclose(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow, *tmp;
	struct sock *sk = &msk->sk.icsk_inet.sk;

	if (likely(!READ_ONCE(msk->rcv_fastclose)))
		return;

	mptcp_token_destroy(msk);

	list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
		struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
		bool slow;

		slow = lock_sock_fast(tcp_sk);
		if (tcp_sk->sk_state != TCP_CLOSE) {
			tcp_send_active_reset(tcp_sk, GFP_ATOMIC);
			tcp_set_state(tcp_sk, TCP_CLOSE);
		}
		unlock_sock_fast(tcp_sk, slow);
	}

	inet_sk_state_store(sk, TCP_CLOSE);
	sk->sk_shutdown = SHUTDOWN_MASK;
	smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
	set_bit(MPTCP_DATA_READY, &msk->flags);
	set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags);

	mptcp_close_wake_up(sk);
}

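/* MPTCP-level retransmission, invoked from the worker: re-push the head of
 * the rtx queue on an idle subflow or, when the rtx queue is empty and a
 * DATA_FIN is pending, re-send an ack carrying it and re-arm the timer.
 */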
static void __mptcp_retrans(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_sendmsg_info info = {};
	struct mptcp_data_frag *dfrag;
	size_t copied = 0;
	struct sock *ssk;
	int ret;

	mptcp_clean_una_wakeup(sk);
	dfrag = mptcp_rtx_head(sk);
	if (!dfrag) {
		if (mptcp_data_fin_enabled(msk)) {
			struct inet_connection_sock *icsk = inet_csk(sk);

			icsk->icsk_retransmits++;
			mptcp_set_datafin_timeout(sk);
			mptcp_send_ack(msk);

			goto reset_timer;
		}

		return;
	}

	ssk = mptcp_subflow_get_retrans(msk);
	if (!ssk)
		goto reset_timer;

	lock_sock(ssk);

	/* limit retransmission to the bytes already sent on some subflows */
	info.sent = 0;
	info.limit = READ_ONCE(msk->csum_enabled) ? dfrag->data_len : dfrag->already_sent;
	while (info.sent < info.limit) {
		if (!mptcp_alloc_tx_skb(sk, ssk))
			break;

		ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
		if (ret <= 0)
			break;

		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RETRANSSEGS);
		copied += ret;
		info.sent += ret;
	}
	if (copied) {
		dfrag->already_sent = max(dfrag->already_sent, info.sent);
		tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
			 info.size_goal);
	}

	release_sock(ssk);

reset_timer:
	if (!mptcp_timer_pending(sk))
		mptcp_reset_timer(sk);
}

static void mptcp_worker(struct work_struct *work)
{
	struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
	struct sock *sk = &msk->sk.icsk_inet.sk;
	int state;

	lock_sock(sk);
	state = sk->sk_state;
	if (unlikely(state == TCP_CLOSE))
		goto unlock;

	mptcp_check_data_fin_ack(sk);
	mptcp_flush_join_list(msk);

	mptcp_check_fastclose(msk);

	if (msk->pm.status)
		mptcp_pm_nl_work(msk);

	if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
		mptcp_check_for_eof(msk);

	__mptcp_check_send_data_fin(sk);
	mptcp_check_data_fin(sk);

	/* There is no point in keeping around an orphaned sk that has timed
	 * out or closed, but we need the msk around to reply to incoming
	 * DATA_FIN, even if it is orphaned and in FIN_WAIT2 state
	 */
	if (sock_flag(sk, SOCK_DEAD) &&
	    (mptcp_check_close_timeout(sk) || sk->sk_state == TCP_CLOSE)) {
		inet_sk_state_store(sk, TCP_CLOSE);
		__mptcp_destroy_sock(sk);
		goto unlock;
	}

	if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
		__mptcp_close_subflow(msk);

	if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
		__mptcp_retrans(sk);

unlock:
	release_sock(sk);
	sock_put(sk);
}

static int __mptcp_init_sock(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	spin_lock_init(&msk->join_list_lock);

	INIT_LIST_HEAD(&msk->conn_list);
	INIT_LIST_HEAD(&msk->join_list);
	INIT_LIST_HEAD(&msk->rtx_queue);
	INIT_WORK(&msk->work, mptcp_worker);
	__skb_queue_head_init(&msk->receive_queue);
	msk->out_of_order_queue = RB_ROOT;
	msk->first_pending = NULL;
	msk->wmem_reserved = 0;
	WRITE_ONCE(msk->rmem_released, 0);
	msk->tx_pending_data = 0;
	msk->timer_ival = TCP_RTO_MIN;

	msk->first = NULL;
	inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
	WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
	msk->recovery = false;

	mptcp_pm_data_init(msk);

	/* re-use the csk retrans timer for MPTCP-level retrans */
	timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0);
	timer_setup(&sk->sk_timer, mptcp_timeout_timer, 0);

	return 0;
}

static int mptcp_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct net *net = sock_net(sk);
	int ret;

	ret = __mptcp_init_sock(sk);
	if (ret)
		return ret;

	if (!mptcp_is_enabled(net))
		return -ENOPROTOOPT;

	if (unlikely(!net->mib.mptcp_statistics) && !mptcp_mib_alloc(net))
		return -ENOMEM;

	ret = __mptcp_socket_create(mptcp_sk(sk));
	if (ret)
		return ret;

	/* fetch the ca name; do it outside __mptcp_init_sock(), so that clone will
	 * propagate the correct value
	 */
	tcp_assign_congestion_control(sk);
	strcpy(mptcp_sk(sk)->ca_name, icsk->icsk_ca_ops->name);

	/* no need to keep a reference to the ops, the name will suffice */
	tcp_cleanup_congestion_control(sk);
	icsk->icsk_ca_ops = NULL;

	sk_sockets_allocated_inc(sk);
	sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];
	sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[1];

	return 0;
}

static void __mptcp_clear_xmit(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_data_frag *dtmp, *dfrag;

	WRITE_ONCE(msk->first_pending, NULL);
	list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list)
		dfrag_clear(sk, dfrag);
}

static void mptcp_cancel_work(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (cancel_work_sync(&msk->work))
		__sock_put(sk);
}

void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
{
	lock_sock(ssk);

	switch (ssk->sk_state) {
	case TCP_LISTEN:
		if (!(how & RCV_SHUTDOWN))
			break;
		fallthrough;
	case TCP_SYN_SENT:
		tcp_disconnect(ssk, O_NONBLOCK);
		break;
	default:
		if (__mptcp_check_fallback(mptcp_sk(sk))) {
			pr_debug("Fallback");
			ssk->sk_shutdown |= how;
			tcp_shutdown(ssk, how);
		} else {
			pr_debug("Sending DATA_FIN on subflow %p", ssk);
			tcp_send_ack(ssk);
			if (!mptcp_timer_pending(sk))
				mptcp_reset_timer(sk);
		}
		break;
	}

	release_sock(ssk);
}

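/* Same state diagram as tcp_close_state(): indexing new_state[] with the
 * current sk_state yields the next state, optionally OR-ed with
 * TCP_ACTION_FIN when a DATA_FIN must be queued, e.g. TCP_ESTABLISHED moves
 * to TCP_FIN_WAIT1 and requires sending a DATA_FIN.
 */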
static const unsigned char new_state[16] = {
	/* current state:     new state:      action:	*/
	[0 /* (Invalid) */] = TCP_CLOSE,
	[TCP_ESTABLISHED]   = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
	[TCP_SYN_SENT]      = TCP_CLOSE,
	[TCP_SYN_RECV]      = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
	[TCP_FIN_WAIT1]     = TCP_FIN_WAIT1,
	[TCP_FIN_WAIT2]     = TCP_FIN_WAIT2,
	[TCP_TIME_WAIT]     = TCP_CLOSE,	/* should not happen ! */
	[TCP_CLOSE]         = TCP_CLOSE,
	[TCP_CLOSE_WAIT]    = TCP_LAST_ACK  | TCP_ACTION_FIN,
	[TCP_LAST_ACK]      = TCP_LAST_ACK,
	[TCP_LISTEN]        = TCP_CLOSE,
	[TCP_CLOSING]       = TCP_CLOSING,
	[TCP_NEW_SYN_RECV]  = TCP_CLOSE,	/* should not happen ! */
};

static int mptcp_close_state(struct sock *sk)
{
	int next = (int)new_state[sk->sk_state];
	int ns = next & TCP_STATE_MASK;

	inet_sk_state_store(sk, ns);

	return next & TCP_ACTION_FIN;
}

static void __mptcp_check_send_data_fin(struct sock *sk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk = mptcp_sk(sk);

	pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu",
		 msk, msk->snd_data_fin_enable, !!mptcp_send_head(sk),
		 msk->snd_nxt, msk->write_seq);

	/* we still need to enqueue subflows or not really shutting down,
	 * skip this
	 */
	if (!msk->snd_data_fin_enable || msk->snd_nxt + 1 != msk->write_seq ||
	    mptcp_send_head(sk))
		return;

	WRITE_ONCE(msk->snd_nxt, msk->write_seq);

	/* fallback socket will not get data_fin/ack, can move to the next
	 * state now
	 */
	if (__mptcp_check_fallback(msk)) {
		if ((1 << sk->sk_state) & (TCPF_CLOSING | TCPF_LAST_ACK)) {
			inet_sk_state_store(sk, TCP_CLOSE);
			mptcp_close_wake_up(sk);
		} else if (sk->sk_state == TCP_FIN_WAIT1) {
			inet_sk_state_store(sk, TCP_FIN_WAIT2);
		}
	}

	mptcp_flush_join_list(msk);
	mptcp_for_each_subflow(msk, subflow) {
		struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);

		mptcp_subflow_shutdown(sk, tcp_sk, SEND_SHUTDOWN);
	}
}

static void __mptcp_wr_shutdown(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d",
		 msk, msk->snd_data_fin_enable, sk->sk_shutdown, sk->sk_state,
		 !!mptcp_send_head(sk));

	/* will be ignored by fallback sockets */
	WRITE_ONCE(msk->write_seq, msk->write_seq + 1);
	WRITE_ONCE(msk->snd_data_fin_enable, 1);

	__mptcp_check_send_data_fin(sk);
}

static void __mptcp_destroy_sock(struct sock *sk)
{
	struct mptcp_subflow_context *subflow, *tmp;
	struct mptcp_sock *msk = mptcp_sk(sk);
	LIST_HEAD(conn_list);

	pr_debug("msk=%p", msk);

	might_sleep();

	/* be sure to always acquire the join list lock, to sync vs
	 * mptcp_finish_join().
	 */
	spin_lock_bh(&msk->join_list_lock);
	list_splice_tail_init(&msk->join_list, &msk->conn_list);
	spin_unlock_bh(&msk->join_list_lock);
	list_splice_init(&msk->conn_list, &conn_list);

	sk_stop_timer(sk, &msk->sk.icsk_retransmit_timer);
	sk_stop_timer(sk, &sk->sk_timer);
	msk->pm.status = 0;

	list_for_each_entry_safe(subflow, tmp, &conn_list, node) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		__mptcp_close_ssk(sk, ssk, subflow);
	}

	sk->sk_prot->destroy(sk);

	WARN_ON_ONCE(msk->wmem_reserved);
	WARN_ON_ONCE(msk->rmem_released);
	sk_stream_kill_queues(sk);
	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);
	mptcp_dispose_initial_subflow(msk);
	sock_put(sk);
}

static void mptcp_close(struct sock *sk, long timeout)
{
	struct mptcp_subflow_context *subflow;
	bool do_cancel_work = false;

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) {
		inet_sk_state_store(sk, TCP_CLOSE);
		goto cleanup;
	}

	if (mptcp_close_state(sk))
		__mptcp_wr_shutdown(sk);

	sk_stream_wait_close(sk, timeout);

cleanup:
	/* orphan all the subflows */
	inet_csk(sk)->icsk_mtup.probe_timestamp = tcp_jiffies32;
	mptcp_for_each_subflow(mptcp_sk(sk), subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		bool slow = lock_sock_fast(ssk);

		sock_orphan(ssk);
		unlock_sock_fast(ssk, slow);
	}
	sock_orphan(sk);

	sock_hold(sk);
	pr_debug("msk=%p state=%d", sk, sk->sk_state);
	if (sk->sk_state == TCP_CLOSE) {
		__mptcp_destroy_sock(sk);
		do_cancel_work = true;
	} else {
		sk_reset_timer(sk, &sk->sk_timer, jiffies + TCP_TIMEWAIT_LEN);
	}
	release_sock(sk);
	if (do_cancel_work)
		mptcp_cancel_work(sk);

	if (mptcp_sk(sk)->token)
		mptcp_event(MPTCP_EVENT_CLOSED, mptcp_sk(sk), NULL, GFP_KERNEL);

	sock_put(sk);
}

static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	const struct ipv6_pinfo *ssk6 = inet6_sk(ssk);
	struct ipv6_pinfo *msk6 = inet6_sk(msk);

	msk->sk_v6_daddr = ssk->sk_v6_daddr;
	msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr;

	if (msk6 && ssk6) {
		msk6->saddr = ssk6->saddr;
		msk6->flow_label = ssk6->flow_label;
	}
#endif

	inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num;
	inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport;
	inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport;
	inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr;
	inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr;
	inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr;
}

static int mptcp_disconnect(struct sock *sk, int flags)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk = mptcp_sk(sk);

	mptcp_do_flush_join_list(msk);

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		lock_sock(ssk);
		tcp_disconnect(ssk, flags);
		release_sock(ssk);
	}
	return 0;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
{
	unsigned int offset = sizeof(struct mptcp6_sock) - sizeof(struct ipv6_pinfo);

	return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
}
#endif

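/* Create the msk for a passively established connection: keys and initial
 * sequence numbers come from the subflow request socket and from the peer's
 * MP_CAPABLE option; the new socket stays in TCP_SYN_RECV until the MPC
 * subflow creation completes.
 */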
struct sock *mptcp_sk_clone(const struct sock *sk,
			    const struct mptcp_options_received *mp_opt,
			    struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC);
	struct mptcp_sock *msk;
	u64 ack_seq;

	if (!nsk)
		return NULL;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (nsk->sk_family == AF_INET6)
		inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk);
#endif

	__mptcp_init_sock(nsk);

	msk = mptcp_sk(nsk);
	msk->local_key = subflow_req->local_key;
	msk->token = subflow_req->token;
	msk->subflow = NULL;
	WRITE_ONCE(msk->fully_established, false);
	if (mp_opt->suboptions & OPTION_MPTCP_CSUMREQD)
		WRITE_ONCE(msk->csum_enabled, true);

	msk->write_seq = subflow_req->idsn + 1;
	msk->snd_nxt = msk->write_seq;
	msk->snd_una = msk->write_seq;
	msk->wnd_end = msk->snd_nxt + req->rsk_rcv_wnd;
	msk->setsockopt_seq = mptcp_sk(sk)->setsockopt_seq;

	if (mp_opt->suboptions & OPTIONS_MPTCP_MPC) {
		msk->can_ack = true;
		msk->remote_key = mp_opt->sndr_key;
		mptcp_crypto_key_sha(msk->remote_key, NULL, &ack_seq);
		ack_seq++;
		WRITE_ONCE(msk->ack_seq, ack_seq);
		WRITE_ONCE(msk->rcv_wnd_sent, ack_seq);
	}

	sock_reset_flag(nsk, SOCK_RCU_FREE);
	/* will be fully established after successful MPC subflow creation */
	inet_sk_state_store(nsk, TCP_SYN_RECV);

	security_inet_csk_clone(nsk, req);
	bh_unlock_sock(nsk);

	/* keep a single reference */
	__sock_put(nsk);
	return nsk;
}

void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
{
	const struct tcp_sock *tp = tcp_sk(ssk);

	msk->rcvq_space.copied = 0;
	msk->rcvq_space.rtt_us = 0;

	msk->rcvq_space.time = tp->tcp_mstamp;

	/* initial rcv_space offering made to peer */
	msk->rcvq_space.space = min_t(u32, tp->rcv_wnd,
				      TCP_INIT_CWND * tp->advmss);
	if (msk->rcvq_space.space == 0)
		msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT;

	WRITE_ONCE(msk->wnd_end, msk->snd_nxt + tcp_sk(ssk)->snd_wnd);
}

static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
				 bool kern)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *listener;
	struct sock *newsk;

	listener = __mptcp_nmpc_socket(msk);
	if (WARN_ON_ONCE(!listener)) {
		*err = -EINVAL;
		return NULL;
	}

	pr_debug("msk=%p, listener=%p", msk, mptcp_subflow_ctx(listener->sk));
	newsk = inet_csk_accept(listener->sk, flags, err, kern);
	if (!newsk)
		return NULL;

	pr_debug("msk=%p, subflow is mptcp=%d", msk, sk_is_mptcp(newsk));
	if (sk_is_mptcp(newsk)) {
		struct mptcp_subflow_context *subflow;
		struct sock *new_mptcp_sock;

		subflow = mptcp_subflow_ctx(newsk);
		new_mptcp_sock = subflow->conn;

		/* is_mptcp should be false if subflow->conn is missing, see
		 * subflow_syn_recv_sock()
		 */
		if (WARN_ON_ONCE(!new_mptcp_sock)) {
			tcp_sk(newsk)->is_mptcp = 0;
			return newsk;
		}

		/* acquire the 2nd reference for the owning socket */
		sock_hold(new_mptcp_sock);
		newsk = new_mptcp_sock;
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
	} else {
		MPTCP_INC_STATS(sock_net(sk),
				MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);
	}

	return newsk;
}

void mptcp_destroy_common(struct mptcp_sock *msk)
{
	struct sock *sk = (struct sock *)msk;

	__mptcp_clear_xmit(sk);

	/* move to sk_receive_queue, sk_stream_kill_queues will purge it */
	skb_queue_splice_tail_init(&msk->receive_queue, &sk->sk_receive_queue);

	skb_rbtree_purge(&msk->out_of_order_queue);
	mptcp_token_destroy(msk);
	mptcp_pm_free_anno_list(msk);
}

static void mptcp_destroy(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	mptcp_destroy_common(msk);
	sk_sockets_allocated_dec(sk);
}

void __mptcp_data_acked(struct sock *sk)
{
	if (!sock_owned_by_user(sk))
		__mptcp_clean_una(sk);
	else
		set_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->flags);

	if (mptcp_pending_data_fin_ack(sk))
		mptcp_schedule_work(sk);
}

void __mptcp_check_push(struct sock *sk, struct sock *ssk)
{
	if (!mptcp_send_head(sk))
		return;

	if (!sock_owned_by_user(sk)) {
		struct sock *xmit_ssk = mptcp_subflow_get_send(mptcp_sk(sk));

		if (xmit_ssk == ssk)
			__mptcp_subflow_push_pending(sk, ssk);
		else if (xmit_ssk)
			mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk));
	} else {
		set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags);
	}
}

/* processes deferred events and flush wmem */
static void mptcp_release_cb(struct sock *sk)
{
	for (;;) {
		unsigned long flags = 0;

		if (test_and_clear_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags))
			flags |= BIT(MPTCP_PUSH_PENDING);
		if (test_and_clear_bit(MPTCP_RETRANSMIT, &mptcp_sk(sk)->flags))
			flags |= BIT(MPTCP_RETRANSMIT);
		if (!flags)
			break;

		/* the following actions acquire the subflow socket lock
		 *
		 * 1) can't be invoked in atomic scope
		 * 2) must avoid ABBA deadlock with msk socket spinlock: the RX
		 *    datapath acquires the msk socket spinlock while holding
		 *    the subflow socket lock
		 */

		spin_unlock_bh(&sk->sk_lock.slock);
		if (flags & BIT(MPTCP_PUSH_PENDING))
			__mptcp_push_pending(sk, 0);
		if (flags & BIT(MPTCP_RETRANSMIT))
			__mptcp_retrans(sk);

		cond_resched();
		spin_lock_bh(&sk->sk_lock.slock);
	}

	/* be sure to set the current sk state before taking actions
	 * depending on sk_state
	 */
	if (test_and_clear_bit(MPTCP_CONNECTED, &mptcp_sk(sk)->flags))
		__mptcp_set_connected(sk);
	if (test_and_clear_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->flags))
		__mptcp_clean_una_wakeup(sk);
	if (test_and_clear_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->flags))
		__mptcp_error_report(sk);

	/* push_pending may touch wmem_reserved, ensure we do the cleanup
	 * later
	 */
	__mptcp_update_wmem(sk);
	__mptcp_update_rmem(sk);
}

void mptcp_subflow_process_delegated(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = subflow->conn;

	mptcp_data_lock(sk);
	if (!sock_owned_by_user(sk))
		__mptcp_subflow_push_pending(sk, ssk);
	else
		set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags);
	mptcp_data_unlock(sk);
	mptcp_subflow_delegated_done(subflow);
}

static int mptcp_hash(struct sock *sk)
{
	/* should never be called,
	 * we hash the TCP subflows not the master socket
	 */
	WARN_ON_ONCE(1);
	return 0;
}

static void mptcp_unhash(struct sock *sk)
{
	/* called from sk_common_release(), but nothing to do here */
}

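/* Local port handling is delegated to the initial subflow socket. */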
static int mptcp_get_port(struct sock *sk, unsigned short snum)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *ssock;

	ssock = __mptcp_nmpc_socket(msk);
	pr_debug("msk=%p, subflow=%p", msk, ssock);
	if (WARN_ON_ONCE(!ssock))
		return -EINVAL;

	return inet_csk_get_port(ssock->sk, snum);
}

void mptcp_finish_connect(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk;
	struct sock *sk;
	u64 ack_seq;

	subflow = mptcp_subflow_ctx(ssk);
	sk = subflow->conn;
	msk = mptcp_sk(sk);

	pr_debug("msk=%p, token=%u", sk, subflow->token);

	mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq);
	ack_seq++;
	subflow->map_seq = ack_seq;
	subflow->map_subflow_seq = 1;

	/* the socket is not connected yet, no msk/subflow ops can access/race
	 * accessing the field below
	 */
	WRITE_ONCE(msk->remote_key, subflow->remote_key);
	WRITE_ONCE(msk->local_key, subflow->local_key);
	WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
	WRITE_ONCE(msk->snd_nxt, msk->write_seq);
	WRITE_ONCE(msk->ack_seq, ack_seq);
	WRITE_ONCE(msk->rcv_wnd_sent, ack_seq);
	WRITE_ONCE(msk->can_ack, 1);
	WRITE_ONCE(msk->snd_una, msk->write_seq);

	mptcp_pm_new_connection(msk, ssk, 0);

	mptcp_rcv_space_init(msk, ssk);
}

void mptcp_sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	rcu_assign_pointer(sk->sk_wq, &parent->wq);
	sk_set_socket(sk, parent);
	sk->sk_uid = SOCK_INODE(parent)->i_uid;
	write_unlock_bh(&sk->sk_callback_lock);
}

bool mptcp_finish_join(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	struct sock *parent = (void *)msk;
	struct socket *parent_sock;
	bool ret;

	pr_debug("msk=%p, subflow=%p", msk, subflow);

	/* mptcp socket already closing? */
	if (!mptcp_is_fully_established(parent)) {
		subflow->reset_reason = MPTCP_RST_EMPTCP;
		return false;
	}

	if (!msk->pm.server_side)
		goto out;

	if (!mptcp_pm_allow_new_subflow(msk)) {
		subflow->reset_reason = MPTCP_RST_EPROHIBIT;
		return false;
	}

	/* active connections are already on conn_list, and we can't acquire
	 * msk lock here.
	 * use the join list lock as synchronization point and double-check
	 * msk status to avoid racing with __mptcp_destroy_sock()
	 */
	spin_lock_bh(&msk->join_list_lock);
	ret = inet_sk_state_load(parent) == TCP_ESTABLISHED;
	if (ret && !WARN_ON_ONCE(!list_empty(&subflow->node))) {
		list_add_tail(&subflow->node, &msk->join_list);
		sock_hold(ssk);
	}
	spin_unlock_bh(&msk->join_list_lock);
	if (!ret) {
		subflow->reset_reason = MPTCP_RST_EPROHIBIT;
		return false;
	}

	/* attach to msk socket only after we are sure it will deal with us
	 * at close time
	 */
	parent_sock = READ_ONCE(parent->sk_socket);
	if (parent_sock && !ssk->sk_socket)
		mptcp_sock_graft(ssk, parent_sock);
	subflow->map_seq = READ_ONCE(msk->ack_seq);
out:
	mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC);
	return true;
}

static void mptcp_shutdown(struct sock *sk, int how)
{
	pr_debug("sk=%p, how=%d", sk, how);

	if ((how & SEND_SHUTDOWN) && mptcp_close_state(sk))
		__mptcp_wr_shutdown(sk);
}

static struct proto mptcp_prot = {
	.name		= "MPTCP",
	.owner		= THIS_MODULE,
	.init		= mptcp_init_sock,
	.disconnect	= mptcp_disconnect,
	.close		= mptcp_close,
	.accept		= mptcp_accept,
	.setsockopt	= mptcp_setsockopt,
	.getsockopt	= mptcp_getsockopt,
	.shutdown	= mptcp_shutdown,
	.destroy	= mptcp_destroy,
	.sendmsg	= mptcp_sendmsg,
	.recvmsg	= mptcp_recvmsg,
	.release_cb	= mptcp_release_cb,
	.hash		= mptcp_hash,
	.unhash		= mptcp_unhash,
	.get_port	= mptcp_get_port,
	.sockets_allocated	= &mptcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
	.sysctl_mem	= sysctl_tcp_mem,
	.obj_size	= sizeof(struct mptcp_sock),
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
	.no_autobind	= true,
};

static int mptcp_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	lock_sock(sock->sk);
	ssock = __mptcp_nmpc_socket(msk);
	if (!ssock) {
		err = -EINVAL;
		goto unlock;
	}

	err = ssock->ops->bind(ssock, uaddr, addr_len);
	if (!err)
		mptcp_copy_inaddrs(sock->sk, ssock->sk);

unlock:
	release_sock(sock->sk);
	return err;
}

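/* Give up on MPTCP before the SYN is sent: clear the request flag on the
 * initial subflow and mark the msk as fallen back to plain TCP.
 */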
static void mptcp_subflow_early_fallback(struct mptcp_sock *msk,
					 struct mptcp_subflow_context *subflow)
{
	subflow->request_mptcp = 0;
	__mptcp_do_fallback(msk);
}

static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
				int addr_len, int flags)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct mptcp_subflow_context *subflow;
	struct socket *ssock;
	int err;

	lock_sock(sock->sk);
	if (sock->state != SS_UNCONNECTED && msk->subflow) {
		/* pending connection or invalid state, let existing subflow
		 * cope with that
		 */
		ssock = msk->subflow;
		goto do_connect;
	}

	ssock = __mptcp_nmpc_socket(msk);
	if (!ssock) {
		err = -EINVAL;
		goto unlock;
	}

	mptcp_token_destroy(msk);
	inet_sk_state_store(sock->sk, TCP_SYN_SENT);
	subflow = mptcp_subflow_ctx(ssock->sk);
#ifdef CONFIG_TCP_MD5SIG
	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
	 * TCP option space.
	 */
	if (rcu_access_pointer(tcp_sk(ssock->sk)->md5sig_info))
		mptcp_subflow_early_fallback(msk, subflow);
#endif
	if (subflow->request_mptcp && mptcp_token_new_connect(ssock->sk)) {
		MPTCP_INC_STATS(sock_net(ssock->sk), MPTCP_MIB_TOKENFALLBACKINIT);
		mptcp_subflow_early_fallback(msk, subflow);
	}
	if (likely(!__mptcp_check_fallback(msk)))
		MPTCP_INC_STATS(sock_net(sock->sk), MPTCP_MIB_MPCAPABLEACTIVE);

do_connect:
	err = ssock->ops->connect(ssock, uaddr, addr_len, flags);
	sock->state = ssock->state;

	/* on successful connect, the msk state will be moved to established by
	 * subflow_finish_connect()
	 */
	if (!err || err == -EINPROGRESS)
		mptcp_copy_inaddrs(sock->sk, ssock->sk);
	else
		inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));

unlock:
	release_sock(sock->sk);
	return err;
}

static int mptcp_listen(struct socket *sock, int backlog)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	pr_debug("msk=%p", msk);

	lock_sock(sock->sk);
	ssock = __mptcp_nmpc_socket(msk);
	if (!ssock) {
		err = -EINVAL;
		goto unlock;
	}

	mptcp_token_destroy(msk);
	inet_sk_state_store(sock->sk, TCP_LISTEN);
	sock_set_flag(sock->sk, SOCK_RCU_FREE);

	err = ssock->ops->listen(ssock, backlog);
	inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
	if (!err)
		mptcp_copy_inaddrs(sock->sk, ssock->sk);

unlock:
	release_sock(sock->sk);
	return err;
}

static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
			       int flags, bool kern)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	pr_debug("msk=%p", msk);

	lock_sock(sock->sk);
	if (sock->sk->sk_state != TCP_LISTEN)
		goto unlock_fail;

	ssock = __mptcp_nmpc_socket(msk);
	if (!ssock)
		goto unlock_fail;

	clear_bit(MPTCP_DATA_READY, &msk->flags);
	sock_hold(ssock->sk);
	release_sock(sock->sk);

	err = ssock->ops->accept(sock, newsock, flags, kern);
	if (err == 0 && !mptcp_is_tcpsk(newsock->sk)) {
		struct mptcp_sock *msk = mptcp_sk(newsock->sk);
		struct mptcp_subflow_context *subflow;
		struct sock *newsk = newsock->sk;

		lock_sock(newsk);

		/* PM/worker can now acquire the first subflow socket
		 * lock without racing with listener queue cleanup,
		 * we can notify it, if needed.
		 *
		 * Even if remote has reset the initial subflow by now
		 * the refcnt is still at least one.
		 */
		subflow = mptcp_subflow_ctx(msk->first);
		list_add(&subflow->node, &msk->conn_list);
		sock_hold(msk->first);
		if (mptcp_is_fully_established(newsk))
			mptcp_pm_fully_established(msk, msk->first, GFP_KERNEL);

		mptcp_copy_inaddrs(newsk, msk->first);
		mptcp_rcv_space_init(msk, msk->first);
		mptcp_propagate_sndbuf(newsk, msk->first);

		/* set ssk->sk_socket of accept()ed flows to mptcp socket.
		 * This is needed so NOSPACE flag can be set from tcp stack.
		 */
		mptcp_flush_join_list(msk);
		mptcp_for_each_subflow(msk, subflow) {
			struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

			if (!ssk->sk_socket)
				mptcp_sock_graft(ssk, newsock);
		}
		release_sock(newsk);
	}

	if (inet_csk_listen_poll(ssock->sk))
		set_bit(MPTCP_DATA_READY, &msk->flags);
	sock_put(ssock->sk);
	return err;

unlock_fail:
	release_sock(sock->sk);
	return -EINVAL;
}

static __poll_t mptcp_check_readable(struct mptcp_sock *msk)
{
	return test_bit(MPTCP_DATA_READY, &msk->flags) ? EPOLLIN | EPOLLRDNORM :
	       0;
}

static __poll_t mptcp_check_writeable(struct mptcp_sock *msk)
{
	struct sock *sk = (struct sock *)msk;

	if (unlikely(sk->sk_shutdown & SEND_SHUTDOWN))
		return EPOLLOUT | EPOLLWRNORM;

	if (sk_stream_is_writeable(sk))
		return EPOLLOUT | EPOLLWRNORM;

	mptcp_set_nospace(sk);
	smp_mb__after_atomic(); /* msk->flags is changed by write_space cb */
	if (sk_stream_is_writeable(sk))
		return EPOLLOUT | EPOLLWRNORM;

	return 0;
}

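/* poll() state mapping: a listening msk reports EPOLLIN through
 * MPTCP_DATA_READY, established sockets report both readable and writable
 * state, and EPOLLHUP/EPOLLRDHUP follow sk_shutdown as in plain TCP.
 */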
static __poll_t mptcp_poll(struct file *file, struct socket *sock,
			   struct poll_table_struct *wait)
{
	struct sock *sk = sock->sk;
	struct mptcp_sock *msk;
	__poll_t mask = 0;
	int state;

	msk = mptcp_sk(sk);
	sock_poll_wait(file, sock, wait);

	state = inet_sk_state_load(sk);
	pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags);
	if (state == TCP_LISTEN)
		return mptcp_check_readable(msk);

	if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) {
		mask |= mptcp_check_readable(msk);
		mask |= mptcp_check_writeable(msk);
	}
	if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
		mask |= EPOLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;

	/* This barrier is coupled with smp_wmb() in tcp_reset() */
	smp_rmb();
	if (sk->sk_err)
		mask |= EPOLLERR;

	return mask;
}

static const struct proto_ops mptcp_stream_ops = {
	.family		   = PF_INET,
	.owner		   = THIS_MODULE,
	.release	   = inet_release,
	.bind		   = mptcp_bind,
	.connect	   = mptcp_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = mptcp_stream_accept,
	.getname	   = inet_getname,
	.poll		   = mptcp_poll,
	.ioctl		   = inet_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = mptcp_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = inet_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = inet_sendpage,
};

static struct inet_protosw mptcp_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_MPTCP,
	.prot		= &mptcp_prot,
	.ops		= &mptcp_stream_ops,
	.flags		= INET_PROTOSW_ICSK,
};
P
static int mptcp_napi_poll(struct napi_struct *napi, int budget)
{
	struct mptcp_delegated_action *delegated;
	struct mptcp_subflow_context *subflow;
	int work_done = 0;

	delegated = container_of(napi, struct mptcp_delegated_action, napi);
	while ((subflow = mptcp_subflow_delegated_next(delegated)) != NULL) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		bh_lock_sock_nested(ssk);
		if (!sock_owned_by_user(ssk) &&
		    mptcp_subflow_has_delegated_action(subflow))
			mptcp_subflow_process_delegated(ssk);
		/* ... elsewhere tcp_release_cb_override already processed
		 * the action or will do at next release_sock().
		 * In both cases we must dequeue the subflow here - on the same
		 * CPU that scheduled it.
		 */
		bh_unlock_sock(ssk);
		sock_put(ssk);

		if (++work_done == budget)
			return budget;
	}

	/* always provide a 0 'work_done' argument, so that napi_complete_done
	 * will not try accessing the NULL napi->dev ptr
	 */
	napi_complete_done(napi, 0);
	return work_done;
}

void __init mptcp_proto_init(void)
{
	struct mptcp_delegated_action *delegated;
	int cpu;

	mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo;

	if (percpu_counter_init(&mptcp_sockets_allocated, 0, GFP_KERNEL))
		panic("Failed to allocate MPTCP pcpu counter\n");

	init_dummy_netdev(&mptcp_napi_dev);
	for_each_possible_cpu(cpu) {
		delegated = per_cpu_ptr(&mptcp_delegated_actions, cpu);
		INIT_LIST_HEAD(&delegated->head);
		netif_tx_napi_add(&mptcp_napi_dev, &delegated->napi, mptcp_napi_poll,
				  NAPI_POLL_WEIGHT);
		napi_enable(&delegated->napi);
	}

	mptcp_subflow_init();
	mptcp_pm_init();
	mptcp_token_init();

	if (proto_register(&mptcp_prot, 1) != 0)
		panic("Failed to register MPTCP proto.\n");

	inet_register_protosw(&mptcp_protosw);

	BUILD_BUG_ON(sizeof(struct mptcp_skb_cb) > sizeof_field(struct sk_buff, cb));
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static const struct proto_ops mptcp_v6_stream_ops = {
	.family		   = PF_INET6,
	.owner		   = THIS_MODULE,
	.release	   = inet6_release,
	.bind		   = mptcp_bind,
	.connect	   = mptcp_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = mptcp_stream_accept,
	.getname	   = inet6_getname,
	.poll		   = mptcp_poll,
	.ioctl		   = inet6_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = mptcp_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet6_sendmsg,
	.recvmsg	   = inet6_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = inet_sendpage,
#ifdef CONFIG_COMPAT
	.compat_ioctl	   = inet6_compat_ioctl,
#endif
};

static struct proto mptcp_v6_prot;

static void mptcp_v6_destroy(struct sock *sk)
{
	mptcp_destroy(sk);
	inet6_destroy_sock(sk);
}

static struct inet_protosw mptcp_v6_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_MPTCP,
	.prot		= &mptcp_v6_prot,
	.ops		= &mptcp_v6_stream_ops,
	.flags		= INET_PROTOSW_ICSK,
};

int __init mptcp_proto_v6_init(void)
{
	int err;

	mptcp_v6_prot = mptcp_prot;
	strcpy(mptcp_v6_prot.name, "MPTCPv6");
	mptcp_v6_prot.slab = NULL;
	mptcp_v6_prot.destroy = mptcp_v6_destroy;
	mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock);

	err = proto_register(&mptcp_v6_prot, 1);
	if (err)
		return err;

	err = inet6_register_protosw(&mptcp_v6_protosw);
	if (err)
		proto_unregister(&mptcp_v6_prot);

	return err;
}
#endif