socket.c 72.0 KB
Newer Older
P
Per Liden 已提交
1
/*
2
 * net/tipc/socket.c: TIPC socket API
3
 *
4
 * Copyright (c) 2001-2007, 2012-2016, Ericsson AB
5
 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
P
Per Liden 已提交
6 7
 * All rights reserved.
 *
P
Per Liden 已提交
8
 * Redistribution and use in source and binary forms, with or without
P
Per Liden 已提交
9 10
 * modification, are permitted provided that the following conditions are met:
 *
P
Per Liden 已提交
11 12 13 14 15 16 17 18
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
P
Per Liden 已提交
19
 *
P
Per Liden 已提交
20 21 22 23 24 25 26 27 28 29 30 31 32 33
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
P
Per Liden 已提交
34 35 36
 * POSSIBILITY OF SUCH DAMAGE.
 */

37
#include <linux/rhashtable.h>
38 39
#include <linux/sched/signal.h>

P
Per Liden 已提交
40
#include "core.h"
41
#include "name_table.h"
E
Erik Hugne 已提交
42
#include "node.h"
43
#include "link.h"
44
#include "name_distr.h"
45
#include "socket.h"
46
#include "bcast.h"
47
#include "netlink.h"
48

49
/* Socket-wide tunables and limits */
#define CONN_TIMEOUT_DEFAULT	8000	/* default connect timeout = 8s */
#define CONN_PROBING_INTERVAL	msecs_to_jiffies(3600000)  /* [ms] => 1 h */
#define TIPC_FWD_MSG		1	/* forward a returned message once */
#define TIPC_MAX_PORT		0xffffffff	/* highest usable port number */
#define TIPC_MIN_PORT		1		/* lowest usable port number */
54

55 56
/* TIPC socket states, expressed in terms of the standard TCP state
 * values so that generic socket infrastructure that inspects
 * sk->sk_state keeps working.
 */
enum {
	TIPC_LISTEN = TCP_LISTEN,
	TIPC_ESTABLISHED = TCP_ESTABLISHED,
	TIPC_OPEN = TCP_CLOSE,
	TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
	TIPC_CONNECTING = TCP_SYN_SENT,
};

63 64 65 66 67 68 69
/**
 * struct tipc_sock - TIPC socket structure
 * @sk: socket - interacts with 'port' and with user via the socket API
 * @conn_type: TIPC type used when connection was established
 * @conn_instance: TIPC instance used when connection was established
 * @published: non-zero if port has one or more associated names
 * @max_pkt: maximum packet size "hint" used when building messages sent by port
 * @portid: unique port identity in TIPC socket hash table
 * @phdr: preformatted message header used when sending messages
 * @cong_links: list of congested links
 * @publications: list of publications for port
 * @pub_count: total # of publications port has made during its lifetime
 * @conn_timeout: the time we can wait for an unresponded setup request
 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
 * @probe_unacked: a keepalive probe has been sent and is not yet acked by peer
 * @cong_link_cnt: number of congested links
 * @snt_unacked: # messages sent by socket, and not yet acked by peer
 * @snd_win: send window advertised by peer
 * @peer_caps: capability bits advertised by peer
 * @rcv_unacked: # messages read by user, but not yet acked back to peer
 * @rcv_win: receive window advertised to peer
 * @peer: 'connected' peer for dgram/rdm
 * @node: hash table node
 * @mc_method: cookie for use between socket and broadcast layer
 * @rcu: rcu struct for tipc_sock
 */
struct tipc_sock {
	struct sock sk;
	u32 conn_type;
	u32 conn_instance;
	int published;
	u32 max_pkt;
	u32 portid;
	struct tipc_msg phdr;
	struct list_head cong_links;
	struct list_head publications;
	u32 pub_count;
	uint conn_timeout;
	atomic_t dupl_rcvcnt;
	bool probe_unacked;
	u16 cong_link_cnt;
	u16 snt_unacked;
	u16 snd_win;
	u16 peer_caps;
	u16 rcv_unacked;
	u16 rcv_win;
	struct sockaddr_tipc peer;
	struct rhash_head node;
	struct tipc_mc_method mc_method;
	struct rcu_head rcu;
};
P
Per Liden 已提交
112

113
/* Forward declarations of functions and variables defined later in this file */
static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
static void tipc_data_ready(struct sock *sk);
static void tipc_write_space(struct sock *sk);
static void tipc_sock_destruct(struct sock *sk);
static int tipc_release(struct socket *sock);
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
static void tipc_sk_timeout(unsigned long data);
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
			   struct tipc_name_seq const *seq);
static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
			    struct tipc_name_seq const *seq);
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
static int tipc_sk_insert(struct tipc_sock *tsk);
static void tipc_sk_remove(struct tipc_sock *tsk);
static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);

/* Per-socket-type operation tables and the TIPC protocol descriptor */
static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
static const struct proto_ops msg_ops;
static struct proto tipc_proto;
static const struct rhashtable_params tsk_rht_params;

136 137 138 139 140
/* tsk_own_node - node identity stored as 'previous node' in the
 * socket's preformatted message header
 */
static u32 tsk_own_node(struct tipc_sock *tsk)
{
	struct tipc_msg *hdr = &tsk->phdr;

	return msg_prevnode(hdr);
}

141
static u32 tsk_peer_node(struct tipc_sock *tsk)
142
{
143
	return msg_destnode(&tsk->phdr);
144 145
}

146
static u32 tsk_peer_port(struct tipc_sock *tsk)
147
{
148
	return msg_destport(&tsk->phdr);
149 150
}

151
static  bool tsk_unreliable(struct tipc_sock *tsk)
152
{
153
	return msg_src_droppable(&tsk->phdr) != 0;
154 155
}

156
/* tsk_set_unreliable - (un)set the source-droppable bit in the header */
static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
{
	int droppable = unreliable ? 1 : 0;

	msg_set_src_droppable(&tsk->phdr, droppable);
}

161
static bool tsk_unreturnable(struct tipc_sock *tsk)
162
{
163
	return msg_dest_droppable(&tsk->phdr) != 0;
164 165
}

166
/* tsk_set_unreturnable - (un)set the destination-droppable bit in the header */
static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
{
	int droppable = unreturnable ? 1 : 0;

	msg_set_dest_droppable(&tsk->phdr, droppable);
}

171
static int tsk_importance(struct tipc_sock *tsk)
172
{
173
	return msg_importance(&tsk->phdr);
174 175
}

176
static int tsk_set_importance(struct tipc_sock *tsk, int imp)
177 178 179
{
	if (imp > TIPC_CRITICAL_IMPORTANCE)
		return -EINVAL;
180
	msg_set_importance(&tsk->phdr, (u32)imp);
181 182
	return 0;
}
183

184 185 186 187 188
/* tipc_sk - cast a generic struct sock to its enclosing tipc_sock;
 * valid because 'sk' is the first member of struct tipc_sock
 */
static struct tipc_sock *tipc_sk(const struct sock *sk)
{
	return container_of(sk, struct tipc_sock, sk);
}

189
static bool tsk_conn_cong(struct tipc_sock *tsk)
190
{
191
	return tsk->snt_unacked > tsk->snd_win;
192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211
}

/* tsk_adv_blocks(): translate a buffer size in bytes to the number of
 * advertisable blocks, taking into account the ratio truesize(len)/len.
 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ.
 */
static u16 tsk_adv_blocks(int len)
{
	int blks = len / FLOWCTL_BLK_SZ;

	return blks / 4;
}

/* tsk_inc(): counter increment for sent or received data.
 * If the peer does not support block-based flow control we fall back
 * to message-based flow control, i.e. one unit per message.
 */
static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
{
	if (unlikely(!(tsk->peer_caps & TIPC_BLOCK_FLOWCTL)))
		return 1;

	return (msglen / FLOWCTL_BLK_SZ) + 1;
}

214
/**
215
 * tsk_advance_rx_queue - discard first buffer in socket receive queue
216 217
 *
 * Caller must hold socket lock
P
Per Liden 已提交
218
 */
219
static void tsk_advance_rx_queue(struct sock *sk)
P
Per Liden 已提交
220
{
221
	kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
P
Per Liden 已提交
222 223
}

224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239
/* tipc_sk_respond() : send response message back to sender.
 * tipc_msg_reverse() rewrites (or replaces) the buffer; if it returns
 * false the buffer has been consumed and nothing is sent.
 */
static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
{
	struct net *net = sock_net(sk);
	u32 onode = tipc_own_addr(net);
	u32 dnode;
	u32 selector;

	if (!tipc_msg_reverse(onode, &skb, err))
		return;

	selector = msg_origport(buf_msg(skb));
	dnode = msg_destnode(buf_msg(skb));
	tipc_node_xmit_skb(net, skb, dnode, selector);
}

P
Per Liden 已提交
240
/**
241
 * tsk_rej_rx_queue - reject all buffers in socket receive queue
242 243
 *
 * Caller must hold socket lock
P
Per Liden 已提交
244
 */
245
static void tsk_rej_rx_queue(struct sock *sk)
P
Per Liden 已提交
246
{
247
	struct sk_buff *skb;
248

249 250
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
		tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
P
Per Liden 已提交
251 252
}

253 254
static bool tipc_sk_connected(struct sock *sk)
{
255
	return sk->sk_state == TIPC_ESTABLISHED;
256 257
}

258 259 260 261 262 263 264 265 266 267
/* tipc_sk_type_connectionless - check if the socket is datagram socket
 * @sk: socket
 *
 * Returns true if connection less, false otherwise
 */
static bool tipc_sk_type_connectionless(struct sock *sk)
{
	return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
}

268
/* tsk_peer_msg - verify if message was sent by connected port's peer
 *
 * Handles cases where the node's network address has changed from
 * the default of <0.0.0> to its configured setting.
 */
static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
{
	struct sock *sk = &tsk->sk;
	struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
	u32 peer_port = tsk_peer_port(tsk);
	u32 orig_node;
	u32 peer_node;

	/* Only an established connection has a meaningful peer */
	if (unlikely(!tipc_sk_connected(sk)))
		return false;

	if (unlikely(msg_origport(msg) != peer_port))
		return false;

	orig_node = msg_orignode(msg);
	peer_node = tsk_peer_node(tsk);

	/* Common case: addresses match exactly */
	if (likely(orig_node == peer_node))
		return true;

	/* Message sent before own address was configured (origin still 0),
	 * but the connection was set up against our configured address
	 */
	if (!orig_node && (peer_node == tn->own_addr))
		return true;

	/* Connection set up before own address was configured (peer
	 * recorded as 0), message now carries the configured address
	 */
	if (!peer_node && (orig_node == tn->own_addr))
		return true;

	return false;
}

302 303 304 305 306 307 308 309 310
/* tipc_set_sk_state - set the sk_state of the socket
 * @sk: socket
 *
 * Caller must hold socket lock
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_set_sk_state(struct sock *sk, int state)
{
311
	int oldsk_state = sk->sk_state;
312 313 314
	int res = -EINVAL;

	switch (state) {
315 316 317
	case TIPC_OPEN:
		res = 0;
		break;
318
	case TIPC_LISTEN:
319
	case TIPC_CONNECTING:
320
		if (oldsk_state == TIPC_OPEN)
321 322
			res = 0;
		break;
323
	case TIPC_ESTABLISHED:
324
		if (oldsk_state == TIPC_CONNECTING ||
325
		    oldsk_state == TIPC_OPEN)
326 327
			res = 0;
		break;
328
	case TIPC_DISCONNECTING:
329
		if (oldsk_state == TIPC_CONNECTING ||
330 331 332
		    oldsk_state == TIPC_ESTABLISHED)
			res = 0;
		break;
333 334 335 336 337 338 339 340
	}

	if (!res)
		sk->sk_state = state;

	return res;
}

341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383
static int tipc_sk_sock_err(struct socket *sock, long *timeout)
{
	struct sock *sk = sock->sk;
	int err = sock_error(sk);
	int typ = sock->type;

	if (err)
		return err;
	if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) {
		if (sk->sk_state == TIPC_DISCONNECTING)
			return -EPIPE;
		else if (!tipc_sk_connected(sk))
			return -ENOTCONN;
	}
	if (!*timeout)
		return -EAGAIN;
	if (signal_pending(current))
		return sock_intr_errno(*timeout);

	return 0;
}

/* tipc_wait_for_cond - wait until condition_ is true, a socket error
 * occurs, timeout_ expires, or a signal arrives.
 *
 * Evaluates to 0 when condition_ became true, or the errno reported by
 * tipc_sk_sock_err() otherwise. Sleeps with woken_wake_function so that
 * sk_wait_event() releases and re-acquires the socket lock around the
 * actual schedule().
 *
 * Fix: the original initialized sk_ from 'sock->sk', i.e. it referenced
 * the caller's identifier 'sock' instead of the macro parameter 'sock_'.
 * That only worked because every current caller happens to name its
 * variable 'sock'; use the parameter (parenthesized) so the macro is
 * hygienic for any argument expression.
 */
#define tipc_wait_for_cond(sock_, timeout_, condition_)			\
({								        \
	int rc_ = 0;							\
	int done_ = 0;							\
									\
	while (!(condition_) && !done_) {				\
		struct sock *sk_ = (sock_)->sk;				\
		DEFINE_WAIT_FUNC(wait_, woken_wake_function);		\
									\
		rc_ = tipc_sk_sock_err((sock_), (timeout_));		\
		if (rc_)						\
			break;						\
		prepare_to_wait(sk_sleep(sk_), &wait_,			\
				TASK_INTERRUPTIBLE);			\
		done_ = sk_wait_event(sk_, timeout_,			\
				      (condition_), &wait_);		\
		remove_wait_queue(sk_sleep(sk_), &wait_);		\
	}								\
	rc_;								\
})

P
Per Liden 已提交
384
/**
 * tipc_sk_create - create a TIPC socket
 * @net: network namespace (must be default network)
 * @sock: pre-allocated socket structure
 * @protocol: protocol indicator (must be 0)
 * @kern: caused by kernel or by userspace?
 *
 * This routine creates additional data structures used by the TIPC socket,
 * initializes them, and links them together.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_sk_create(struct net *net, struct socket *sock,
			  int protocol, int kern)
{
	struct tipc_net *tn;
	const struct proto_ops *ops;
	struct sock *sk;
	struct tipc_sock *tsk;
	struct tipc_msg *msg;

	/* Validate arguments */
	if (unlikely(protocol != 0))
		return -EPROTONOSUPPORT;

	/* Select the operation table matching the socket type */
	switch (sock->type) {
	case SOCK_STREAM:
		ops = &stream_ops;
		break;
	case SOCK_SEQPACKET:
		ops = &packet_ops;
		break;
	case SOCK_DGRAM:
	case SOCK_RDM:
		ops = &msg_ops;
		break;
	default:
		return -EPROTOTYPE;
	}

	/* Allocate socket's protocol area */
	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
	if (sk == NULL)
		return -ENOMEM;

	tsk = tipc_sk(sk);
	tsk->max_pkt = MAX_PKT_DEFAULT;
	INIT_LIST_HEAD(&tsk->publications);
	INIT_LIST_HEAD(&tsk->cong_links);
	msg = &tsk->phdr;
	tn = net_generic(sock_net(sk), tipc_net_id);

	/* Finish initializing socket data structures */
	sock->ops = ops;
	sock_init_data(sock, sk);
	tipc_set_sk_state(sk, TIPC_OPEN);
	if (tipc_sk_insert(tsk)) {
		/* NOTE(review): 'sk' from sk_alloc() appears not to be
		 * released on this error path - verify whether a leak fix
		 * is needed here.
		 */
		pr_warn("Socket create failed; port number exhausted\n");
		return -EINVAL;
	}

	/* Ensure tsk is visible before we read own_addr. */
	smp_mb();

	tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
		      NAMED_H_SIZE, 0);

	msg_set_origport(msg, tsk->portid);
	setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk);
	sk->sk_shutdown = 0;
	sk->sk_backlog_rcv = tipc_backlog_rcv;
	sk->sk_rcvbuf = sysctl_tipc_rmem[1];
	sk->sk_data_ready = tipc_data_ready;
	sk->sk_write_space = tipc_write_space;
	sk->sk_destruct = tipc_sock_destruct;
	tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
	atomic_set(&tsk->dupl_rcvcnt, 0);

	/* Start out with safe limits until we receive an advertised window */
	tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
	tsk->rcv_win = tsk->snd_win;

	if (tipc_sk_type_connectionless(sk)) {
		tsk_set_unreturnable(tsk, true);
		if (sock->type == SOCK_DGRAM)
			tsk_set_unreliable(tsk, true);
	}

	return 0;
}

475 476 477 478 479 480 481
/* tipc_sk_callback - RCU grace-period callback dropping the final
 * socket reference taken before call_rcu() in tipc_release()
 */
static void tipc_sk_callback(struct rcu_head *head)
{
	struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);
	struct sock *sk = &tsk->sk;

	sock_put(sk);
}

482 483 484 485 486 487
/* __tipc_shutdown - shut down one direction/both of a socket.
 * Drains the receive queue, rejecting undelivered messages with 'error',
 * and for connection-oriented sockets sends a FIN-equivalent to the peer
 * and moves the socket to TIPC_DISCONNECTING.
 *
 * Caller should hold socket lock for the socket.
 */
static void __tipc_shutdown(struct socket *sock, int error)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct net *net = sock_net(sk);
	long timeout = CONN_TIMEOUT_DEFAULT;
	u32 dnode = tsk_peer_node(tsk);
	struct sk_buff *skb;

	/* Avoid that hi-prio shutdown msgs bypass msgs in link wakeup queue */
	tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
					    !tsk_conn_cong(tsk)));

	/* Reject all unreceived messages, except on an active connection
	 * (which disconnects locally & sends a 'FIN+' to peer).
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		/* Partially read message: discard silently */
		if (TIPC_SKB_CB(skb)->bytes_read) {
			kfree_skb(skb);
			continue;
		}
		/* First rejected message on a connection also tears the
		 * connection down locally
		 */
		if (!tipc_sk_type_connectionless(sk) &&
		    sk->sk_state != TIPC_DISCONNECTING) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			tipc_node_remove_conn(net, dnode, tsk->portid);
		}
		tipc_sk_respond(sk, skb, error);
	}

	if (tipc_sk_type_connectionless(sk))
		return;

	/* Still connected: notify peer with an explicit shutdown message */
	if (sk->sk_state != TIPC_DISCONNECTING) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
				      TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
				      tsk_own_node(tsk), tsk_peer_port(tsk),
				      tsk->portid, error);
		if (skb)
			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
		tipc_node_remove_conn(net, dnode, tsk->portid);
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
	}
}

P
Per Liden 已提交
527
/**
 * tipc_release - destroy a TIPC socket
 * @sock: socket to destroy
 *
 * This routine cleans up any messages that are still queued on the socket.
 * For DGRAM and RDM socket types, all queued messages are rejected.
 * For SEQPACKET and STREAM socket types, the first message is rejected
 * and any others are discarded.  (If the first message on a STREAM socket
 * is partially-read, it is discarded and the next one is rejected instead.)
 *
 * NOTE: Rejected messages are not necessarily returned to the sender!  They
 * are returned or discarded according to the "destination droppable" setting
 * specified for the message by the sender.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk;

	/*
	 * Exit if socket isn't fully initialized (occurs when a failed accept()
	 * releases a pre-allocated child socket that was never used)
	 */
	if (sk == NULL)
		return 0;

	tsk = tipc_sk(sk);
	lock_sock(sk);

	/* Reject queued messages, notify peer, withdraw names, stop timer */
	__tipc_shutdown(sock, TIPC_ERR_NO_PORT);
	sk->sk_shutdown = SHUTDOWN_MASK;
	tipc_sk_withdraw(tsk, 0, NULL);
	sk_stop_timer(sk, &sk->sk_timer);
	tipc_sk_remove(tsk);

	/* Reject any messages that accumulated in backlog queue */
	release_sock(sk);
	u32_list_purge(&tsk->cong_links);
	tsk->cong_link_cnt = 0;
	/* Final sock_put() is deferred until after the RCU grace period */
	call_rcu(&tsk->rcu, tipc_sk_callback);
	sock->sk = NULL;

	return 0;
}

/**
 * tipc_bind - associate or disassocate TIPC name(s) with a socket
 * @sock: socket structure
 * @uaddr: socket address describing name(s) and desired operation
 * @uaddr_len: size of socket address data structure
 *
 * Name and name sequence binding is indicated using a positive scope value;
 * a negative scope value unbinds the specified name.  Specifying no name
 * (i.e. a socket address length of 0) unbinds all names from the socket.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
		     int uaddr_len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct tipc_sock *tsk = tipc_sk(sk);
	int res = -EINVAL;

	lock_sock(sk);
	/* Zero-length address: withdraw all names bound to this socket */
	if (unlikely(!uaddr_len)) {
		res = tipc_sk_withdraw(tsk, 0, NULL);
		goto exit;
	}

	if (uaddr_len < sizeof(struct sockaddr_tipc)) {
		res = -EINVAL;
		goto exit;
	}
	if (addr->family != AF_TIPC) {
		res = -EAFNOSUPPORT;
		goto exit;
	}

	/* Single name is treated as a one-element name sequence */
	if (addr->addrtype == TIPC_ADDR_NAME)
		addr->addr.nameseq.upper = addr->addr.nameseq.lower;
	else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
		res = -EAFNOSUPPORT;
		goto exit;
	}

	/* Reserved type range is only usable by internal services */
	if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
	    (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
	    (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
		res = -EACCES;
		goto exit;
	}

	/* Positive scope publishes the name, negative scope withdraws it */
	res = (addr->scope > 0) ?
		tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
		tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
exit:
	release_sock(sk);
	return res;
}

634
/**
635
 * tipc_getname - get port ID of socket or peer socket
P
Per Liden 已提交
636 637 638
 * @sock: socket structure
 * @uaddr: area for returned socket address
 * @uaddr_len: area for returned length of socket address
639
 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
640
 *
P
Per Liden 已提交
641
 * Returns 0 on success, errno otherwise
642
 *
643 644
 * NOTE: This routine doesn't need to take the socket lock since it only
 *       accesses socket information that is unchanging (or which changes in
645
 *       a completely predictable manner).
P
Per Liden 已提交
646
 */
647 648
static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
			int *uaddr_len, int peer)
P
Per Liden 已提交
649 650
{
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
651 652
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
653
	struct tipc_net *tn = net_generic(sock_net(sock->sk), tipc_net_id);
P
Per Liden 已提交
654

655
	memset(addr, 0, sizeof(*addr));
656
	if (peer) {
657
		if ((!tipc_sk_connected(sk)) &&
658
		    ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
659
			return -ENOTCONN;
660 661
		addr->addr.id.ref = tsk_peer_port(tsk);
		addr->addr.id.node = tsk_peer_node(tsk);
662
	} else {
663
		addr->addr.id.ref = tsk->portid;
664
		addr->addr.id.node = tn->own_addr;
665
	}
P
Per Liden 已提交
666 667 668 669 670 671 672

	*uaddr_len = sizeof(*addr);
	addr->addrtype = TIPC_ADDR_ID;
	addr->family = AF_TIPC;
	addr->scope = 0;
	addr->addr.name.domain = 0;

673
	return 0;
P
Per Liden 已提交
674 675 676
}

/**
 * tipc_poll - read and possibly block on pollmask
 * @file: file structure associated with the socket
 * @sock: socket for which to calculate the poll bits
 * @wait: poll table to register the wait queue with
 *
 * Returns pollmask value
 *
 * COMMENTARY:
 * It appears that the usual socket locking mechanisms are not useful here
 * since the pollmask info is potentially out-of-date the moment this routine
 * exits.  TCP and other protocols seem to rely on higher level poll routines
 * to handle any preventable race conditions, so TIPC will do the same ...
 *
 * IMPORTANT: The fact that a read or write operation is indicated does NOT
 * imply that the operation will succeed, merely that it should be performed
 * and will not block.
 */
static unsigned int tipc_poll(struct file *file, struct socket *sock,
			      poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	u32 mask = 0;

	sock_poll_wait(file, sk_sleep(sk), wait);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	switch (sk->sk_state) {
	case TIPC_ESTABLISHED:
		/* Writable only when neither links nor peer are congested */
		if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
			mask |= POLLOUT;
		/* fall thru' - ESTABLISHED is also readable when data queued */
	case TIPC_LISTEN:
	case TIPC_CONNECTING:
		if (!skb_queue_empty(&sk->sk_receive_queue))
			mask |= (POLLIN | POLLRDNORM);
		break;
	case TIPC_OPEN:
		if (!tsk->cong_link_cnt)
			mask |= POLLOUT;
		/* Only connectionless sockets can receive while OPEN */
		if (tipc_sk_type_connectionless(sk) &&
		    (!skb_queue_empty(&sk->sk_receive_queue)))
			mask |= (POLLIN | POLLRDNORM);
		break;
	case TIPC_DISCONNECTING:
		mask = (POLLIN | POLLRDNORM | POLLHUP);
		break;
	}

	return mask;
}

733 734 735 736
/**
 * tipc_sendmcast - send multicast message
 * @sock: socket structure
 * @seq: destination address
 * @msg: message to send
 * @dlen: length of data to send
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_sendmcast(struct  socket *sock, struct tipc_name_seq *seq,
			  struct msghdr *msg, size_t dlen, long timeout)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	int mtu = tipc_bcast_get_mtu(net);
	struct tipc_mc_method *method = &tsk->mc_method;
	u32 domain = addr_domain(net, TIPC_CLUSTER_SCOPE);
	struct sk_buff_head pkts;
	struct tipc_nlist dsts;
	int rc;

	/* Block or return if any destination link is congested */
	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
	if (unlikely(rc))
		return rc;

	/* Lookup destination nodes */
	tipc_nlist_init(&dsts, tipc_own_addr(net));
	tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower,
				      seq->upper, domain, &dsts);
	if (!dsts.local && !dsts.remote)
		return -EHOSTUNREACH;

	/* Build message header */
	msg_set_type(hdr, TIPC_MCAST_MSG);
	msg_set_hdr_sz(hdr, MCAST_H_SIZE);
	msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
	msg_set_destport(hdr, 0);
	msg_set_destnode(hdr, 0);
	msg_set_nametype(hdr, seq->type);
	msg_set_namelower(hdr, seq->lower);
	msg_set_nameupper(hdr, seq->upper);

	/* Build message as chain of buffers */
	skb_queue_head_init(&pkts);
	rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);

	/* Send message if build was successful (build returns bytes copied) */
	if (unlikely(rc == dlen))
		rc = tipc_mcast_xmit(net, &pkts, method, &dsts,
				     &tsk->cong_link_cnt);

	tipc_nlist_purge(&dsts);

	return rc ? rc : dlen;
}

794 795 796 797 798 799
/**
 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
 * @arrvq: queue with arriving messages, to be cloned after destination lookup
 * @inputq: queue with cloned messages, delivered to socket after dest lookup
 *
 * Multi-threaded: parallel calls with reference to same queues may occur
 */
void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
		       struct sk_buff_head *inputq)
{
	struct tipc_msg *msg;
	struct list_head dports;
	u32 portid;
	u32 scope = TIPC_CLUSTER_SCOPE;
	struct sk_buff_head tmpq;
	uint hsz;
	struct sk_buff *skb, *_skb;

	__skb_queue_head_init(&tmpq);
	INIT_LIST_HEAD(&dports);

	/* Peek (don't dequeue) so concurrent callers see the same head and
	 * only one of them splices the clones into inputq below
	 */
	skb = tipc_skb_peek(arrvq, &inputq->lock);
	for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
		msg = buf_msg(skb);
		hsz = skb_headroom(skb) + msg_hdr_sz(msg);

		/* Messages from this node may also match node-scope names */
		if (in_own_node(net, msg_orignode(msg)))
			scope = TIPC_NODE_SCOPE;

		/* Create destination port list and message clones: */
		tipc_nametbl_mc_translate(net,
					  msg_nametype(msg), msg_namelower(msg),
					  msg_nameupper(msg), scope, &dports);
		portid = u32_pop(&dports);
		for (; portid; portid = u32_pop(&dports)) {
			_skb = __pskb_copy(skb, hsz, GFP_ATOMIC);
			if (_skb) {
				msg_set_destport(buf_msg(_skb), portid);
				__skb_queue_tail(&tmpq, _skb);
				continue;
			}
			pr_warn("Failed to clone mcast rcv buffer\n");
		}
		/* Append to inputq if not already done by other thread */
		spin_lock_bh(&inputq->lock);
		if (skb_peek(arrvq) == skb) {
			skb_queue_splice_tail_init(&tmpq, inputq);
			kfree_skb(__skb_dequeue(arrvq));
		}
		spin_unlock_bh(&inputq->lock);
		/* If another thread got there first, drop our clones */
		__skb_queue_purge(&tmpq);
		kfree_skb(skb);
	}
	tipc_sk_rcv(net, inputq);
}

850 851 852
/**
 * tipc_sk_proto_rcv - receive a connection mng protocol message
 * @tsk: receiving socket
 * @skb: pointer to message buffer. Consumed by this function in all cases
 *       except when a CONN_PROBE is reversed and queued onto @xmitq.
 * @xmitq: queue for outgoing replies (probe replies)
 */
static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
			      struct sk_buff_head *xmitq)
{
	struct sock *sk = &tsk->sk;
	u32 onode = tsk_own_node(tsk);
	struct tipc_msg *hdr = buf_msg(skb);
	int mtyp = msg_type(hdr);
	bool conn_cong;

	/* Ignore if connection cannot be validated: */
	if (!tsk_peer_msg(tsk, hdr))
		goto exit;

	/* Any valid peer message serves as implicit probe acknowledgment */
	tsk->probe_unacked = false;

	if (mtyp == CONN_PROBE) {
		/* Turn the probe around and queue it as the reply; the
		 * buffer now belongs to xmitq, so don't fall through to
		 * the kfree_skb() below
		 */
		msg_set_type(hdr, CONN_PROBE_REPLY);
		if (tipc_msg_reverse(onode, &skb, TIPC_OK))
			__skb_queue_tail(xmitq, skb);
		return;
	} else if (mtyp == CONN_ACK) {
		conn_cong = tsk_conn_cong(tsk);
		tsk->snt_unacked -= msg_conn_ack(hdr);
		if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
			tsk->snd_win = msg_adv_win(hdr);
		/* Wake writers if the ack cleared a congested window */
		if (conn_cong)
			sk->sk_write_space(sk);
	} else if (mtyp != CONN_PROBE_REPLY) {
		pr_warn("Received unknown CONN_PROTO msg\n");
	}
exit:
	kfree_skb(skb);
}

P
Per Liden 已提交
889
/**
890
 * tipc_sendmsg - send message in connectionless manner
P
Per Liden 已提交
891 892
 * @sock: socket structure
 * @m: message to send
893
 * @dsz: amount of user data to be sent
894
 *
P
Per Liden 已提交
895
 * Message must have an destination specified explicitly.
896
 * Used for SOCK_RDM and SOCK_DGRAM messages,
P
Per Liden 已提交
897 898
 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
899
 *
P
Per Liden 已提交
900 901
 * Returns the number of bytes sent on success, or errno otherwise
 */
902
static int tipc_sendmsg(struct socket *sock,
903
			struct msghdr *m, size_t dsz)
904 905 906 907 908 909 910 911 912 913 914
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
	ret = __tipc_sendmsg(sock, m, dsz);
	release_sock(sk);

	return ret;
}

915
/* __tipc_sendmsg - connectionless send, socket lock already held
 * @sock: socket structure
 * @m: message to send; m->msg_name optionally holds the destination
 * @dlen: amount of user data to be sent
 *
 * Resolves the destination (explicit name/id address, implicit peer for
 * connection setup, or multicast), builds the packet chain and transmits it.
 * Returns dlen on success, negative errno otherwise.
 */
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	struct list_head *clinks = &tsk->cong_links;
	bool syn = !tipc_sk_type_connectionless(sk);
	struct tipc_msg *hdr = &tsk->phdr;
	struct tipc_name_seq *seq;
	struct sk_buff_head pkts;
	u32 type, inst, domain;
	u32 dnode, dport;
	int mtu, rc;

	if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
		return -EMSGSIZE;

	/* No explicit destination: fall back to the peer set by connect() */
	if (unlikely(!dest)) {
		dest = &tsk->peer;
		if (!syn || dest->family != AF_TIPC)
			return -EDESTADDRREQ;
	}

	if (unlikely(m->msg_namelen < sizeof(*dest)))
		return -EINVAL;

	if (unlikely(dest->family != AF_TIPC))
		return -EINVAL;

	/* This send doubles as a SYN on connection-oriented sockets */
	if (unlikely(syn)) {
		if (sk->sk_state == TIPC_LISTEN)
			return -EPIPE;
		if (sk->sk_state != TIPC_OPEN)
			return -EISCONN;
		if (tsk->published)
			return -EOPNOTSUPP;
		if (dest->addrtype == TIPC_ADDR_NAME) {
			tsk->conn_type = dest->addr.name.name.type;
			tsk->conn_instance = dest->addr.name.name.instance;
		}
	}

	seq = &dest->addr.nameseq;
	if (dest->addrtype == TIPC_ADDR_MCAST)
		return tipc_sendmcast(sock, seq, m, dlen, timeout);

	if (dest->addrtype == TIPC_ADDR_NAME) {
		/* Named message: look up destination port in name table */
		type = dest->addr.name.name.type;
		inst = dest->addr.name.name.instance;
		domain = dest->addr.name.domain;
		dnode = domain;
		msg_set_type(hdr, TIPC_NAMED_MSG);
		msg_set_hdr_sz(hdr, NAMED_H_SIZE);
		msg_set_nametype(hdr, type);
		msg_set_nameinst(hdr, inst);
		msg_set_lookup_scope(hdr, tipc_addr_scope(domain));
		dport = tipc_nametbl_translate(net, type, inst, &dnode);
		msg_set_destnode(hdr, dnode);
		msg_set_destport(hdr, dport);
		if (unlikely(!dport && !dnode))
			return -EHOSTUNREACH;

	} else if (dest->addrtype == TIPC_ADDR_ID) {
		/* Direct message: destination port/node given explicitly */
		dnode = dest->addr.id.node;
		msg_set_type(hdr, TIPC_DIRECT_MSG);
		msg_set_lookup_scope(hdr, 0);
		msg_set_destnode(hdr, dnode);
		msg_set_destport(hdr, dest->addr.id.ref);
		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
	}

	/* Block or return if destination link is congested */
	rc = tipc_wait_for_cond(sock, &timeout, !u32_find(clinks, dnode));
	if (unlikely(rc))
		return rc;

	skb_queue_head_init(&pkts);
	mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;

	rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
	if (unlikely(rc == -ELINKCONG)) {
		/* Link became congested; remember it but report success */
		u32_push(clinks, dnode);
		tsk->cong_link_cnt++;
		rc = 0;
	}

	/* A successful implicit SYN moves the socket to CONNECTING */
	if (unlikely(syn && !rc))
		tipc_set_sk_state(sk, TIPC_CONNECTING);

	return rc ? rc : dlen;
}

1012
/**
1013
 * tipc_sendstream - send stream-oriented data
P
Per Liden 已提交
1014
 * @sock: socket structure
1015 1016
 * @m: data to send
 * @dsz: total length of data to be transmitted
1017
 *
1018
 * Used for SOCK_STREAM data.
1019
 *
1020 1021
 * Returns the number of bytes sent on success (or partial success),
 * or errno if no data sent
P
Per Liden 已提交
1022
 */
1023
static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
1024 1025 1026 1027 1028
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
1029
	ret = __tipc_sendstream(sock, m, dsz);
1030 1031 1032 1033 1034
	release_sock(sk);

	return ret;
}

1035
/* __tipc_sendstream - stream send, socket lock already held
 * @sock: socket structure
 * @m: data to send; a non-NULL msg_name triggers implicit connection setup
 * @dlen: total length of data to be transmitted
 *
 * Chops the user data into chunks of at most TIPC_MAX_USER_MSG_SIZE,
 * waiting out link and connection congestion between chunks.
 * Returns number of bytes sent (possibly partial), or negative errno
 * if nothing was sent.
 */
static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	struct sk_buff_head pkts;
	u32 dnode = tsk_peer_node(tsk);
	int send, sent = 0;
	int rc = 0;

	skb_queue_head_init(&pkts);

	if (unlikely(dlen > INT_MAX))
		return -EMSGSIZE;

	/* Handle implicit connection setup */
	if (unlikely(dest)) {
		rc = __tipc_sendmsg(sock, m, dlen);
		/* Any SYN payload counts against the send window */
		if (dlen && (dlen == rc))
			tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
		return rc;
	}

	do {
		/* Wait until link and connection are both uncongested */
		rc = tipc_wait_for_cond(sock, &timeout,
					(!tsk->cong_link_cnt &&
					 !tsk_conn_cong(tsk) &&
					 tipc_sk_connected(sk)));
		if (unlikely(rc))
			break;

		send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
		rc = tipc_msg_build(hdr, m, sent, send, tsk->max_pkt, &pkts);
		if (unlikely(rc != send))
			break;

		rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
		if (unlikely(rc == -ELINKCONG)) {
			/* Link congestion is handled by the wait above */
			tsk->cong_link_cnt = 1;
			rc = 0;
		}
		if (likely(!rc)) {
			tsk->snt_unacked += tsk_inc(tsk, send + MIN_H_SIZE);
			sent += send;
		}
	} while (sent < dlen && !rc);

	return rc ? rc : sent;
}

1088
/**
1089
 * tipc_send_packet - send a connection-oriented message
P
Per Liden 已提交
1090
 * @sock: socket structure
1091 1092
 * @m: message to send
 * @dsz: length of data to be transmitted
1093
 *
1094
 * Used for SOCK_SEQPACKET messages.
1095
 *
1096
 * Returns the number of bytes sent on success, or errno otherwise
P
Per Liden 已提交
1097
 */
1098
static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
P
Per Liden 已提交
1099
{
1100 1101
	if (dsz > TIPC_MAX_USER_MSG_SIZE)
		return -EMSGSIZE;
P
Per Liden 已提交
1102

1103
	return tipc_sendstream(sock, m, dsz);
P
Per Liden 已提交
1104 1105
}

1106
/* tipc_sk_finish_conn - complete the setup of a connection
 * @tsk: socket being connected
 * @peer_port: port reference of the peer socket
 * @peer_node: network address of the peer's node
 *
 * Stamps the peer into the socket's message header template, moves the
 * socket to ESTABLISHED, registers the connection with the node layer,
 * and selects the flow-control mode supported by the peer.
 */
static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
				u32 peer_node)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_msg *msg = &tsk->phdr;

	/* All subsequent sends go directly to the peer */
	msg_set_destnode(msg, peer_node);
	msg_set_destport(msg, peer_port);
	msg_set_type(msg, TIPC_CONN_MSG);
	msg_set_lookup_scope(msg, 0);
	msg_set_hdr_sz(msg, SHORT_H_SIZE);

	/* Arm the connection probe timer */
	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTERVAL);
	tipc_set_sk_state(sk, TIPC_ESTABLISHED);
	tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
	tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
	tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
		return;

	/* Fall back to message based flow control */
	tsk->rcv_win = FLOWCTL_MSG_WIN;
	tsk->snd_win = FLOWCTL_MSG_WIN;
}

/**
 * set_orig_addr - capture sender's address for received message
 * @m: descriptor for message info
 * @msg: received message header
1138
 *
P
Per Liden 已提交
1139 1140
 * Note: Address is not captured if not requested by receiver.
 */
S
Sam Ravnborg 已提交
1141
static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
P
Per Liden 已提交
1142
{
1143
	DECLARE_SOCKADDR(struct sockaddr_tipc *, addr, m->msg_name);
P
Per Liden 已提交
1144

1145
	if (addr) {
P
Per Liden 已提交
1146 1147
		addr->family = AF_TIPC;
		addr->addrtype = TIPC_ADDR_ID;
1148
		memset(&addr->addr, 0, sizeof(addr->addr));
P
Per Liden 已提交
1149 1150
		addr->addr.id.ref = msg_origport(msg);
		addr->addr.id.node = msg_orignode(msg);
1151 1152
		addr->addr.name.domain = 0;	/* could leave uninitialized */
		addr->scope = 0;		/* could leave uninitialized */
P
Per Liden 已提交
1153 1154 1155 1156 1157
		m->msg_namelen = sizeof(struct sockaddr_tipc);
	}
}

/**
 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
 * @m: descriptor for message info
 * @msg: received message header
 * @tsk: TIPC port associated with message
 *
 * May emit up to three cmsg records: TIPC_ERRINFO (error code + returned
 * data length), TIPC_RETDATA (the returned data itself), and TIPC_DESTNAME
 * (the name sequence the message was sent to).
 *
 * Note: Ancillary data is not captured if not requested by receiver.
 *
 * Returns 0 if successful, otherwise errno
 */
static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
				 struct tipc_sock *tsk)
{
	u32 anc_data[3];
	u32 err;
	u32 dest_type;
	int has_name;
	int res;

	if (likely(m->msg_controllen == 0))
		return 0;

	/* Optionally capture errored message object(s) */
	err = msg ? msg_errcode(msg) : 0;
	if (unlikely(err)) {
		anc_data[0] = err;
		anc_data[1] = msg_data_sz(msg);
		/* 8 bytes: error code + data length */
		res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
		if (res)
			return res;
		if (anc_data[1]) {
			res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
				       msg_data(msg));
			if (res)
				return res;
		}
	}

	/* Optionally capture message destination object */
	dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
	switch (dest_type) {
	case TIPC_NAMED_MSG:
		/* Single instance: lower bound == upper bound */
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_namelower(msg);
		break;
	case TIPC_MCAST_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_nameupper(msg);
		break;
	case TIPC_CONN_MSG:
		/* Use the name captured at connection setup, if any */
		has_name = (tsk->conn_type != 0);
		anc_data[0] = tsk->conn_type;
		anc_data[1] = tsk->conn_instance;
		anc_data[2] = tsk->conn_instance;
		break;
	default:
		has_name = 0;
	}
	if (has_name) {
		/* 12 bytes: type + lower + upper */
		res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
		if (res)
			return res;
	}

	return 0;
}

1228
/* tipc_sk_send_ack - send a CONN_ACK to the peer, acknowledging received data
 * @tsk: connected socket whose rcv_unacked count is flushed to the peer
 *
 * No-op if the socket is not connected or the ack buffer cannot be
 * allocated (the ack will simply be retried on a later receive).
 */
static void tipc_sk_send_ack(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct sk_buff *skb = NULL;
	struct tipc_msg *msg;
	u32 peer_port = tsk_peer_port(tsk);
	u32 dnode = tsk_peer_node(tsk);

	if (!tipc_sk_connected(sk))
		return;
	skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
			      dnode, tsk_own_node(tsk), peer_port,
			      tsk->portid, TIPC_OK);
	if (!skb)
		return;
	msg = buf_msg(skb);
	msg_set_conn_ack(msg, tsk->rcv_unacked);
	tsk->rcv_unacked = 0;

	/* Adjust to and advertize the correct window limit */
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
		tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
		msg_set_adv_win(msg, tsk->rcv_win);
	}
	tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
}

1256
/* tipc_wait_for_rcvmsg - block until the receive queue is non-empty
 * @sock: socket to wait on; socket lock must be held and is dropped
 *        and re-taken around the actual sleep
 * @timeop: in/out remaining timeout in jiffies; updated on return
 *
 * Returns 0 when a message is available, -ENOTCONN on receive shutdown,
 * -EAGAIN when the timeout expires, or sock_intr_errno() on a signal.
 */
static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT(wait);
	long timeo = *timeop;
	int err;

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
			if (sk->sk_shutdown & RCV_SHUTDOWN) {
				err = -ENOTCONN;
				break;
			}
			/* Drop socket lock while sleeping */
			release_sock(sk);
			timeo = schedule_timeout(timeo);
			lock_sock(sk);
		}
		/* Re-check conditions after waking */
		err = 0;
		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	*timeop = timeo;
	return err;
}

1289
/**
 * tipc_recvmsg - receive packet-oriented message
 * @sock: socket structure
 * @m: descriptor for message info
 * @buf_len: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
 * If the complete message doesn't fit in user area, truncate it.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recvmsg(struct socket *sock, struct msghdr *m, size_t buf_len,
			int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sk_buff *buf;
	struct tipc_msg *msg;
	bool is_connectionless = tipc_sk_type_connectionless(sk);
	long timeo;
	unsigned int sz;
	u32 err;
	int res, hlen;

	/* Catch invalid receive requests */
	if (unlikely(!buf_len))
		return -EINVAL;

	lock_sock(sk);

	/* Connection-oriented sockets must be connected (or listening) */
	if (!is_connectionless && unlikely(sk->sk_state == TIPC_OPEN)) {
		res = -ENOTCONN;
		goto exit;
	}

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
restart:

	/* Look for a message in receive queue; wait if necessary */
	res = tipc_wait_for_rcvmsg(sock, &timeo);
	if (res)
		goto exit;

	/* Look at first message in receive queue */
	buf = skb_peek(&sk->sk_receive_queue);
	msg = buf_msg(buf);
	sz = msg_data_sz(msg);
	hlen = msg_hdr_sz(msg);
	err = msg_errcode(msg);

	/* Discard an empty non-errored message & try again */
	if ((!sz) && (!err)) {
		tsk_advance_rx_queue(sk);
		goto restart;
	}

	/* Capture sender's address (optional) */
	set_orig_addr(m, msg);

	/* Capture ancillary data (optional) */
	res = tipc_sk_anc_data_recv(m, msg, tsk);
	if (res)
		goto exit;

	/* Capture message data (if valid) & compute return value (always) */
	if (!err) {
		if (unlikely(buf_len < sz)) {
			sz = buf_len;
			m->msg_flags |= MSG_TRUNC;
		}
		res = skb_copy_datagram_msg(buf, hlen, m, sz);
		if (res)
			goto exit;
		res = sz;
	} else {
		/* Errored message: report 0 or connection reset */
		if (is_connectionless || err == TIPC_CONN_SHUTDOWN ||
		    m->msg_control)
			res = 0;
		else
			res = -ECONNRESET;
	}

	/* MSG_PEEK leaves the message on the queue and sends no ack */
	if (unlikely(flags & MSG_PEEK))
		goto exit;

	if (likely(!is_connectionless)) {
		/* Ack the peer once a quarter of the window is consumed */
		tsk->rcv_unacked += tsk_inc(tsk, hlen + sz);
		if (unlikely(tsk->rcv_unacked >= (tsk->rcv_win / 4)))
			tipc_sk_send_ack(tsk);
	}
	tsk_advance_rx_queue(sk);
exit:
	release_sock(sk);
	return res;
}

1385
/**
 * tipc_recv_stream - receive stream-oriented data
 * @sock: socket structure
 * @m: descriptor for message info
 * @buf_len: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_STREAM messages only.  If not enough data is available
 * will optionally wait for more; never truncates data.
 * Partially consumed messages are tracked via TIPC_SKB_CB(buf)->bytes_read.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recv_stream(struct socket *sock, struct msghdr *m,
			    size_t buf_len, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sk_buff *buf;
	struct tipc_msg *msg;
	long timeo;
	unsigned int sz;
	int target;
	int sz_copied = 0;
	u32 err;
	int res = 0, hlen;

	/* Catch invalid receive attempts */
	if (unlikely(!buf_len))
		return -EINVAL;

	lock_sock(sk);

	if (unlikely(sk->sk_state == TIPC_OPEN)) {
		res = -ENOTCONN;
		goto exit;
	}

	/* target = minimum bytes before returning, per SO_RCVLOWAT */
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

restart:
	/* Look for a message in receive queue; wait if necessary */
	res = tipc_wait_for_rcvmsg(sock, &timeo);
	if (res)
		goto exit;

	/* Look at first message in receive queue */
	buf = skb_peek(&sk->sk_receive_queue);
	msg = buf_msg(buf);
	sz = msg_data_sz(msg);
	hlen = msg_hdr_sz(msg);
	err = msg_errcode(msg);

	/* Discard an empty non-errored message & try again */
	if ((!sz) && (!err)) {
		tsk_advance_rx_queue(sk);
		goto restart;
	}

	/* Optionally capture sender's address & ancillary data of first msg */
	if (sz_copied == 0) {
		set_orig_addr(m, msg);
		res = tipc_sk_anc_data_recv(m, msg, tsk);
		if (res)
			goto exit;
	}

	/* Capture message data (if valid) & compute return value (always) */
	if (!err) {
		u32 offset = TIPC_SKB_CB(buf)->bytes_read;
		u32 needed;
		int sz_to_copy;

		/* Skip data already delivered from this message */
		sz -= offset;
		needed = (buf_len - sz_copied);
		sz_to_copy = min(sz, needed);

		res = skb_copy_datagram_msg(buf, hlen + offset, m, sz_to_copy);
		if (res)
			goto exit;

		sz_copied += sz_to_copy;

		if (sz_to_copy < sz) {
			/* Message partially consumed; remember how far */
			if (!(flags & MSG_PEEK))
				TIPC_SKB_CB(buf)->bytes_read =
					offset + sz_to_copy;
			goto exit;
		}
	} else {
		if (sz_copied != 0)
			goto exit; /* can't add error msg to valid data */

		if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)
			res = 0;
		else
			res = -ECONNRESET;
	}

	if (unlikely(flags & MSG_PEEK))
		goto exit;

	/* Ack the peer once a quarter of the window is consumed */
	tsk->rcv_unacked += tsk_inc(tsk, hlen + sz);
	if (unlikely(tsk->rcv_unacked >= (tsk->rcv_win / 4)))
		tipc_sk_send_ack(tsk);
	tsk_advance_rx_queue(sk);

	/* Loop around if more data is required */
	if ((sz_copied < buf_len) &&	/* didn't get all requested data */
	    (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sz_copied < target)) &&	/* and more is ready or required */
	    (!err))			/* and haven't reached a FIN */
		goto restart;

exit:
	release_sock(sk);
	return sz_copied ? sz_copied : res;
}

1503 1504 1505 1506 1507 1508 1509 1510 1511 1512
/**
 * tipc_write_space - wake up thread if port congestion is released
 * @sk: socket
 */
static void tipc_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	/* sk_wq is RCU-protected; wake only if someone is waiting */
	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
						POLLWRNORM | POLLWRBAND);
	rcu_read_unlock();
}

/**
 * tipc_data_ready - wake up threads to indicate messages have been received
 * @sk: socket
 */
static void tipc_data_ready(struct sock *sk)
{
	struct socket_wq *wq;

	/* sk_wq is RCU-protected; wake only if someone is waiting */
	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
						POLLRDNORM | POLLRDBAND);
	rcu_read_unlock();
}

1536 1537 1538 1539 1540
/* tipc_sock_destruct - final cleanup when the sock is freed:
 * drop any messages still sitting in the receive queue
 */
static void tipc_sock_destruct(struct sock *sk)
{
	__skb_queue_purge(&sk->sk_receive_queue);
}

1541 1542
/**
 * filter_connect - Handle all incoming messages for a connection-based socket
 * @tsk: TIPC socket
 * @skb: pointer to message buffer. Set to NULL if buffer is consumed
 *
 * Applies the socket state machine to one incoming message.
 *
 * Returns true if the message should be added to the receive queue
 * (or, in CONNECTING, if it completes/aborts the handshake),
 * false if it should be rejected or dropped by the caller.
 */
static bool filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_msg *hdr = buf_msg(skb);

	/* Multicast never belongs on a connection-based socket */
	if (unlikely(msg_mcast(hdr)))
		return false;

	switch (sk->sk_state) {
	case TIPC_CONNECTING:
		/* Accept only ACK or NACK message */
		if (unlikely(!msg_connected(hdr)))
			return false;

		/* NACK: connection refused by peer */
		if (unlikely(msg_errcode(hdr))) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			sk->sk_err = ECONNREFUSED;
			return true;
		}

		if (unlikely(!msg_isdata(hdr))) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			sk->sk_err = EINVAL;
			return true;
		}

		tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr));
		msg_set_importance(&tsk->phdr, msg_importance(hdr));

		/* If 'ACK+' message, add to socket receive queue */
		if (msg_data_sz(hdr))
			return true;

		/* If empty 'ACK-' message, wake up sleeping connect() */
		if (waitqueue_active(sk_sleep(sk)))
			wake_up_interruptible(sk_sleep(sk));

		/* 'ACK-' message is neither accepted nor rejected: */
		msg_set_dest_droppable(hdr, 1);
		return false;

	case TIPC_OPEN:
	case TIPC_DISCONNECTING:
		break;
	case TIPC_LISTEN:
		/* Accept only SYN message */
		if (!msg_connected(hdr) && !(msg_errcode(hdr)))
			return true;
		break;
	case TIPC_ESTABLISHED:
		/* Accept only connection-based messages sent by peer */
		if (unlikely(!tsk_peer_msg(tsk, hdr)))
			return false;

		if (unlikely(msg_errcode(hdr))) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			/* Let timer expire on it's own */
			tipc_node_remove_conn(net, tsk_peer_node(tsk),
					      tsk->portid);
			sk->sk_state_change(sk);
		}
		return true;
	default:
		pr_err("Unknown sk_state %u\n", sk->sk_state);
	}

	return false;
}

1618 1619 1620
/**
 * rcvbuf_limit - get proper overload limit of socket receive queue
 * @sk: socket
1621
 * @skb: message
1622
 *
1623 1624
 * For connection oriented messages, irrespective of importance,
 * default queue limit is 2 MB.
1625
 *
1626 1627
 * For connectionless messages, queue limits are based on message
 * importance as follows:
1628
 *
1629 1630 1631 1632
 * TIPC_LOW_IMPORTANCE       (2 MB)
 * TIPC_MEDIUM_IMPORTANCE    (4 MB)
 * TIPC_HIGH_IMPORTANCE      (8 MB)
 * TIPC_CRITICAL_IMPORTANCE  (16 MB)
1633 1634 1635
 *
 * Returns overload limit according to corresponding message importance
 */
1636
static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
1637
{
1638 1639 1640 1641 1642
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = buf_msg(skb);

	if (unlikely(!msg_connected(hdr)))
		return sk->sk_rcvbuf << msg_importance(hdr);
1643

1644 1645
	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
		return sk->sk_rcvbuf;
1646

1647
	return FLOWCTL_MSG_LIM;
1648 1649
}

1650
/**
 * filter_rcv - validate incoming message
 * @sk: socket
 * @skb: pointer to message.
 * @xmitq: queue collecting rejected/response messages for the caller to send
 *
 * Enqueues message on receive queue if acceptable; optionally handles
 * disconnect indication for a connected socket.
 *
 * Called with socket lock already taken
 *
 * Returns true if message was added to socket receive queue, otherwise false
 */
static bool filter_rcv(struct sock *sk, struct sk_buff *skb,
		       struct sk_buff_head *xmitq)
{
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = buf_msg(skb);
	unsigned int limit = rcvbuf_limit(sk, skb);
	int err = TIPC_OK;
	int usr = msg_user(hdr);
	u32 onode;

	/* Protocol messages are consumed by the protocol handler */
	if (unlikely(msg_user(hdr) == CONN_MANAGER)) {
		tipc_sk_proto_rcv(tsk, skb, xmitq);
		return false;
	}

	/* Wakeup message: a congested link has become available again */
	if (unlikely(usr == SOCK_WAKEUP)) {
		onode = msg_orignode(hdr);
		kfree_skb(skb);
		u32_del(&tsk->cong_links, onode);
		tsk->cong_link_cnt--;
		sk->sk_write_space(sk);
		return false;
	}

	/* Drop if illegal message type */
	if (unlikely(msg_type(hdr) > TIPC_DIRECT_MSG)) {
		kfree_skb(skb);
		return false;
	}

	/* Reject if wrong message type for current socket state */
	if (tipc_sk_type_connectionless(sk)) {
		if (msg_connected(hdr)) {
			err = TIPC_ERR_NO_PORT;
			goto reject;
		}
	} else if (unlikely(!filter_connect(tsk, skb))) {
		err = TIPC_ERR_NO_PORT;
		goto reject;
	}

	/* Reject message if there isn't room to queue it */
	if (unlikely(sk_rmem_alloc_get(sk) + skb->truesize >= limit)) {
		err = TIPC_ERR_OVERLOAD;
		goto reject;
	}

	/* Enqueue message */
	TIPC_SKB_CB(skb)->bytes_read = 0;
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	skb_set_owner_r(skb, sk);

	sk->sk_data_ready(sk);
	return true;

reject:
	/* Return the message to its sender with the error code set */
	if (tipc_msg_reverse(tsk_own_node(tsk), &skb, err))
		__skb_queue_tail(xmitq, skb);
	return false;
}
P
Per Liden 已提交
1722

1723
/**
 * tipc_backlog_rcv - handle incoming message from backlog queue
 * @sk: socket
 * @skb: message
 *
 * Caller must hold socket lock
 *
 * Returns 0
 */
static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	/* Save truesize: filter_rcv() may consume or requeue the skb */
	unsigned int truesize = skb->truesize;
	struct sk_buff_head xmitq;
	u32 dnode, selector;

	__skb_queue_head_init(&xmitq);

	if (likely(filter_rcv(sk, skb, &xmitq))) {
		/* Track bytes double-counted by the backlog accounting */
		atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt);
		return 0;
	}

	if (skb_queue_empty(&xmitq))
		return 0;

	/* Send response/rejected message */
	skb = __skb_dequeue(&xmitq);
	dnode = msg_destnode(buf_msg(skb));
	selector = msg_origport(buf_msg(skb));
	tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
	return 0;
}

1756
/**
 * tipc_sk_enqueue - extract all buffers with destination 'dport' from
 *                   inputq and try adding them to socket or backlog queue
 * @inputq: list of incoming buffers with potentially different destinations
 * @sk: socket where the buffers should be enqueued
 * @dport: port number for the socket
 * @xmitq: queue collecting rejected/response messages for the caller
 *
 * Caller must hold socket lock
 */
static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
			    u32 dport, struct sk_buff_head *xmitq)
{
	/* Bound the time spent here to ~2 jiffies to avoid starving others */
	unsigned long time_limit = jiffies + 2;
	struct sk_buff *skb;
	unsigned int lim;
	atomic_t *dcnt;
	u32 onode;

	while (skb_queue_len(inputq)) {
		if (unlikely(time_after_eq(jiffies, time_limit)))
			return;

		skb = tipc_skb_dequeue(inputq, dport);
		if (unlikely(!skb))
			return;

		/* Add message directly to receive queue if possible */
		if (!sock_owned_by_user(sk)) {
			filter_rcv(sk, skb, xmitq);
			continue;
		}

		/* Try backlog, compensating for double-counted bytes */
		dcnt = &tipc_sk(sk)->dupl_rcvcnt;
		if (!sk->sk_backlog.len)
			atomic_set(dcnt, 0);
		lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
		if (likely(!sk_add_backlog(sk, skb, lim)))
			continue;

		/* Overload => reject message back to sender */
		onode = tipc_own_addr(sock_net(sk));
		if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
			__skb_queue_tail(xmitq, skb);
		break;
	}
}

1804
/**
 * tipc_sk_rcv - handle a chain of incoming buffers
 * @net: the applicable net namespace
 * @inputq: buffer list containing the buffers
 * Consumes all buffers in list until inputq is empty
 * Note: may be called in multiple threads referring to the same queue
 */
void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
{
	struct sk_buff_head xmitq;
	u32 dnode, dport = 0;
	int err;
	struct tipc_sock *tsk;
	struct sock *sk;
	struct sk_buff *skb;

	__skb_queue_head_init(&xmitq);
	while (skb_queue_len(inputq)) {
		dport = tipc_skb_peek_port(inputq, dport);
		tsk = tipc_sk_lookup(net, dport);

		if (likely(tsk)) {
			sk = &tsk->sk;
			/* trylock only: on contention another thread owns
			 * the socket and will drain inputq for this port
			 */
			if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
				tipc_sk_enqueue(inputq, sk, dport, &xmitq);
				spin_unlock_bh(&sk->sk_lock.slock);
			}
			/* Send pending response/rejected messages, if any */
			while ((skb = __skb_dequeue(&xmitq))) {
				dnode = msg_destnode(buf_msg(skb));
				tipc_node_xmit_skb(net, skb, dnode, dport);
			}
			sock_put(sk);
			continue;
		}

		/* No destination socket => dequeue skb if still there */
		skb = tipc_skb_dequeue(inputq, dport);
		if (!skb)
			return;

		/* Try secondary lookup if unresolved named message */
		err = TIPC_ERR_NO_PORT;
		if (tipc_msg_lookup_dest(net, skb, &err))
			goto xmit;

		/* Prepare for message rejection */
		if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
			continue;
xmit:
		dnode = msg_destnode(buf_msg(skb));
		tipc_node_xmit_skb(net, skb, dnode, dport);
	}
}

Y
Ying Xue 已提交
1858 1859
/* tipc_wait_for_connect - block until the socket leaves TIPC_CONNECTING
 * @sock: socket performing the connect
 * @timeo_p: in/out remaining timeout in jiffies
 *
 * Returns 0 once connected (or disconnected), a pending socket error,
 * -ETIMEDOUT on timeout, or sock_intr_errno() if interrupted by a signal.
 */
static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct sock *sk = sock->sk;
	int done;

	do {
		int err = sock_error(sk);
		if (err)
			return err;
		if (!*timeo_p)
			return -ETIMEDOUT;
		if (signal_pending(current))
			return sock_intr_errno(*timeo_p);

		add_wait_queue(sk_sleep(sk), &wait);
		done = sk_wait_event(sk, timeo_p,
				     sk->sk_state != TIPC_CONNECTING, &wait);
		remove_wait_queue(sk_sleep(sk), &wait);
	} while (!done);
	return 0;
}

P
Per Liden 已提交
1881
/**
 * tipc_connect - establish a connection to another TIPC port
 * @sock: socket structure
 * @dest: socket address for destination port
 * @destlen: size of socket address data structure
 * @flags: file-related flags associated with socket
 *
 * For connectionless sockets (DGRAM/RDM) this only records (or clears)
 * the default destination address. For connection-oriented sockets it
 * sends a SYN and, unless non-blocking, waits for the handshake to
 * complete.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_connect(struct socket *sock, struct sockaddr *dest,
			int destlen, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
	struct msghdr m = {NULL,};
	/* Non-blocking connect uses a zero timeout throughout */
	long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
	int previous;
	int res = 0;

	lock_sock(sk);

	/* DGRAM/RDM connect(), just save the destaddr */
	if (tipc_sk_type_connectionless(sk)) {
		if (dst->family == AF_UNSPEC) {
			/* AF_UNSPEC dissolves any previous association */
			memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
		} else if (destlen != sizeof(struct sockaddr_tipc)) {
			res = -EINVAL;
		} else {
			memcpy(&tsk->peer, dest, destlen);
		}
		goto exit;
	}

	/*
	 * Reject connection attempt using multicast address
	 *
	 * Note: send_msg() validates the rest of the address fields,
	 *       so there's no need to do it here
	 */
	if (dst->addrtype == TIPC_ADDR_MCAST) {
		res = -EINVAL;
		goto exit;
	}

	/* Remember pre-send state to distinguish EALREADY from EINPROGRESS */
	previous = sk->sk_state;

	switch (sk->sk_state) {
	case TIPC_OPEN:
		/* Send a 'SYN-' to destination */
		m.msg_name = dest;
		m.msg_namelen = destlen;

		/* If connect is in non-blocking case, set MSG_DONTWAIT to
		 * indicate send_msg() is never blocked.
		 */
		if (!timeout)
			m.msg_flags = MSG_DONTWAIT;

		res = __tipc_sendmsg(sock, &m, 0);
		if ((res < 0) && (res != -EWOULDBLOCK))
			goto exit;

		/* Just entered TIPC_CONNECTING state; the only
		 * difference is that return value in non-blocking
		 * case is EINPROGRESS, rather than EALREADY.
		 */
		res = -EINPROGRESS;
		/* fall thru' */
	case TIPC_CONNECTING:
		if (!timeout) {
			if (previous == TIPC_CONNECTING)
				res = -EALREADY;
			goto exit;
		}
		timeout = msecs_to_jiffies(timeout);
		/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
		res = tipc_wait_for_connect(sock, &timeout);
		break;
	case TIPC_ESTABLISHED:
		res = -EISCONN;
		break;
	default:
		res = -EINVAL;
	}

exit:
	release_sock(sk);

	return res;
}

1972
/**
1973
 * tipc_listen - allow socket to listen for incoming connections
P
Per Liden 已提交
1974 1975
 * @sock: socket structure
 * @len: (unused)
1976
 *
P
Per Liden 已提交
1977 1978
 * Returns 0 on success, errno otherwise
 */
1979
static int tipc_listen(struct socket *sock, int len)
P
Per Liden 已提交
1980
{
1981 1982 1983 1984
	struct sock *sk = sock->sk;
	int res;

	lock_sock(sk);
1985
	res = tipc_set_sk_state(sk, TIPC_LISTEN);
1986
	release_sock(sk);
1987

1988
	return res;
P
Per Liden 已提交
1989 1990
}

Y
Ying Xue 已提交
1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004
/**
 * tipc_wait_for_accept - wait until a connection request is queued
 * @sock: listening socket (socket lock held by caller)
 * @timeo: maximum time to wait, in jiffies (0 = non-blocking)
 *
 * Returns 0 when the receive queue is non-empty, -EAGAIN on timeout,
 * or a signal-mapped errno. Drops and retakes the socket lock while
 * sleeping.
 */
static int tipc_wait_for_accept(struct socket *sock, long timeo)
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT(wait);
	int err;

	/* True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	*/
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
			/* Must release the lock before sleeping so the
			 * receive path can deliver the SYN.
			 */
			release_sock(sk);
			timeo = schedule_timeout(timeo);
			lock_sock(sk);
		}
		err = 0;
		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}

2024
/**
 * tipc_accept - wait for connection request
 * @sock: listening socket
 * @new_sock: new socket that is to be connected
 * @flags: file-related flags associated with socket
 *
 * Waits for a SYN on the listening socket, creates a new socket, and
 * completes the handshake: a dataless SYN is consumed and answered with
 * an ACK, while a SYN carrying data is moved onto the new socket's
 * receive queue for the application to read.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
{
	struct sock *new_sk, *sk = sock->sk;
	struct sk_buff *buf;
	struct tipc_sock *new_tsock;
	struct tipc_msg *msg;
	long timeo;
	int res;

	lock_sock(sk);

	if (sk->sk_state != TIPC_LISTEN) {
		res = -EINVAL;
		goto exit;
	}
	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	res = tipc_wait_for_accept(sock, timeo);
	if (res)
		goto exit;

	/* Peek only: the skb stays queued until we know how to dispose of it */
	buf = skb_peek(&sk->sk_receive_queue);

	res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 0);
	if (res)
		goto exit;
	security_sk_clone(sock->sk, new_sock->sk);

	new_sk = new_sock->sk;
	new_tsock = tipc_sk(new_sk);
	msg = buf_msg(buf);

	/* we lock on new_sk; but lockdep sees the lock on sk */
	lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);

	/*
	 * Reject any stray messages received by new socket
	 * before the socket lock was taken (very, very unlikely)
	 */
	tsk_rej_rx_queue(new_sk);

	/* Connect new socket to it's peer */
	tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));

	/* Inherit message importance and any name-based addressing info */
	tsk_set_importance(new_tsock, msg_importance(msg));
	if (msg_named(msg)) {
		new_tsock->conn_type = msg_nametype(msg);
		new_tsock->conn_instance = msg_nameinst(msg);
	}

	/*
	 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
	 * Respond to 'SYN+' by queuing it on new socket.
	 */
	if (!msg_data_sz(msg)) {
		struct msghdr m = {NULL,};

		tsk_advance_rx_queue(sk);
		/* Zero-length send acts as the ACK back to the peer */
		__tipc_sendstream(new_sock, &m, 0);
	} else {
		/* Transfer skb ownership from listener to the new socket */
		__skb_dequeue(&sk->sk_receive_queue);
		__skb_queue_head(&new_sk->sk_receive_queue, buf);
		skb_set_owner_r(buf, new_sk);
	}
	release_sock(new_sk);
exit:
	release_sock(sk);
	return res;
}

/**
 * tipc_shutdown - shutdown socket connection
 * @sock: socket structure
 * @how: direction to close (must be SHUT_RDWR)
 *
 * Terminates connection (if necessary), then purges socket's receive queue.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int res;

	/* TIPC only supports full-duplex shutdown */
	if (how != SHUT_RDWR)
		return -EINVAL;

	lock_sock(sk);

	__tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
	sk->sk_shutdown = SEND_SHUTDOWN;

	if (sk->sk_state == TIPC_DISCONNECTING) {
		/* Discard any unreceived messages */
		__skb_queue_purge(&sk->sk_receive_queue);

		/* Wake up anyone sleeping in poll */
		sk->sk_state_change(sk);
		res = 0;
	} else {
		/* __tipc_shutdown() left the socket in a non-terminal state */
		res = -ENOTCONN;
	}

	release_sock(sk);
	return res;
}

2138
static void tipc_sk_timeout(unsigned long data)
2139
{
2140 2141
	struct tipc_sock *tsk = (struct tipc_sock *)data;
	struct sock *sk = &tsk->sk;
2142
	struct sk_buff *skb = NULL;
2143
	u32 peer_port, peer_node;
2144
	u32 own_node = tsk_own_node(tsk);
2145

J
Jon Paul Maloy 已提交
2146
	bh_lock_sock(sk);
2147
	if (!tipc_sk_connected(sk)) {
J
Jon Paul Maloy 已提交
2148 2149
		bh_unlock_sock(sk);
		goto exit;
2150
	}
2151 2152
	peer_port = tsk_peer_port(tsk);
	peer_node = tsk_peer_node(tsk);
2153

2154
	if (tsk->probe_unacked) {
2155
		if (!sock_owned_by_user(sk)) {
2156
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2157 2158 2159 2160 2161 2162 2163 2164
			tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
					      tsk_peer_port(tsk));
			sk->sk_state_change(sk);
		} else {
			/* Try again later */
			sk_reset_timer(sk, &sk->sk_timer, (HZ / 20));
		}

2165 2166
		bh_unlock_sock(sk);
		goto exit;
2167
	}
2168 2169 2170 2171

	skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE,
			      INT_H_SIZE, 0, peer_node, own_node,
			      peer_port, tsk->portid, TIPC_OK);
2172
	tsk->probe_unacked = true;
2173
	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTERVAL);
2174
	bh_unlock_sock(sk);
2175
	if (skb)
2176
		tipc_node_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid);
J
Jon Paul Maloy 已提交
2177
exit:
2178
	sock_put(sk);
2179 2180
}

2181
/**
 * tipc_sk_publish - bind a name sequence to this socket
 * @tsk: socket to publish from (socket lock held by caller)
 * @scope: publication scope (node/cluster/zone)
 * @seq: name sequence to publish
 *
 * Returns 0 on success, -EINVAL if the socket is connected or the name
 * table rejects the publication, -EADDRINUSE when the per-socket key
 * space wraps around.
 */
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
			   struct tipc_name_seq const *seq)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct publication *publ;
	u32 key;

	/* Connected sockets cannot also be bound to a name */
	if (tipc_sk_connected(sk))
		return -EINVAL;
	/* Key is derived from portid + running count; wrap back to
	 * portid itself would make the publication ambiguous.
	 */
	key = tsk->portid + tsk->pub_count + 1;
	if (key == tsk->portid)
		return -EADDRINUSE;

	publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
				    scope, tsk->portid, key);
	if (unlikely(!publ))
		return -EINVAL;

	list_add(&publ->pport_list, &tsk->publications);
	tsk->pub_count++;
	tsk->published = 1;
	return 0;
}

2206
/**
 * tipc_sk_withdraw - withdraw name publication(s) from this socket
 * @tsk: socket owning the publications (socket lock held by caller)
 * @scope: publication scope to match (only used when @seq is non-NULL)
 * @seq: specific name sequence to withdraw, or NULL to withdraw all
 *
 * Returns 0 if at least one publication was withdrawn, -EINVAL when no
 * matching publication was found.
 */
static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
			    struct tipc_name_seq const *seq)
{
	struct net *net = sock_net(&tsk->sk);
	struct publication *publ;
	struct publication *safe;
	int rc = -EINVAL;

	list_for_each_entry_safe(publ, safe, &tsk->publications, pport_list) {
		if (seq) {
			/* Looking for one exact binding: skip non-matching
			 * entries; an upper-bound mismatch on an otherwise
			 * matching entry terminates the search with rc left
			 * at -EINVAL.
			 */
			if (publ->scope != scope)
				continue;
			if (publ->type != seq->type)
				continue;
			if (publ->lower != seq->lower)
				continue;
			if (publ->upper != seq->upper)
				break;
			tipc_nametbl_withdraw(net, publ->type, publ->lower,
					      publ->ref, publ->key);
			rc = 0;
			break;
		}
		/* seq == NULL: withdraw every publication on the socket */
		tipc_nametbl_withdraw(net, publ->type, publ->lower,
				      publ->ref, publ->key);
		rc = 0;
	}
	if (list_empty(&tsk->publications))
		tsk->published = 0;
	return rc;
}

2238 2239 2240
/* tipc_sk_reinit: set non-zero address in all existing sockets
 *                 when we go from standalone to network mode.
 */
void tipc_sk_reinit(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct rhashtable_iter iter;
	struct tipc_sock *tsk;
	struct tipc_msg *msg;

	rhashtable_walk_enter(&tn->sk_rht, &iter);

	do {
		/* rhashtable_walk_start() returns 0 or -EAGAIN; a non-NULL
		 * ERR_PTR here means the start failed and the walk must be
		 * retried from the loop condition.
		 */
		tsk = ERR_PTR(rhashtable_walk_start(&iter));
		if (tsk)
			continue;

		while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
			/* Patch the pre-built header under the socket
			 * spinlock so concurrent senders see a consistent
			 * address pair.
			 */
			spin_lock_bh(&tsk->sk.sk_lock.slock);
			msg = &tsk->phdr;
			msg_set_prevnode(msg, tn->own_addr);
			msg_set_orignode(msg, tn->own_addr);
			spin_unlock_bh(&tsk->sk.sk_lock.slock);
		}

		rhashtable_walk_stop(&iter);
		/* -EAGAIN from the walk means a concurrent resize; restart */
	} while (tsk == ERR_PTR(-EAGAIN));
}

2267
/**
 * tipc_sk_lookup - find a socket by port id and take a reference
 * @net: network namespace
 * @portid: TIPC port id to look up
 *
 * Returns the socket with an elevated refcount (caller must sock_put()),
 * or NULL if no such port exists. The sock_hold() happens inside the RCU
 * read section so the entry cannot be freed under us.
 */
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_sock *tsk;

	rcu_read_lock();
	tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
	if (tsk)
		sock_hold(&tsk->sk);
	rcu_read_unlock();

	return tsk;
}

2281
/**
 * tipc_sk_insert - assign a free port id and insert socket into hash table
 * @tsk: socket to insert
 *
 * Tries every port id in [TIPC_MIN_PORT, TIPC_MAX_PORT], starting from a
 * random offset, until insertion succeeds (insert failure means the id is
 * already taken). Returns 0 on success, -1 if the whole range is exhausted.
 */
static int tipc_sk_insert(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
	u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;

	while (remaining--) {
		portid++;
		if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
			portid = TIPC_MIN_PORT;
		tsk->portid = portid;
		/* Hold a ref for the hash table; dropped again on failure */
		sock_hold(&tsk->sk);
		if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
						   tsk_rht_params))
			return 0;
		sock_put(&tsk->sk);
	}

	return -1;
}

2304
/**
 * tipc_sk_remove - remove socket from the port hash table
 * @tsk: socket to remove
 *
 * Drops the reference held by the hash table (taken in tipc_sk_insert()).
 * No-op if the socket was never inserted or already removed.
 */
static void tipc_sk_remove(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);

	if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
		/* Table ref must not have been the last one */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
}

2315 2316 2317 2318 2319 2320 2321
/* Hash table parameters for the portid -> tipc_sock lookup table */
static const struct rhashtable_params tsk_rht_params = {
	.nelem_hint = 192,
	.head_offset = offsetof(struct tipc_sock, node),
	.key_offset = offsetof(struct tipc_sock, portid),
	.key_len = sizeof(u32), /* portid */
	.max_size = 1048576,
	.min_size = 256,
	.automatic_shrinking = true,
};

2325
int tipc_sk_rht_init(struct net *net)
2326
{
2327
	struct tipc_net *tn = net_generic(net, tipc_net_id);
2328 2329

	return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
2330 2331
}

2332
void tipc_sk_rht_destroy(struct net *net)
2333
{
2334 2335
	struct tipc_net *tn = net_generic(net, tipc_net_id);

2336 2337
	/* Wait for socket readers to complete */
	synchronize_net();
2338

2339
	rhashtable_destroy(&tn->sk_rht);
2340 2341
}

P
Per Liden 已提交
2342
/**
2343
 * tipc_setsockopt - set socket option
P
Per Liden 已提交
2344 2345 2346 2347 2348
 * @sock: socket structure
 * @lvl: option level
 * @opt: option identifier
 * @ov: pointer to new option value
 * @ol: length of option value
2349 2350
 *
 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
P
Per Liden 已提交
2351
 * (to ease compatibility).
2352
 *
P
Per Liden 已提交
2353 2354
 * Returns 0 on success, errno otherwise
 */
2355 2356
static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
			   char __user *ov, unsigned int ol)
P
Per Liden 已提交
2357
{
2358
	struct sock *sk = sock->sk;
2359
	struct tipc_sock *tsk = tipc_sk(sk);
2360
	u32 value = 0;
2361
	int res = 0;
P
Per Liden 已提交
2362

2363 2364
	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
		return 0;
P
Per Liden 已提交
2365 2366
	if (lvl != SOL_TIPC)
		return -ENOPROTOOPT;
2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382

	switch (opt) {
	case TIPC_IMPORTANCE:
	case TIPC_SRC_DROPPABLE:
	case TIPC_DEST_DROPPABLE:
	case TIPC_CONN_TIMEOUT:
		if (ol < sizeof(value))
			return -EINVAL;
		res = get_user(value, (u32 __user *)ov);
		if (res)
			return res;
		break;
	default:
		if (ov || ol)
			return -EINVAL;
	}
P
Per Liden 已提交
2383

2384
	lock_sock(sk);
2385

P
Per Liden 已提交
2386 2387
	switch (opt) {
	case TIPC_IMPORTANCE:
2388
		res = tsk_set_importance(tsk, value);
P
Per Liden 已提交
2389 2390 2391
		break;
	case TIPC_SRC_DROPPABLE:
		if (sock->type != SOCK_STREAM)
2392
			tsk_set_unreliable(tsk, value);
2393
		else
P
Per Liden 已提交
2394 2395 2396
			res = -ENOPROTOOPT;
		break;
	case TIPC_DEST_DROPPABLE:
2397
		tsk_set_unreturnable(tsk, value);
P
Per Liden 已提交
2398 2399
		break;
	case TIPC_CONN_TIMEOUT:
2400
		tipc_sk(sk)->conn_timeout = value;
P
Per Liden 已提交
2401
		break;
2402 2403 2404 2405 2406 2407 2408 2409
	case TIPC_MCAST_BROADCAST:
		tsk->mc_method.rcast = false;
		tsk->mc_method.mandatory = true;
		break;
	case TIPC_MCAST_REPLICAST:
		tsk->mc_method.rcast = true;
		tsk->mc_method.mandatory = true;
		break;
P
Per Liden 已提交
2410 2411 2412 2413
	default:
		res = -EINVAL;
	}

2414 2415
	release_sock(sk);

P
Per Liden 已提交
2416 2417 2418 2419
	return res;
}

/**
 * tipc_getsockopt - get socket option
 * @sock: socket structure
 * @lvl: option level
 * @opt: option identifier
 * @ov: receptacle for option value
 * @ol: receptacle for length of option value
 *
 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
 * (to ease compatibility).
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
			   char __user *ov, int __user *ol)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	int len;
	u32 value;
	int res;

	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
		return put_user(0, ol);
	if (lvl != SOL_TIPC)
		return -ENOPROTOOPT;
	res = get_user(len, ol);
	if (res)
		return res;

	lock_sock(sk);

	switch (opt) {
	case TIPC_IMPORTANCE:
		value = tsk_importance(tsk);
		break;
	case TIPC_SRC_DROPPABLE:
		value = tsk_unreliable(tsk);
		break;
	case TIPC_DEST_DROPPABLE:
		value = tsk_unreturnable(tsk);
		break;
	case TIPC_CONN_TIMEOUT:
		value = tsk->conn_timeout;
		/* no need to set "res", since already 0 at this point */
		break;
	case TIPC_NODE_RECVQ_DEPTH:
		value = 0; /* was tipc_queue_size, now obsolete */
		break;
	case TIPC_SOCK_RECVQ_DEPTH:
		value = skb_queue_len(&sk->sk_receive_queue);
		break;
	default:
		res = -EINVAL;
	}

	release_sock(sk);

	if (res)
		return res;	/* "get" failed */

	/* Copy the value back only after the lock is dropped */
	if (len < sizeof(value))
		return -EINVAL;

	if (copy_to_user(ov, &value, sizeof(value)))
		return -EFAULT;

	return put_user(sizeof(value), ol);
}

2489
/**
 * tipc_ioctl - handle TIPC-specific ioctl requests
 * @sock: socket structure
 * @cmd: ioctl command (only SIOCGETLINKNAME is supported)
 * @arg: user-space pointer to a struct tipc_sioc_ln_req
 *
 * Returns 0 on success, -EFAULT on copy failure, -EADDRNOTAVAIL when the
 * link cannot be resolved, -ENOIOCTLCMD for unknown commands.
 */
static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct tipc_sioc_ln_req lnr;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case SIOCGETLINKNAME:
		if (copy_from_user(&lnr, argp, sizeof(lnr)))
			return -EFAULT;
		/* tipc_node_get_linkname() fills lnr.linkname on success */
		if (!tipc_node_get_linkname(sock_net(sk),
					    lnr.bearer_id & 0xffff, lnr.peer,
					    lnr.linkname, TIPC_MAX_LINK_NAME)) {
			if (copy_to_user(argp, &lnr, sizeof(lnr)))
				return -EFAULT;
			return 0;
		}
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}

2512 2513
/* Protocol switches for the various types of TIPC sockets */

/* Connectionless (SOCK_RDM / SOCK_DGRAM) sockets: no accept/listen */
static const struct proto_ops msg_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
	.ioctl		= tipc_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_sendmsg,
	.recvmsg	= tipc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

2535
/* Connection-oriented message (SOCK_SEQPACKET) sockets */
static const struct proto_ops packet_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= tipc_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
	.ioctl		= tipc_ioctl,
	.listen		= tipc_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_send_packet,
	.recvmsg	= tipc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

2556
/* Byte-stream (SOCK_STREAM) sockets */
static const struct proto_ops stream_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= tipc_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
	.ioctl		= tipc_ioctl,
	.listen		= tipc_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_sendstream,
	.recvmsg	= tipc_recv_stream,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

2577
/* AF_TIPC family registration: routes socket(AF_TIPC, ...) to tipc_sk_create */
static const struct net_proto_family tipc_family_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.create		= tipc_sk_create
};

/* Core protocol descriptor; obj_size makes sk_alloc() carve tipc_sock */
static struct proto tipc_proto = {
	.name		= "TIPC",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct tipc_sock),
	.sysctl_rmem	= sysctl_tipc_rmem
};

/**
2591
 * tipc_socket_init - initialize TIPC socket interface
2592
 *
P
Per Liden 已提交
2593 2594
 * Returns 0 on success, errno otherwise
 */
2595
int tipc_socket_init(void)
P
Per Liden 已提交
2596 2597 2598
{
	int res;

2599
	res = proto_register(&tipc_proto, 1);
P
Per Liden 已提交
2600
	if (res) {
2601
		pr_err("Failed to register TIPC protocol type\n");
P
Per Liden 已提交
2602 2603 2604 2605 2606
		goto out;
	}

	res = sock_register(&tipc_family_ops);
	if (res) {
2607
		pr_err("Failed to register TIPC socket type\n");
P
Per Liden 已提交
2608 2609 2610 2611 2612 2613 2614 2615
		proto_unregister(&tipc_proto);
		goto out;
	}
 out:
	return res;
}

/**
 * tipc_socket_stop - stop TIPC socket interface
 *
 * Reverses tipc_socket_init(): unregisters the AF_TIPC family first so no
 * new sockets can be created, then the protocol itself.
 */
void tipc_socket_stop(void)
{
	sock_unregister(tipc_family_ops.family);
	proto_unregister(&tipc_proto);
}
2623 2624

/* Caller should hold socket lock for the passed tipc socket.
 * Nests the peer-connection attributes (node, port, and optional named
 * type/instance) into the netlink message. Returns 0 or -EMSGSIZE.
 */
static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
{
	u32 peer_node;
	u32 peer_port;
	struct nlattr *nest;

	peer_node = tsk_peer_node(tsk);
	peer_port = tsk_peer_port(tsk);

	nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);

	if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
		goto msg_full;
	if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
		goto msg_full;

	/* conn_type != 0 means the connection was set up by name */
	if (tsk->conn_type != 0) {
		if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
			goto msg_full;
		if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
			goto msg_full;
		if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
			goto msg_full;
	}
	nla_nest_end(skb, nest);

	return 0;

msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

/* Caller should hold socket lock for the passed tipc socket.
 * Emits one TIPC_NL_SOCK_GET multipart record for @tsk: port ref, node
 * address, and either connection details or a has-publications flag.
 * Returns 0 or -EMSGSIZE (partial message is cancelled).
 */
static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
			    struct tipc_sock *tsk)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct sock *sk = &tsk->sk;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
	if (!hdr)
		goto msg_cancel;

	attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
	if (!attrs)
		goto genlmsg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tn->own_addr))
		goto attr_msg_cancel;

	if (tipc_sk_connected(sk)) {
		err = __tipc_nl_add_sk_con(skb, tsk);
		if (err)
			goto attr_msg_cancel;
	} else if (!list_empty(&tsk->publications)) {
		if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
			goto attr_msg_cancel;
	}
	nla_nest_end(skb, attrs);
	genlmsg_end(skb, hdr);

	return 0;

attr_msg_cancel:
	nla_nest_cancel(skb, attrs);
genlmsg_cancel:
	genlmsg_cancel(skb, hdr);
msg_cancel:
	return -EMSGSIZE;
}

/**
 * tipc_nl_sk_dump - netlink dump callback listing all TIPC sockets
 * @skb: netlink reply buffer
 * @cb: dump state; args[0] = hash bucket, args[1] = portid to resume at
 *
 * Walks the socket hash table under RCU, serializing each socket while
 * holding its spinlock. On a full reply buffer the cursor is saved in
 * cb->args so the next invocation resumes at the failed socket.
 */
int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	struct tipc_sock *tsk;
	const struct bucket_table *tbl;
	struct rhash_head *pos;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	u32 tbl_id = cb->args[0];
	u32 prev_portid = cb->args[1];

	rcu_read_lock();
	tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
	for (; tbl_id < tbl->size; tbl_id++) {
		rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) {
			spin_lock_bh(&tsk->sk.sk_lock.slock);
			/* When resuming, skip entries until the saved one */
			if (prev_portid && prev_portid != tsk->portid) {
				spin_unlock_bh(&tsk->sk.sk_lock.slock);
				continue;
			}

			err = __tipc_nl_add_sk(skb, cb, tsk);
			if (err) {
				/* Buffer full: remember where to resume */
				prev_portid = tsk->portid;
				spin_unlock_bh(&tsk->sk.sk_lock.slock);
				goto out;
			}
			prev_portid = 0;
			spin_unlock_bh(&tsk->sk.sk_lock.slock);
		}
	}
out:
	rcu_read_unlock();
	cb->args[0] = tbl_id;
	cb->args[1] = prev_portid;

	return skb->len;
}
2742 2743

/* Caller should hold socket lock for the passed tipc socket.
 * Emits one TIPC_NL_PUBL_GET record (key/type/lower/upper) for @publ.
 * Returns 0 or -EMSGSIZE (partial message is cancelled).
 */
static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct publication *publ)
{
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
	if (!hdr)
		goto msg_cancel;

	attrs = nla_nest_start(skb, TIPC_NLA_PUBL);
	if (!attrs)
		goto genlmsg_cancel;

	if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
		goto attr_msg_cancel;

	nla_nest_end(skb, attrs);
	genlmsg_end(skb, hdr);

	return 0;

attr_msg_cancel:
	nla_nest_cancel(skb, attrs);
genlmsg_cancel:
	genlmsg_cancel(skb, hdr);
msg_cancel:
	return -EMSGSIZE;
}

/* Caller should hold socket lock for the passed tipc socket.
 * Dumps the socket's publications starting after *last_publ (0 = from the
 * beginning). On a full buffer, *last_publ is updated so the dump can
 * resume; if the saved key vanished between calls, -EPIPE is returned and
 * the dump is flagged interrupted via cb->prev_seq.
 */
static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
				  struct netlink_callback *cb,
				  struct tipc_sock *tsk, u32 *last_publ)
{
	int err;
	struct publication *p;

	if (*last_publ) {
		/* Re-locate the resume point in the (mutable) list */
		list_for_each_entry(p, &tsk->publications, pport_list) {
			if (p->key == *last_publ)
				break;
		}
		if (p->key != *last_publ) {
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistence check to fail in the netlink callback
			 * handler. Resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			*last_publ = 0;
			return -EPIPE;
		}
	} else {
		p = list_first_entry(&tsk->publications, struct publication,
				     pport_list);
	}

	list_for_each_entry_from(p, &tsk->publications, pport_list) {
		err = __tipc_nl_add_sk_publ(skb, cb, p);
		if (err) {
			/* Buffer full: remember the failed key for resume */
			*last_publ = p->key;
			return err;
		}
	}
	*last_publ = 0;

	return 0;
}

/**
 * tipc_nl_publ_dump - netlink dump callback for one socket's publications
 * @skb: netlink reply buffer
 * @cb: dump state; args[0] = target portid, args[1] = resume key,
 *      args[2] = completion flag
 *
 * On the first call the target socket reference is parsed from the
 * request attributes; subsequent calls resume from the cursor saved in
 * cb->args. Returns skb->len (bytes written) or a negative errno.
 */
int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	u32 tsk_portid = cb->args[0];
	u32 last_publ = cb->args[1];
	u32 done = cb->args[2];
	struct net *net = sock_net(skb->sk);
	struct tipc_sock *tsk;

	if (!tsk_portid) {
		/* First pass: extract TIPC_NLA_SOCK_REF from the request */
		struct nlattr **attrs;
		struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];

		err = tipc_nlmsg_parse(cb->nlh, &attrs);
		if (err)
			return err;

		if (!attrs[TIPC_NLA_SOCK])
			return -EINVAL;

		err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
				       attrs[TIPC_NLA_SOCK],
				       tipc_nl_sock_policy);
		if (err)
			return err;

		if (!sock[TIPC_NLA_SOCK_REF])
			return -EINVAL;

		tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
	}

	if (done)
		return 0;

	/* tipc_sk_lookup() returns with a reference held */
	tsk = tipc_sk_lookup(net, tsk_portid);
	if (!tsk)
		return -EINVAL;

	lock_sock(&tsk->sk);
	err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
	if (!err)
		done = 1;
	release_sock(&tsk->sk);
	sock_put(&tsk->sk);

	/* Persist the cursor for the next dump invocation */
	cb->args[0] = tsk_portid;
	cb->args[1] = last_publ;
	cb->args[2] = done;

	return skb->len;
}