socket.c 72.7 KB
Newer Older
P
Per Liden 已提交
1
/*
2
 * net/tipc/socket.c: TIPC socket API
3
 *
4
 * Copyright (c) 2001-2007, 2012-2016, Ericsson AB
5
 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
P
Per Liden 已提交
6 7
 * All rights reserved.
 *
P
Per Liden 已提交
8
 * Redistribution and use in source and binary forms, with or without
P
Per Liden 已提交
9 10
 * modification, are permitted provided that the following conditions are met:
 *
P
Per Liden 已提交
11 12 13 14 15 16 17 18
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
P
Per Liden 已提交
19
 *
P
Per Liden 已提交
20 21 22 23 24 25 26 27 28 29 30 31 32 33
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
P
Per Liden 已提交
34 35 36
 * POSSIBILITY OF SUCH DAMAGE.
 */

37
#include <linux/rhashtable.h>
38 39
#include <linux/sched/signal.h>

P
Per Liden 已提交
40
#include "core.h"
41
#include "name_table.h"
E
Erik Hugne 已提交
42
#include "node.h"
43
#include "link.h"
44
#include "name_distr.h"
45
#include "socket.h"
46
#include "bcast.h"
47
#include "netlink.h"
48

49
#define CONN_TIMEOUT_DEFAULT	8000	/* default connect timeout = 8s */
50
#define CONN_PROBING_INTERVAL	msecs_to_jiffies(3600000)  /* [ms] => 1 h */
51 52 53
#define TIPC_FWD_MSG		1
#define TIPC_MAX_PORT		0xffffffff
#define TIPC_MIN_PORT		1
54

55 56
/* TIPC socket states, deliberately mapped onto TCP state values so that
 * generic socket infrastructure treating sk_state as a TCP state still
 * behaves sensibly for TIPC sockets.
 */
enum {
	TIPC_LISTEN = TCP_LISTEN,
	TIPC_ESTABLISHED = TCP_ESTABLISHED,
	TIPC_OPEN = TCP_CLOSE,
	TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
	TIPC_CONNECTING = TCP_SYN_SENT,
};

63 64 65 66 67 68 69
/**
 * struct tipc_sock - TIPC socket structure
 * @sk: socket - interacts with 'port' and with user via the socket API
 * @conn_type: TIPC type used when connection was established
 * @conn_instance: TIPC instance used when connection was established
 * @published: non-zero if port has one or more associated names
 * @max_pkt: maximum packet size "hint" used when building messages sent by port
 * @portid: unique port identity in TIPC socket hash table
 * @phdr: preformatted message header used when sending messages
 * @cong_links: list of congested links
 * @publications: list of publications for port
 * @pub_count: total # of publications port has made during its lifetime
 * @conn_timeout: the time we can wait for an unresponded setup request
 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
 * @probe_unacked: connection probe has been sent but not yet acknowledged
 * @cong_link_cnt: number of congested links
 * @snt_unacked: # messages sent by socket, and not yet acked by peer
 * @snd_win: send window advertised by the peer (see tipc_sk_proto_rcv)
 * @peer_caps: capability bits of the peer (e.g. TIPC_BLOCK_FLOWCTL)
 * @rcv_unacked: # messages read by user, but not yet acked back to peer
 * @rcv_win: receive window; initialized to snd_win at socket creation
 * @peer: 'connected' peer for dgram/rdm
 * @node: hash table node
 * @mc_method: cookie for use between socket and broadcast layer
 * @rcu: rcu struct for tipc_sock
 */
struct tipc_sock {
	struct sock sk;
	u32 conn_type;
	u32 conn_instance;
	int published;
	u32 max_pkt;
	u32 portid;
	struct tipc_msg phdr;
	struct list_head cong_links;
	struct list_head publications;
	u32 pub_count;
	uint conn_timeout;
	atomic_t dupl_rcvcnt;
	bool probe_unacked;
	u16 cong_link_cnt;
	u16 snt_unacked;
	u16 snd_win;
	u16 peer_caps;
	u16 rcv_unacked;
	u16 rcv_win;
	struct sockaddr_tipc peer;
	struct rhash_head node;
	struct tipc_mc_method mc_method;
	struct rcu_head rcu;
};
P
Per Liden 已提交
112

113
static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
114
static void tipc_data_ready(struct sock *sk);
115
static void tipc_write_space(struct sock *sk);
116
static void tipc_sock_destruct(struct sock *sk);
117
static int tipc_release(struct socket *sock);
118 119
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
		       bool kern);
120
static void tipc_sk_timeout(unsigned long data);
121
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
J
Jon Paul Maloy 已提交
122
			   struct tipc_name_seq const *seq);
123
static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
J
Jon Paul Maloy 已提交
124
			    struct tipc_name_seq const *seq);
125
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
126 127
static int tipc_sk_insert(struct tipc_sock *tsk);
static void tipc_sk_remove(struct tipc_sock *tsk);
128
static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
129
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
P
Per Liden 已提交
130

131 132 133
static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
static const struct proto_ops msg_ops;
P
Per Liden 已提交
134
static struct proto tipc_proto;
135 136
static const struct rhashtable_params tsk_rht_params;

137 138 139 140 141
/* tsk_own_node - own node address, as prerecorded in the message header */
static u32 tsk_own_node(struct tipc_sock *tsk)
{
	struct tipc_msg *hdr = &tsk->phdr;

	return msg_prevnode(hdr);
}

142
static u32 tsk_peer_node(struct tipc_sock *tsk)
143
{
144
	return msg_destnode(&tsk->phdr);
145 146
}

147
static u32 tsk_peer_port(struct tipc_sock *tsk)
148
{
149
	return msg_destport(&tsk->phdr);
150 151
}

152
static  bool tsk_unreliable(struct tipc_sock *tsk)
153
{
154
	return msg_src_droppable(&tsk->phdr) != 0;
155 156
}

157
/* tsk_set_unreliable - record the source-droppable setting in the header */
static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
{
	struct tipc_msg *hdr = &tsk->phdr;

	msg_set_src_droppable(hdr, unreliable ? 1 : 0);
}

162
static bool tsk_unreturnable(struct tipc_sock *tsk)
163
{
164
	return msg_dest_droppable(&tsk->phdr) != 0;
165 166
}

167
/* tsk_set_unreturnable - record the dest-droppable setting in the header */
static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
{
	struct tipc_msg *hdr = &tsk->phdr;

	msg_set_dest_droppable(hdr, unreturnable ? 1 : 0);
}

172
static int tsk_importance(struct tipc_sock *tsk)
173
{
174
	return msg_importance(&tsk->phdr);
175 176
}

177
static int tsk_set_importance(struct tipc_sock *tsk, int imp)
178 179 180
{
	if (imp > TIPC_CRITICAL_IMPORTANCE)
		return -EINVAL;
181
	msg_set_importance(&tsk->phdr, (u32)imp);
182 183
	return 0;
}
184

185 186 187 188 189
/* tipc_sk - cast a generic socket to the TIPC socket embedding it */
static struct tipc_sock *tipc_sk(const struct sock *sk)
{
	return container_of(sk, struct tipc_sock, sk);
}

190
/* tsk_conn_cong - true if the connection-level send window is exhausted */
static bool tsk_conn_cong(struct tipc_sock *tsk)
{
	return tsk->snt_unacked > tsk->snd_win;
}

/* tsk_adv_blocks(): translate a buffer size in bytes to number of
 * advertisable blocks, taking into account the ratio truesize(len)/len
 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ
 */
static u16 tsk_adv_blocks(int len)
{
	return len / FLOWCTL_BLK_SZ / 4;
}

/* tsk_inc(): increment counter for sent or received data
 * - If block based flow control is not supported by peer we
 *   fall back to message based ditto, incrementing the counter
 */
static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
{
	if (unlikely(!(tsk->peer_caps & TIPC_BLOCK_FLOWCTL)))
		return 1;

	return (msglen / FLOWCTL_BLK_SZ) + 1;
}

215
/**
216
 * tsk_advance_rx_queue - discard first buffer in socket receive queue
217 218
 *
 * Caller must hold socket lock
P
Per Liden 已提交
219
 */
220
static void tsk_advance_rx_queue(struct sock *sk)
P
Per Liden 已提交
221
{
222
	kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
P
Per Liden 已提交
223 224
}

225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240
/* tipc_sk_respond() : send response message back to sender
 *
 * The buffer is reversed in place by tipc_msg_reverse(); if that fails
 * (e.g. the message may not be returned) nothing is sent.
 */
static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
{
	u32 onode = tipc_own_addr(sock_net(sk));
	struct tipc_msg *hdr;
	u32 dnode;
	u32 selector;

	if (!tipc_msg_reverse(onode, &skb, err))
		return;

	hdr = buf_msg(skb);
	dnode = msg_destnode(hdr);
	selector = msg_origport(hdr);
	tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
}

P
Per Liden 已提交
241
/**
242
 * tsk_rej_rx_queue - reject all buffers in socket receive queue
243 244
 *
 * Caller must hold socket lock
P
Per Liden 已提交
245
 */
246
static void tsk_rej_rx_queue(struct sock *sk)
P
Per Liden 已提交
247
{
248
	struct sk_buff *skb;
249

250 251
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
		tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
P
Per Liden 已提交
252 253
}

254 255
/* tipc_sk_connected - true if the socket is in the ESTABLISHED state */
static bool tipc_sk_connected(struct sock *sk)
{
	return sk->sk_state == TIPC_ESTABLISHED;
}

259 260 261 262 263 264 265 266 267 268
/* tipc_sk_type_connectionless - check if the socket is datagram socket
 * @sk: socket
 *
 * Returns true if connection less, false otherwise
 */
static bool tipc_sk_type_connectionless(struct sock *sk)
{
	return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
}

269
/* tsk_peer_msg - verify if message was sent by connected port's peer
 *
 * Handles cases where the node's network address has changed from
 * the default of <0.0.0> to its configured setting.
 */
static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
{
	struct sock *sk = &tsk->sk;
	struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
	u32 peer_port = tsk_peer_port(tsk);
	u32 orig_node;
	u32 peer_node;

	/* Only an established connection has a valid peer to compare with */
	if (unlikely(!tipc_sk_connected(sk)))
		return false;

	if (unlikely(msg_origport(msg) != peer_port))
		return false;

	orig_node = msg_orignode(msg);
	peer_node = tsk_peer_node(tsk);

	if (likely(orig_node == peer_node))
		return true;

	/* Message from a peer that learned our address before we did:
	 * its origin field is still 0 but it addresses our real node.
	 */
	if (!orig_node && (peer_node == tn->own_addr))
		return true;

	/* Converse case: we recorded the peer as 0 before it got its
	 * address, but the message now carries our own node address.
	 */
	if (!peer_node && (orig_node == tn->own_addr))
		return true;

	return false;
}

303 304 305 306 307 308 309 310 311
/* tipc_set_sk_state - set the sk_state of the socket
 * @sk: socket
 *
 * Caller must hold socket lock
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_set_sk_state(struct sock *sk, int state)
{
312
	int oldsk_state = sk->sk_state;
313 314 315
	int res = -EINVAL;

	switch (state) {
316 317 318
	case TIPC_OPEN:
		res = 0;
		break;
319
	case TIPC_LISTEN:
320
	case TIPC_CONNECTING:
321
		if (oldsk_state == TIPC_OPEN)
322 323
			res = 0;
		break;
324
	case TIPC_ESTABLISHED:
325
		if (oldsk_state == TIPC_CONNECTING ||
326
		    oldsk_state == TIPC_OPEN)
327 328
			res = 0;
		break;
329
	case TIPC_DISCONNECTING:
330
		if (oldsk_state == TIPC_CONNECTING ||
331 332 333
		    oldsk_state == TIPC_ESTABLISHED)
			res = 0;
		break;
334 335 336 337 338 339 340 341
	}

	if (!res)
		sk->sk_state = state;

	return res;
}

342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384
/* tipc_sk_sock_err - check socket for conditions that abort a blocking wait
 *
 * Returns a negative errno (pending socket error, connection gone,
 * timeout expired, or signal pending), or 0 if waiting may continue.
 */
static int tipc_sk_sock_err(struct socket *sock, long *timeout)
{
	struct sock *sk = sock->sk;
	int err = sock_error(sk);

	if (err)
		return err;

	if (sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET) {
		if (sk->sk_state == TIPC_DISCONNECTING)
			return -EPIPE;
		if (!tipc_sk_connected(sk))
			return -ENOTCONN;
	}

	if (!*timeout)
		return -EAGAIN;

	if (signal_pending(current))
		return sock_intr_errno(*timeout);

	return 0;
}

/* tipc_wait_for_cond - wait on @sock_ until @condition_, error, or timeout
 *
 * Fix: the body previously read 'sock->sk', silently capturing the
 * caller's local identifier 'sock' instead of the macro parameter; the
 * macro only worked because every caller happened to name its socket
 * 'sock'. All parameter uses are now the parameters themselves, and
 * each use is parenthesized per standard macro hygiene.
 */
#define tipc_wait_for_cond(sock_, timeout_, condition_)			\
({								        \
	int rc_ = 0;							\
	int done_ = 0;							\
									\
	while (!(condition_) && !done_) {				\
		struct sock *sk_ = (sock_)->sk;				\
		DEFINE_WAIT_FUNC(wait_, woken_wake_function);		\
									\
		rc_ = tipc_sk_sock_err((sock_), (timeout_));		\
		if (rc_)						\
			break;						\
		prepare_to_wait(sk_sleep(sk_), &wait_,			\
				TASK_INTERRUPTIBLE);			\
		done_ = sk_wait_event(sk_, (timeout_),			\
				      (condition_), &wait_);		\
		remove_wait_queue(sk_sleep(sk_), &wait_);		\
	}								\
	rc_;								\
})

P
Per Liden 已提交
385
/**
 * tipc_sk_create - create a TIPC socket
 * @net: network namespace (must be default network)
 * @sock: pre-allocated socket structure
 * @protocol: protocol indicator (must be 0)
 * @kern: caused by kernel or by userspace?
 *
 * This routine creates additional data structures used by the TIPC socket,
 * initializes them, and links them together.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_sk_create(struct net *net, struct socket *sock,
			  int protocol, int kern)
{
	struct tipc_net *tn;
	const struct proto_ops *ops;
	struct sock *sk;
	struct tipc_sock *tsk;
	struct tipc_msg *msg;

	/* Validate arguments */
	if (unlikely(protocol != 0))
		return -EPROTONOSUPPORT;

	/* Pick the proto_ops vtable matching the requested socket type */
	switch (sock->type) {
	case SOCK_STREAM:
		ops = &stream_ops;
		break;
	case SOCK_SEQPACKET:
		ops = &packet_ops;
		break;
	case SOCK_DGRAM:
	case SOCK_RDM:
		ops = &msg_ops;
		break;
	default:
		return -EPROTOTYPE;
	}

	/* Allocate socket's protocol area */
	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
	if (sk == NULL)
		return -ENOMEM;

	tsk = tipc_sk(sk);
	tsk->max_pkt = MAX_PKT_DEFAULT;
	INIT_LIST_HEAD(&tsk->publications);
	INIT_LIST_HEAD(&tsk->cong_links);
	msg = &tsk->phdr;
	tn = net_generic(sock_net(sk), tipc_net_id);

	/* Finish initializing socket data structures */
	sock->ops = ops;
	sock_init_data(sock, sk);
	tipc_set_sk_state(sk, TIPC_OPEN);
	if (tipc_sk_insert(tsk)) {
		/* NOTE(review): 'sk' allocated above appears to be leaked on
		 * this failure path (no sk_free/sock_put) — confirm upstream.
		 */
		pr_warn("Socket create failed; port number exhausted\n");
		return -EINVAL;
	}

	/* Ensure tsk is visible before we read own_addr. */
	smp_mb();

	tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
		      NAMED_H_SIZE, 0);

	msg_set_origport(msg, tsk->portid);
	setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk);
	sk->sk_shutdown = 0;
	sk->sk_backlog_rcv = tipc_backlog_rcv;
	sk->sk_rcvbuf = sysctl_tipc_rmem[1];
	sk->sk_data_ready = tipc_data_ready;
	sk->sk_write_space = tipc_write_space;
	sk->sk_destruct = tipc_sock_destruct;
	tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
	atomic_set(&tsk->dupl_rcvcnt, 0);

	/* Start out with safe limits until we receive an advertised window */
	tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
	tsk->rcv_win = tsk->snd_win;

	if (tipc_sk_type_connectionless(sk)) {
		tsk_set_unreturnable(tsk, true);
		if (sock->type == SOCK_DGRAM)
			tsk_set_unreliable(tsk, true);
	}

	return 0;
}

476 477 478 479 480 481 482
/* tipc_sk_callback - RCU callback releasing the socket reference after
 * the grace period that follows tipc_release()
 */
static void tipc_sk_callback(struct rcu_head *head)
{
	struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);

	sock_put(&tsk->sk);
}

483 484 485 486 487 488
/* __tipc_shutdown - common teardown for shutdown() and release()
 * @sock: socket being shut down
 * @error: TIPC error code used when rejecting queued messages
 *
 * Caller should hold socket lock for the socket.
 */
static void __tipc_shutdown(struct socket *sock, int error)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct net *net = sock_net(sk);
	long timeout = CONN_TIMEOUT_DEFAULT;
	u32 dnode = tsk_peer_node(tsk);
	struct sk_buff *skb;

	/* Avoid that hi-prio shutdown msgs bypass msgs in link wakeup queue */
	tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
					    !tsk_conn_cong(tsk)));

	/* Reject all unreceived messages, except on an active connection
	 * (which disconnects locally & sends a 'FIN+' to peer).
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		/* Partially read buffers are simply discarded */
		if (TIPC_SKB_CB(skb)->bytes_read) {
			kfree_skb(skb);
			continue;
		}
		/* First unread buffer on a connection: disconnect locally
		 * before bouncing it back to the peer.
		 */
		if (!tipc_sk_type_connectionless(sk) &&
		    sk->sk_state != TIPC_DISCONNECTING) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			tipc_node_remove_conn(net, dnode, tsk->portid);
		}
		tipc_sk_respond(sk, skb, error);
	}

	/* Datagram sockets have no connection to tear down */
	if (tipc_sk_type_connectionless(sk))
		return;

	/* Connection still up: send an explicit FIN to the peer */
	if (sk->sk_state != TIPC_DISCONNECTING) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
				      TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
				      tsk_own_node(tsk), tsk_peer_port(tsk),
				      tsk->portid, error);
		if (skb)
			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
		tipc_node_remove_conn(net, dnode, tsk->portid);
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
	}
}

P
Per Liden 已提交
528
/**
 * tipc_release - destroy a TIPC socket
 * @sock: socket to destroy
 *
 * This routine cleans up any messages that are still queued on the socket.
 * For DGRAM and RDM socket types, all queued messages are rejected.
 * For SEQPACKET and STREAM socket types, the first message is rejected
 * and any others are discarded.  (If the first message on a STREAM socket
 * is partially-read, it is discarded and the next one is rejected instead.)
 *
 * NOTE: Rejected messages are not necessarily returned to the sender!  They
 * are returned or discarded according to the "destination droppable" setting
 * specified for the message by the sender.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk;

	/*
	 * Exit if socket isn't fully initialized (occurs when a failed accept()
	 * releases a pre-allocated child socket that was never used)
	 */
	if (sk == NULL)
		return 0;

	tsk = tipc_sk(sk);
	lock_sock(sk);

	/* Reject/discard queued messages and disconnect from peer */
	__tipc_shutdown(sock, TIPC_ERR_NO_PORT);
	sk->sk_shutdown = SHUTDOWN_MASK;
	tipc_sk_withdraw(tsk, 0, NULL);
	sk_stop_timer(sk, &sk->sk_timer);
	tipc_sk_remove(tsk);

	/* Reject any messages that accumulated in backlog queue */
	release_sock(sk);
	u32_list_purge(&tsk->cong_links);
	tsk->cong_link_cnt = 0;
	/* Final sock_put() is deferred until after an RCU grace period */
	call_rcu(&tsk->rcu, tipc_sk_callback);
	sock->sk = NULL;

	return 0;
}

/**
 * tipc_bind - associate or disassocate TIPC name(s) with a socket
 * @sock: socket structure
 * @uaddr: socket address describing name(s) and desired operation
 * @uaddr_len: size of socket address data structure
 *
 * Name and name sequence binding is indicated using a positive scope value;
 * a negative scope value unbinds the specified name.  Specifying no name
 * (i.e. a socket address length of 0) unbinds all names from the socket.
 *
 * Returns 0 on success, errno otherwise
 *
 * NOTE: The body takes the socket lock around the publish/withdraw calls;
 * an earlier comment here claiming the lock was unnecessary was stale.
 */
static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
		     int uaddr_len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct tipc_sock *tsk = tipc_sk(sk);
	int res = -EINVAL;

	lock_sock(sk);
	/* Zero-length address means: withdraw all names */
	if (unlikely(!uaddr_len)) {
		res = tipc_sk_withdraw(tsk, 0, NULL);
		goto exit;
	}

	if (uaddr_len < sizeof(struct sockaddr_tipc)) {
		res = -EINVAL;
		goto exit;
	}
	if (addr->family != AF_TIPC) {
		res = -EAFNOSUPPORT;
		goto exit;
	}

	/* A single name is treated as a degenerate one-element sequence */
	if (addr->addrtype == TIPC_ADDR_NAME)
		addr->addr.nameseq.upper = addr->addr.nameseq.lower;
	else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
		res = -EAFNOSUPPORT;
		goto exit;
	}

	/* Reserved name types may not be bound by ordinary sockets */
	if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
	    (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
	    (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
		res = -EACCES;
		goto exit;
	}

	/* Positive scope publishes; negative scope withdraws */
	res = (addr->scope > 0) ?
		tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
		tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
exit:
	release_sock(sk);
	return res;
}

635
/**
 * tipc_getname - get port ID of socket or peer socket
 * @sock: socket structure
 * @uaddr: area for returned socket address
 * @uaddr_len: area for returned length of socket address
 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
 *
 * Returns 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it only
 *       accesses socket information that is unchanging (or which changes in
 *       a completely predictable manner).
 */
static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
			int *uaddr_len, int peer)
{
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_net *tn = net_generic(sock_net(sock->sk), tipc_net_id);

	memset(addr, 0, sizeof(*addr));
	if (peer) {
		/* peer == 2 also accepts a recently-disconnected peer */
		if ((!tipc_sk_connected(sk)) &&
		    ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
			return -ENOTCONN;
		addr->addr.id.ref = tsk_peer_port(tsk);
		addr->addr.id.node = tsk_peer_node(tsk);
	} else {
		addr->addr.id.ref = tsk->portid;
		addr->addr.id.node = tn->own_addr;
	}

	*uaddr_len = sizeof(*addr);
	addr->addrtype = TIPC_ADDR_ID;
	addr->family = AF_TIPC;
	addr->scope = 0;
	addr->addr.name.domain = 0;

	return 0;
}

/**
 * tipc_poll - read and possibly block on pollmask
 * @file: file structure associated with the socket
 * @sock: socket for which to calculate the poll bits
 * @wait: poll table, registered with sock_poll_wait()
 *
 * Returns pollmask value
 *
 * COMMENTARY:
 * It appears that the usual socket locking mechanisms are not useful here
 * since the pollmask info is potentially out-of-date the moment this routine
 * exits.  TCP and other protocols seem to rely on higher level poll routines
 * to handle any preventable race conditions, so TIPC will do the same ...
 *
 * IMPORTANT: The fact that a read or write operation is indicated does NOT
 * imply that the operation will succeed, merely that it should be performed
 * and will not block.
 */
static unsigned int tipc_poll(struct file *file, struct socket *sock,
			      poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	u32 mask = 0;

	sock_poll_wait(file, sk_sleep(sk), wait);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	switch (sk->sk_state) {
	case TIPC_ESTABLISHED:
		/* Writable only when neither link nor peer is congested */
		if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
			mask |= POLLOUT;
		/* fall thru' */
	case TIPC_LISTEN:
	case TIPC_CONNECTING:
		if (!skb_queue_empty(&sk->sk_receive_queue))
			mask |= (POLLIN | POLLRDNORM);
		break;
	case TIPC_OPEN:
		if (!tsk->cong_link_cnt)
			mask |= POLLOUT;
		if (tipc_sk_type_connectionless(sk) &&
		    (!skb_queue_empty(&sk->sk_receive_queue)))
			mask |= (POLLIN | POLLRDNORM);
		break;
	case TIPC_DISCONNECTING:
		mask = (POLLIN | POLLRDNORM | POLLHUP);
		break;
	}

	return mask;
}

734 735 736 737
/**
 * tipc_sendmcast - send multicast message
 * @sock: socket structure
 * @seq: destination address
 * @msg: message to send
 * @dlen: length of data to send
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_sendmcast(struct  socket *sock, struct tipc_name_seq *seq,
			  struct msghdr *msg, size_t dlen, long timeout)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	int mtu = tipc_bcast_get_mtu(net);
	struct tipc_mc_method *method = &tsk->mc_method;
	u32 domain = addr_domain(net, TIPC_CLUSTER_SCOPE);
	struct sk_buff_head pkts;
	struct tipc_nlist dsts;
	int rc;

	/* Block or return if any destination link is congested */
	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
	if (unlikely(rc))
		return rc;

	/* Lookup destination nodes */
	tipc_nlist_init(&dsts, tipc_own_addr(net));
	tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower,
				      seq->upper, domain, &dsts);
	if (!dsts.local && !dsts.remote)
		return -EHOSTUNREACH;

	/* Build message header */
	msg_set_type(hdr, TIPC_MCAST_MSG);
	msg_set_hdr_sz(hdr, MCAST_H_SIZE);
	msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
	msg_set_destport(hdr, 0);
	msg_set_destnode(hdr, 0);
	msg_set_nametype(hdr, seq->type);
	msg_set_namelower(hdr, seq->lower);
	msg_set_nameupper(hdr, seq->upper);

	/* Build message as chain of buffers */
	skb_queue_head_init(&pkts);
	rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);

	/* Send message if build was successful */
	if (unlikely(rc == dlen))
		rc = tipc_mcast_xmit(net, &pkts, method, &dsts,
				     &tsk->cong_link_cnt);

	tipc_nlist_purge(&dsts);

	return rc ? rc : dlen;
}

795 796 797 798 799 800
/**
 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
 * @arrvq: queue with arriving messages, to be cloned after destination lookup
 * @inputq: queue with cloned messages, delivered to socket after dest lookup
 *
 * Multi-threaded: parallel calls with reference to same queues may occur
 */
void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
		       struct sk_buff_head *inputq)
{
	struct tipc_msg *msg;
	struct list_head dports;
	u32 portid;
	u32 scope = TIPC_CLUSTER_SCOPE;
	struct sk_buff_head tmpq;
	uint hsz;
	struct sk_buff *skb, *_skb;

	__skb_queue_head_init(&tmpq);
	INIT_LIST_HEAD(&dports);

	/* tipc_skb_peek() takes inputq->lock, which also guards arrvq here */
	skb = tipc_skb_peek(arrvq, &inputq->lock);
	for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
		msg = buf_msg(skb);
		hsz = skb_headroom(skb) + msg_hdr_sz(msg);

		if (in_own_node(net, msg_orignode(msg)))
			scope = TIPC_NODE_SCOPE;

		/* Create destination port list and message clones: */
		tipc_nametbl_mc_translate(net,
					  msg_nametype(msg), msg_namelower(msg),
					  msg_nameupper(msg), scope, &dports);
		portid = u32_pop(&dports);
		for (; portid; portid = u32_pop(&dports)) {
			_skb = __pskb_copy(skb, hsz, GFP_ATOMIC);
			if (_skb) {
				msg_set_destport(buf_msg(_skb), portid);
				__skb_queue_tail(&tmpq, _skb);
				continue;
			}
			pr_warn("Failed to clone mcast rcv buffer\n");
		}
		/* Append to inputq if not already done by other thread */
		spin_lock_bh(&inputq->lock);
		if (skb_peek(arrvq) == skb) {
			skb_queue_splice_tail_init(&tmpq, inputq);
			kfree_skb(__skb_dequeue(arrvq));
		}
		spin_unlock_bh(&inputq->lock);
		/* If another thread got here first, tmpq clones are dropped */
		__skb_queue_purge(&tmpq);
		kfree_skb(skb);
	}
	tipc_sk_rcv(net, inputq);
}

851 852 853
/**
 * tipc_sk_proto_rcv - receive a connection mng protocol message
 * @tsk: receiving socket
 * @skb: pointer to message buffer.
 * @xmitq: queue onto which any probe reply is appended for transmission
 */
static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
			      struct sk_buff_head *xmitq)
{
	struct sock *sk = &tsk->sk;
	u32 onode = tsk_own_node(tsk);
	struct tipc_msg *hdr = buf_msg(skb);
	int mtyp = msg_type(hdr);
	bool conn_cong;

	/* Ignore if connection cannot be validated: */
	if (!tsk_peer_msg(tsk, hdr))
		goto exit;

	/* Any valid peer message proves the connection is alive */
	tsk->probe_unacked = false;

	if (mtyp == CONN_PROBE) {
		/* Turn the probe around and queue it for transmission */
		msg_set_type(hdr, CONN_PROBE_REPLY);
		if (tipc_msg_reverse(onode, &skb, TIPC_OK))
			__skb_queue_tail(xmitq, skb);
		return;
	} else if (mtyp == CONN_ACK) {
		conn_cong = tsk_conn_cong(tsk);
		tsk->snt_unacked -= msg_conn_ack(hdr);
		if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
			tsk->snd_win = msg_adv_win(hdr);
		/* Wake writers only on a congested -> uncongested transition */
		if (conn_cong)
			sk->sk_write_space(sk);
	} else if (mtyp != CONN_PROBE_REPLY) {
		pr_warn("Received unknown CONN_PROTO msg\n");
	}
exit:
	kfree_skb(skb);
}

P
Per Liden 已提交
890
/**
 * tipc_sendmsg - send message in connectionless manner
 * @sock: socket structure
 * @m: message to send
 * @dsz: amount of user data to be sent
 *
 * Message must have an destination specified explicitly.
 * Used for SOCK_RDM and SOCK_DGRAM messages,
 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int tipc_sendmsg(struct socket *sock,
			struct msghdr *m, size_t dsz)
{
	struct sock *sk = sock->sk;
	int ret;

	/* Lock-taking wrapper around the lockless __tipc_sendmsg() */
	lock_sock(sk);
	ret = __tipc_sendmsg(sock, m, dsz);
	release_sock(sk);

	return ret;
}

916
/* __tipc_sendmsg - send one datagram; caller must hold the socket lock.
 *
 * Resolves the destination (named or direct address), builds the packet
 * chain and transmits it, blocking on link congestion unless non-blocking
 * I/O was requested. Returns number of bytes sent or negative errno.
 */
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	struct list_head *clinks = &tsk->cong_links;
	bool syn = !tipc_sk_type_connectionless(sk);
	struct tipc_msg *hdr = &tsk->phdr;
	struct tipc_name_seq *seq;
	struct sk_buff_head pkts;
	u32 type, inst, domain;
	u32 dnode, dport;
	int mtu, rc;

	if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
		return -EMSGSIZE;

	/* No explicit destination: fall back to a previously connect()ed peer */
	if (unlikely(!dest)) {
		dest = &tsk->peer;
		if (!syn || dest->family != AF_TIPC)
			return -EDESTADDRREQ;
	}

	if (unlikely(m->msg_namelen < sizeof(*dest)))
		return -EINVAL;

	if (unlikely(dest->family != AF_TIPC))
		return -EINVAL;

	/* Implicit connection setup ('SYN') checks for connection types */
	if (unlikely(syn)) {
		if (sk->sk_state == TIPC_LISTEN)
			return -EPIPE;
		if (sk->sk_state != TIPC_OPEN)
			return -EISCONN;
		if (tsk->published)
			return -EOPNOTSUPP;
		if (dest->addrtype == TIPC_ADDR_NAME) {
			tsk->conn_type = dest->addr.name.name.type;
			tsk->conn_instance = dest->addr.name.name.instance;
		}
	}

	seq = &dest->addr.nameseq;
	if (dest->addrtype == TIPC_ADDR_MCAST)
		return tipc_sendmcast(sock, seq, m, dlen, timeout);

	if (dest->addrtype == TIPC_ADDR_NAME) {
		/* Named message: look up destination port in the name table */
		type = dest->addr.name.name.type;
		inst = dest->addr.name.name.instance;
		domain = dest->addr.name.domain;
		dnode = domain;
		msg_set_type(hdr, TIPC_NAMED_MSG);
		msg_set_hdr_sz(hdr, NAMED_H_SIZE);
		msg_set_nametype(hdr, type);
		msg_set_nameinst(hdr, inst);
		msg_set_lookup_scope(hdr, tipc_addr_scope(domain));
		dport = tipc_nametbl_translate(net, type, inst, &dnode);
		msg_set_destnode(hdr, dnode);
		msg_set_destport(hdr, dport);
		if (unlikely(!dport && !dnode))
			return -EHOSTUNREACH;

	} else if (dest->addrtype == TIPC_ADDR_ID) {
		/* Direct message: destination port/node given explicitly */
		dnode = dest->addr.id.node;
		msg_set_type(hdr, TIPC_DIRECT_MSG);
		msg_set_lookup_scope(hdr, 0);
		msg_set_destnode(hdr, dnode);
		msg_set_destport(hdr, dest->addr.id.ref);
		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
	}

	/* Block or return if destination link is congested */
	rc = tipc_wait_for_cond(sock, &timeout, !u32_find(clinks, dnode));
	if (unlikely(rc))
		return rc;

	skb_queue_head_init(&pkts);
	mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;

	rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
	if (unlikely(rc == -ELINKCONG)) {
		/* Remember congested link; message was accepted anyway */
		u32_push(clinks, dnode);
		tsk->cong_link_cnt++;
		rc = 0;
	}

	if (unlikely(syn && !rc))
		tipc_set_sk_state(sk, TIPC_CONNECTING);

	return rc ? rc : dlen;
}

1013
/**
1014
 * tipc_sendstream - send stream-oriented data
P
Per Liden 已提交
1015
 * @sock: socket structure
1016 1017
 * @m: data to send
 * @dsz: total length of data to be transmitted
1018
 *
1019
 * Used for SOCK_STREAM data.
1020
 *
1021 1022
 * Returns the number of bytes sent on success (or partial success),
 * or errno if no data sent
P
Per Liden 已提交
1023
 */
1024
static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
1025 1026 1027 1028 1029
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
1030
	ret = __tipc_sendstream(sock, m, dsz);
1031 1032 1033 1034 1035
	release_sock(sk);

	return ret;
}

1036
/* __tipc_sendstream - send stream data in MTU-sized chunks; caller must
 * hold the socket lock.
 *
 * Waits for link/connection congestion to clear between chunks. A
 * destination address on a stream socket triggers implicit connection
 * setup via __tipc_sendmsg(). Returns bytes sent or negative errno.
 */
static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	struct sk_buff_head pkts;
	u32 dnode = tsk_peer_node(tsk);
	int send, sent = 0;
	int rc = 0;

	skb_queue_head_init(&pkts);

	if (unlikely(dlen > INT_MAX))
		return -EMSGSIZE;

	/* Handle implicit connection setup */
	if (unlikely(dest)) {
		rc = __tipc_sendmsg(sock, m, dlen);
		if (dlen && (dlen == rc))
			tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
		return rc;
	}

	do {
		/* Wait until neither the link nor the peer is congested */
		rc = tipc_wait_for_cond(sock, &timeout,
					(!tsk->cong_link_cnt &&
					 !tsk_conn_cong(tsk) &&
					 tipc_sk_connected(sk)));
		if (unlikely(rc))
			break;

		send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
		rc = tipc_msg_build(hdr, m, sent, send, tsk->max_pkt, &pkts);
		if (unlikely(rc != send))
			break;

		rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
		if (unlikely(rc == -ELINKCONG)) {
			/* Link congested: chunk accepted, flag and retry later */
			tsk->cong_link_cnt = 1;
			rc = 0;
		}
		if (likely(!rc)) {
			tsk->snt_unacked += tsk_inc(tsk, send + MIN_H_SIZE);
			sent += send;
		}
	} while (sent < dlen && !rc);

	return rc ? rc : sent;
}

1089
/**
1090
 * tipc_send_packet - send a connection-oriented message
P
Per Liden 已提交
1091
 * @sock: socket structure
1092 1093
 * @m: message to send
 * @dsz: length of data to be transmitted
1094
 *
1095
 * Used for SOCK_SEQPACKET messages.
1096
 *
1097
 * Returns the number of bytes sent on success, or errno otherwise
P
Per Liden 已提交
1098
 */
1099
static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
P
Per Liden 已提交
1100
{
1101 1102
	if (dsz > TIPC_MAX_USER_MSG_SIZE)
		return -EMSGSIZE;
P
Per Liden 已提交
1103

1104
	return tipc_sendstream(sock, m, dsz);
P
Per Liden 已提交
1105 1106
}

1107
/* tipc_sk_finish_conn - complete the setup of a connection
P
Per Liden 已提交
1108
 */
1109
static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
1110
				u32 peer_node)
P
Per Liden 已提交
1111
{
1112 1113
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
1114
	struct tipc_msg *msg = &tsk->phdr;
P
Per Liden 已提交
1115

1116 1117 1118 1119 1120
	msg_set_destnode(msg, peer_node);
	msg_set_destport(msg, peer_port);
	msg_set_type(msg, TIPC_CONN_MSG);
	msg_set_lookup_scope(msg, 0);
	msg_set_hdr_sz(msg, SHORT_H_SIZE);
1121

1122
	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTERVAL);
1123
	tipc_set_sk_state(sk, TIPC_ESTABLISHED);
1124 1125
	tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
	tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
1126
	tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
1127 1128 1129 1130 1131 1132
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
		return;

	/* Fall back to message based flow control */
	tsk->rcv_win = FLOWCTL_MSG_WIN;
	tsk->snd_win = FLOWCTL_MSG_WIN;
P
Per Liden 已提交
1133 1134 1135 1136 1137 1138
}

/**
 * set_orig_addr - capture sender's address for received message
 * @m: descriptor for message info
 * @msg: received message header
1139
 *
P
Per Liden 已提交
1140 1141
 * Note: Address is not captured if not requested by receiver.
 */
S
Sam Ravnborg 已提交
1142
static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
P
Per Liden 已提交
1143
{
1144
	DECLARE_SOCKADDR(struct sockaddr_tipc *, addr, m->msg_name);
P
Per Liden 已提交
1145

1146
	if (addr) {
P
Per Liden 已提交
1147 1148
		addr->family = AF_TIPC;
		addr->addrtype = TIPC_ADDR_ID;
1149
		memset(&addr->addr, 0, sizeof(addr->addr));
P
Per Liden 已提交
1150 1151
		addr->addr.id.ref = msg_origport(msg);
		addr->addr.id.node = msg_orignode(msg);
1152 1153
		addr->addr.name.domain = 0;	/* could leave uninitialized */
		addr->scope = 0;		/* could leave uninitialized */
P
Per Liden 已提交
1154 1155 1156 1157 1158
		m->msg_namelen = sizeof(struct sockaddr_tipc);
	}
}

/**
1159
 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
P
Per Liden 已提交
1160 1161
 * @m: descriptor for message info
 * @msg: received message header
1162
 * @tsk: TIPC port associated with message
1163
 *
P
Per Liden 已提交
1164
 * Note: Ancillary data is not captured if not requested by receiver.
1165
 *
P
Per Liden 已提交
1166 1167
 * Returns 0 if successful, otherwise errno
 */
1168 1169
static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
				 struct tipc_sock *tsk)
P
Per Liden 已提交
1170 1171 1172 1173
{
	u32 anc_data[3];
	u32 err;
	u32 dest_type;
1174
	int has_name;
P
Per Liden 已提交
1175 1176 1177 1178 1179 1180 1181 1182 1183 1184
	int res;

	if (likely(m->msg_controllen == 0))
		return 0;

	/* Optionally capture errored message object(s) */
	err = msg ? msg_errcode(msg) : 0;
	if (unlikely(err)) {
		anc_data[0] = err;
		anc_data[1] = msg_data_sz(msg);
1185 1186
		res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
		if (res)
P
Per Liden 已提交
1187
			return res;
1188 1189 1190 1191 1192 1193
		if (anc_data[1]) {
			res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
				       msg_data(msg));
			if (res)
				return res;
		}
P
Per Liden 已提交
1194 1195 1196 1197 1198 1199
	}

	/* Optionally capture message destination object */
	dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
	switch (dest_type) {
	case TIPC_NAMED_MSG:
1200
		has_name = 1;
P
Per Liden 已提交
1201 1202 1203 1204 1205
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_namelower(msg);
		break;
	case TIPC_MCAST_MSG:
1206
		has_name = 1;
P
Per Liden 已提交
1207 1208 1209 1210 1211
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_nameupper(msg);
		break;
	case TIPC_CONN_MSG:
1212 1213 1214 1215
		has_name = (tsk->conn_type != 0);
		anc_data[0] = tsk->conn_type;
		anc_data[1] = tsk->conn_instance;
		anc_data[2] = tsk->conn_instance;
P
Per Liden 已提交
1216 1217
		break;
	default:
1218
		has_name = 0;
P
Per Liden 已提交
1219
	}
1220 1221 1222 1223 1224
	if (has_name) {
		res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
		if (res)
			return res;
	}
P
Per Liden 已提交
1225 1226 1227 1228

	return 0;
}

1229
static void tipc_sk_send_ack(struct tipc_sock *tsk)
1230
{
1231 1232
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
1233
	struct sk_buff *skb = NULL;
1234
	struct tipc_msg *msg;
1235 1236
	u32 peer_port = tsk_peer_port(tsk);
	u32 dnode = tsk_peer_node(tsk);
1237

1238
	if (!tipc_sk_connected(sk))
1239
		return;
1240 1241 1242
	skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
			      dnode, tsk_own_node(tsk), peer_port,
			      tsk->portid, TIPC_OK);
1243
	if (!skb)
1244
		return;
1245
	msg = buf_msg(skb);
1246 1247 1248 1249 1250 1251 1252 1253
	msg_set_conn_ack(msg, tsk->rcv_unacked);
	tsk->rcv_unacked = 0;

	/* Adjust to and advertize the correct window limit */
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
		tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
		msg_set_adv_win(msg, tsk->rcv_win);
	}
1254
	tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
1255 1256
}

1257
static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
Y
Ying Xue 已提交
1258 1259 1260
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT(wait);
1261
	long timeo = *timeop;
Y
Ying Xue 已提交
1262 1263 1264 1265
	int err;

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1266
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
1267
			if (sk->sk_shutdown & RCV_SHUTDOWN) {
Y
Ying Xue 已提交
1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280
				err = -ENOTCONN;
				break;
			}
			release_sock(sk);
			timeo = schedule_timeout(timeo);
			lock_sock(sk);
		}
		err = 0;
		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
1281 1282 1283
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
Y
Ying Xue 已提交
1284 1285
	}
	finish_wait(sk_sleep(sk), &wait);
1286
	*timeop = timeo;
Y
Ying Xue 已提交
1287 1288 1289
	return err;
}

1290
/**
1291
 * tipc_recvmsg - receive packet-oriented message
P
Per Liden 已提交
1292 1293 1294
 * @m: descriptor for message info
 * @buf_len: total size of user buffer area
 * @flags: receive flags
1295
 *
P
Per Liden 已提交
1296 1297 1298 1299 1300
 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
 * If the complete message doesn't fit in user area, truncate it.
 *
 * Returns size of returned message data, errno otherwise
 */
1301 1302
static int tipc_recvmsg(struct socket *sock, struct msghdr *m, size_t buf_len,
			int flags)
P
Per Liden 已提交
1303
{
1304
	struct sock *sk = sock->sk;
1305
	struct tipc_sock *tsk = tipc_sk(sk);
P
Per Liden 已提交
1306 1307
	struct sk_buff *buf;
	struct tipc_msg *msg;
1308
	bool is_connectionless = tipc_sk_type_connectionless(sk);
Y
Ying Xue 已提交
1309
	long timeo;
P
Per Liden 已提交
1310 1311
	unsigned int sz;
	u32 err;
1312
	int res, hlen;
P
Per Liden 已提交
1313

1314
	/* Catch invalid receive requests */
P
Per Liden 已提交
1315 1316 1317
	if (unlikely(!buf_len))
		return -EINVAL;

1318
	lock_sock(sk);
P
Per Liden 已提交
1319

1320
	if (!is_connectionless && unlikely(sk->sk_state == TIPC_OPEN)) {
1321
		res = -ENOTCONN;
P
Per Liden 已提交
1322 1323 1324
		goto exit;
	}

Y
Ying Xue 已提交
1325
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1326
restart:
P
Per Liden 已提交
1327

1328
	/* Look for a message in receive queue; wait if necessary */
1329
	res = tipc_wait_for_rcvmsg(sock, &timeo);
Y
Ying Xue 已提交
1330 1331
	if (res)
		goto exit;
P
Per Liden 已提交
1332

1333 1334
	/* Look at first message in receive queue */
	buf = skb_peek(&sk->sk_receive_queue);
P
Per Liden 已提交
1335 1336
	msg = buf_msg(buf);
	sz = msg_data_sz(msg);
1337
	hlen = msg_hdr_sz(msg);
P
Per Liden 已提交
1338 1339 1340 1341
	err = msg_errcode(msg);

	/* Discard an empty non-errored message & try again */
	if ((!sz) && (!err)) {
1342
		tsk_advance_rx_queue(sk);
P
Per Liden 已提交
1343 1344 1345 1346 1347 1348 1349
		goto restart;
	}

	/* Capture sender's address (optional) */
	set_orig_addr(m, msg);

	/* Capture ancillary data (optional) */
1350
	res = tipc_sk_anc_data_recv(m, msg, tsk);
1351
	if (res)
P
Per Liden 已提交
1352 1353 1354 1355 1356 1357 1358 1359
		goto exit;

	/* Capture message data (if valid) & compute return value (always) */
	if (!err) {
		if (unlikely(buf_len < sz)) {
			sz = buf_len;
			m->msg_flags |= MSG_TRUNC;
		}
1360
		res = skb_copy_datagram_msg(buf, hlen, m, sz);
1361
		if (res)
P
Per Liden 已提交
1362 1363 1364
			goto exit;
		res = sz;
	} else {
1365 1366
		if (is_connectionless || err == TIPC_CONN_SHUTDOWN ||
		    m->msg_control)
P
Per Liden 已提交
1367 1368 1369 1370 1371
			res = 0;
		else
			res = -ECONNRESET;
	}

1372 1373 1374
	if (unlikely(flags & MSG_PEEK))
		goto exit;

1375
	if (likely(!is_connectionless)) {
1376 1377 1378
		tsk->rcv_unacked += tsk_inc(tsk, hlen + sz);
		if (unlikely(tsk->rcv_unacked >= (tsk->rcv_win / 4)))
			tipc_sk_send_ack(tsk);
1379
	}
1380
	tsk_advance_rx_queue(sk);
P
Per Liden 已提交
1381
exit:
1382
	release_sock(sk);
P
Per Liden 已提交
1383 1384 1385
	return res;
}

1386
/**
1387
 * tipc_recv_stream - receive stream-oriented data
P
Per Liden 已提交
1388 1389 1390
 * @m: descriptor for message info
 * @buf_len: total size of user buffer area
 * @flags: receive flags
1391 1392
 *
 * Used for SOCK_STREAM messages only.  If not enough data is available
P
Per Liden 已提交
1393 1394 1395 1396
 * will optionally wait for more; never truncates data.
 *
 * Returns size of returned message data, errno otherwise
 */
1397 1398
static int tipc_recv_stream(struct socket *sock, struct msghdr *m,
			    size_t buf_len, int flags)
P
Per Liden 已提交
1399
{
1400
	struct sock *sk = sock->sk;
1401
	struct tipc_sock *tsk = tipc_sk(sk);
P
Per Liden 已提交
1402 1403
	struct sk_buff *buf;
	struct tipc_msg *msg;
Y
Ying Xue 已提交
1404
	long timeo;
P
Per Liden 已提交
1405
	unsigned int sz;
1406
	int target;
P
Per Liden 已提交
1407 1408
	int sz_copied = 0;
	u32 err;
1409
	int res = 0, hlen;
P
Per Liden 已提交
1410

1411
	/* Catch invalid receive attempts */
P
Per Liden 已提交
1412 1413 1414
	if (unlikely(!buf_len))
		return -EINVAL;

1415
	lock_sock(sk);
P
Per Liden 已提交
1416

1417
	if (unlikely(sk->sk_state == TIPC_OPEN)) {
1418
		res = -ENOTCONN;
P
Per Liden 已提交
1419 1420 1421
		goto exit;
	}

1422
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
Y
Ying Xue 已提交
1423
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
P
Per Liden 已提交
1424

1425
restart:
1426
	/* Look for a message in receive queue; wait if necessary */
1427
	res = tipc_wait_for_rcvmsg(sock, &timeo);
Y
Ying Xue 已提交
1428 1429
	if (res)
		goto exit;
P
Per Liden 已提交
1430

1431 1432
	/* Look at first message in receive queue */
	buf = skb_peek(&sk->sk_receive_queue);
P
Per Liden 已提交
1433 1434
	msg = buf_msg(buf);
	sz = msg_data_sz(msg);
1435
	hlen = msg_hdr_sz(msg);
P
Per Liden 已提交
1436 1437 1438 1439
	err = msg_errcode(msg);

	/* Discard an empty non-errored message & try again */
	if ((!sz) && (!err)) {
1440
		tsk_advance_rx_queue(sk);
P
Per Liden 已提交
1441 1442 1443 1444 1445 1446
		goto restart;
	}

	/* Optionally capture sender's address & ancillary data of first msg */
	if (sz_copied == 0) {
		set_orig_addr(m, msg);
1447
		res = tipc_sk_anc_data_recv(m, msg, tsk);
1448
		if (res)
P
Per Liden 已提交
1449 1450 1451 1452 1453
			goto exit;
	}

	/* Capture message data (if valid) & compute return value (always) */
	if (!err) {
1454 1455 1456
		u32 offset = TIPC_SKB_CB(buf)->bytes_read;
		u32 needed;
		int sz_to_copy;
P
Per Liden 已提交
1457

1458
		sz -= offset;
P
Per Liden 已提交
1459
		needed = (buf_len - sz_copied);
1460
		sz_to_copy = min(sz, needed);
1461

1462
		res = skb_copy_datagram_msg(buf, hlen + offset, m, sz_to_copy);
1463
		if (res)
P
Per Liden 已提交
1464
			goto exit;
1465

P
Per Liden 已提交
1466 1467 1468 1469
		sz_copied += sz_to_copy;

		if (sz_to_copy < sz) {
			if (!(flags & MSG_PEEK))
1470 1471
				TIPC_SKB_CB(buf)->bytes_read =
					offset + sz_to_copy;
P
Per Liden 已提交
1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483
			goto exit;
		}
	} else {
		if (sz_copied != 0)
			goto exit; /* can't add error msg to valid data */

		if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)
			res = 0;
		else
			res = -ECONNRESET;
	}

1484 1485 1486 1487 1488 1489 1490
	if (unlikely(flags & MSG_PEEK))
		goto exit;

	tsk->rcv_unacked += tsk_inc(tsk, hlen + sz);
	if (unlikely(tsk->rcv_unacked >= (tsk->rcv_win / 4)))
		tipc_sk_send_ack(tsk);
	tsk_advance_rx_queue(sk);
P
Per Liden 已提交
1491 1492

	/* Loop around if more data is required */
1493 1494
	if ((sz_copied < buf_len) &&	/* didn't get all requested data */
	    (!skb_queue_empty(&sk->sk_receive_queue) ||
1495
	    (sz_copied < target)) &&	/* and more is ready or required */
1496
	    (!err))			/* and haven't reached a FIN */
P
Per Liden 已提交
1497 1498 1499
		goto restart;

exit:
1500
	release_sock(sk);
1501
	return sz_copied ? sz_copied : res;
P
Per Liden 已提交
1502 1503
}

1504 1505 1506 1507 1508 1509 1510 1511 1512 1513
/**
 * tipc_write_space - wake up thread if port congestion is released
 * @sk: socket
 */
static void tipc_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
H
Herbert Xu 已提交
1514
	if (skwq_has_sleeper(wq))
1515 1516 1517 1518 1519 1520 1521 1522 1523 1524
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
						POLLWRNORM | POLLWRBAND);
	rcu_read_unlock();
}

/**
 * tipc_data_ready - wake up threads to indicate messages have been received
 * @sk: socket
 * @len: the length of messages
 */
1525
static void tipc_data_ready(struct sock *sk)
1526 1527 1528 1529 1530
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
H
Herbert Xu 已提交
1531
	if (skwq_has_sleeper(wq))
1532 1533 1534 1535 1536
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
						POLLRDNORM | POLLRDBAND);
	rcu_read_unlock();
}

1537 1538 1539 1540 1541
/* tipc_sock_destruct - free any buffers still queued when the socket dies */
static void tipc_sock_destruct(struct sock *sk)
{
	__skb_queue_purge(&sk->sk_receive_queue);
}

1542 1543
/**
 * filter_connect - Handle all incoming messages for a connection-based socket
 * @tsk: TIPC socket
 * @skb: pointer to message buffer. Set to NULL if buffer is consumed
 *
 * Returns true if everything ok, false otherwise
 */
static bool filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_msg *hdr = buf_msg(skb);

	if (unlikely(msg_mcast(hdr)))
		return false;

	switch (sk->sk_state) {
	case TIPC_CONNECTING:
		/* Accept only ACK or NACK message */
		if (unlikely(!msg_connected(hdr)))
			return false;

		if (unlikely(msg_errcode(hdr))) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			sk->sk_err = ECONNREFUSED;
			return true;
		}

		if (unlikely(!msg_isdata(hdr))) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			sk->sk_err = EINVAL;
			return true;
		}

		tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr));
		msg_set_importance(&tsk->phdr, msg_importance(hdr));

		/* If 'ACK+' message, add to socket receive queue */
		if (msg_data_sz(hdr))
			return true;

		/* If empty 'ACK-' message, wake up sleeping connect() */
		if (waitqueue_active(sk_sleep(sk)))
			wake_up_interruptible(sk_sleep(sk));

		/* 'ACK-' message is neither accepted nor rejected: */
		msg_set_dest_droppable(hdr, 1);
		return false;

	case TIPC_OPEN:
	case TIPC_DISCONNECTING:
		break;
	case TIPC_LISTEN:
		/* Accept only SYN message */
		if (!msg_connected(hdr) && !(msg_errcode(hdr)))
			return true;
		break;
	case TIPC_ESTABLISHED:
		/* Accept only connection-based messages sent by peer */
		if (unlikely(!tsk_peer_msg(tsk, hdr)))
			return false;

		if (unlikely(msg_errcode(hdr))) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			/* Let timer expire on it's own */
			tipc_node_remove_conn(net, tsk_peer_node(tsk),
					      tsk->portid);
			sk->sk_state_change(sk);
		}
		return true;
	default:
		pr_err("Unknown sk_state %u\n", sk->sk_state);
	}

	return false;
}

1619 1620 1621
/**
 * rcvbuf_limit - get proper overload limit of socket receive queue
 * @sk: socket
1622
 * @skb: message
1623
 *
1624 1625
 * For connection oriented messages, irrespective of importance,
 * default queue limit is 2 MB.
1626
 *
1627 1628
 * For connectionless messages, queue limits are based on message
 * importance as follows:
1629
 *
1630 1631 1632 1633
 * TIPC_LOW_IMPORTANCE       (2 MB)
 * TIPC_MEDIUM_IMPORTANCE    (4 MB)
 * TIPC_HIGH_IMPORTANCE      (8 MB)
 * TIPC_CRITICAL_IMPORTANCE  (16 MB)
1634 1635 1636
 *
 * Returns overload limit according to corresponding message importance
 */
1637
static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
1638
{
1639 1640 1641 1642 1643
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = buf_msg(skb);

	if (unlikely(!msg_connected(hdr)))
		return sk->sk_rcvbuf << msg_importance(hdr);
1644

1645 1646
	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
		return sk->sk_rcvbuf;
1647

1648
	return FLOWCTL_MSG_LIM;
1649 1650
}

1651
/**
1652 1653
 * filter_rcv - validate incoming message
 * @sk: socket
1654
 * @skb: pointer to message.
1655
 *
1656 1657 1658
 * Enqueues message on receive queue if acceptable; optionally handles
 * disconnect indication for a connected socket.
 *
1659
 * Called with socket lock already taken
1660
 *
1661
 * Returns true if message was added to socket receive queue, otherwise false
P
Per Liden 已提交
1662
 */
J
Jon Paul Maloy 已提交
1663 1664
static bool filter_rcv(struct sock *sk, struct sk_buff *skb,
		       struct sk_buff_head *xmitq)
P
Per Liden 已提交
1665
{
1666
	struct tipc_sock *tsk = tipc_sk(sk);
1667 1668 1669 1670
	struct tipc_msg *hdr = buf_msg(skb);
	unsigned int limit = rcvbuf_limit(sk, skb);
	int err = TIPC_OK;
	int usr = msg_user(hdr);
1671
	u32 onode;
P
Per Liden 已提交
1672

1673
	if (unlikely(msg_user(hdr) == CONN_MANAGER)) {
J
Jon Paul Maloy 已提交
1674
		tipc_sk_proto_rcv(tsk, skb, xmitq);
1675
		return false;
1676
	}
1677

1678
	if (unlikely(usr == SOCK_WAKEUP)) {
1679
		onode = msg_orignode(hdr);
1680
		kfree_skb(skb);
1681 1682
		u32_del(&tsk->cong_links, onode);
		tsk->cong_link_cnt--;
1683
		sk->sk_write_space(sk);
1684
		return false;
1685 1686
	}

1687 1688 1689 1690 1691
	/* Drop if illegal message type */
	if (unlikely(msg_type(hdr) > TIPC_DIRECT_MSG)) {
		kfree_skb(skb);
		return false;
	}
1692

1693
	/* Reject if wrong message type for current socket state */
1694
	if (tipc_sk_type_connectionless(sk)) {
1695 1696 1697 1698 1699 1700 1701
		if (msg_connected(hdr)) {
			err = TIPC_ERR_NO_PORT;
			goto reject;
		}
	} else if (unlikely(!filter_connect(tsk, skb))) {
		err = TIPC_ERR_NO_PORT;
		goto reject;
P
Per Liden 已提交
1702 1703 1704
	}

	/* Reject message if there isn't room to queue it */
1705 1706 1707 1708
	if (unlikely(sk_rmem_alloc_get(sk) + skb->truesize >= limit)) {
		err = TIPC_ERR_OVERLOAD;
		goto reject;
	}
P
Per Liden 已提交
1709

1710
	/* Enqueue message */
1711
	TIPC_SKB_CB(skb)->bytes_read = 0;
1712 1713
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	skb_set_owner_r(skb, sk);
1714

1715
	sk->sk_data_ready(sk);
1716 1717 1718
	return true;

reject:
J
Jon Paul Maloy 已提交
1719 1720
	if (tipc_msg_reverse(tsk_own_node(tsk), &skb, err))
		__skb_queue_tail(xmitq, skb);
1721
	return false;
1722
}
P
Per Liden 已提交
1723

1724
/**
1725
 * tipc_backlog_rcv - handle incoming message from backlog queue
1726
 * @sk: socket
1727
 * @skb: message
1728
 *
1729
 * Caller must hold socket lock
1730 1731 1732
 *
 * Returns 0
 */
1733
static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
1734
{
1735
	unsigned int truesize = skb->truesize;
J
Jon Paul Maloy 已提交
1736 1737
	struct sk_buff_head xmitq;
	u32 dnode, selector;
1738

J
Jon Paul Maloy 已提交
1739 1740 1741
	__skb_queue_head_init(&xmitq);

	if (likely(filter_rcv(sk, skb, &xmitq))) {
1742
		atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt);
J
Jon Paul Maloy 已提交
1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753
		return 0;
	}

	if (skb_queue_empty(&xmitq))
		return 0;

	/* Send response/rejected message */
	skb = __skb_dequeue(&xmitq);
	dnode = msg_destnode(buf_msg(skb));
	selector = msg_origport(buf_msg(skb));
	tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
1754 1755 1756
	return 0;
}

1757
/**
1758 1759 1760 1761 1762
 * tipc_sk_enqueue - extract all buffers with destination 'dport' from
 *                   inputq and try adding them to socket or backlog queue
 * @inputq: list of incoming buffers with potentially different destinations
 * @sk: socket where the buffers should be enqueued
 * @dport: port number for the socket
1763 1764 1765
 *
 * Caller must hold socket lock
 */
1766
static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
J
Jon Paul Maloy 已提交
1767
			    u32 dport, struct sk_buff_head *xmitq)
1768
{
J
Jon Paul Maloy 已提交
1769 1770
	unsigned long time_limit = jiffies + 2;
	struct sk_buff *skb;
1771 1772
	unsigned int lim;
	atomic_t *dcnt;
J
Jon Paul Maloy 已提交
1773
	u32 onode;
1774 1775

	while (skb_queue_len(inputq)) {
1776
		if (unlikely(time_after_eq(jiffies, time_limit)))
1777 1778
			return;

1779 1780
		skb = tipc_skb_dequeue(inputq, dport);
		if (unlikely(!skb))
1781 1782 1783
			return;

		/* Add message directly to receive queue if possible */
1784
		if (!sock_owned_by_user(sk)) {
J
Jon Paul Maloy 已提交
1785
			filter_rcv(sk, skb, xmitq);
1786
			continue;
1787
		}
1788 1789

		/* Try backlog, compensating for double-counted bytes */
1790
		dcnt = &tipc_sk(sk)->dupl_rcvcnt;
1791
		if (!sk->sk_backlog.len)
1792 1793 1794 1795
			atomic_set(dcnt, 0);
		lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
		if (likely(!sk_add_backlog(sk, skb, lim)))
			continue;
1796 1797

		/* Overload => reject message back to sender */
J
Jon Paul Maloy 已提交
1798 1799 1800
		onode = tipc_own_addr(sock_net(sk));
		if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
			__skb_queue_tail(xmitq, skb);
1801
		break;
1802
	}
1803 1804
}

1805
/**
1806 1807 1808 1809
 * tipc_sk_rcv - handle a chain of incoming buffers
 * @inputq: buffer list containing the buffers
 * Consumes all buffers in list until inputq is empty
 * Note: may be called in multiple threads referring to the same queue
1810
 */
1811
void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
1812
{
J
Jon Paul Maloy 已提交
1813
	struct sk_buff_head xmitq;
1814
	u32 dnode, dport = 0;
E
Erik Hugne 已提交
1815
	int err;
1816 1817
	struct tipc_sock *tsk;
	struct sock *sk;
1818
	struct sk_buff *skb;
1819

J
Jon Paul Maloy 已提交
1820
	__skb_queue_head_init(&xmitq);
1821 1822 1823
	while (skb_queue_len(inputq)) {
		dport = tipc_skb_peek_port(inputq, dport);
		tsk = tipc_sk_lookup(net, dport);
1824

1825 1826 1827
		if (likely(tsk)) {
			sk = &tsk->sk;
			if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
J
Jon Paul Maloy 已提交
1828
				tipc_sk_enqueue(inputq, sk, dport, &xmitq);
1829 1830
				spin_unlock_bh(&sk->sk_lock.slock);
			}
J
Jon Paul Maloy 已提交
1831 1832 1833 1834 1835
			/* Send pending response/rejected messages, if any */
			while ((skb = __skb_dequeue(&xmitq))) {
				dnode = msg_destnode(buf_msg(skb));
				tipc_node_xmit_skb(net, skb, dnode, dport);
			}
1836 1837 1838
			sock_put(sk);
			continue;
		}
1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851

		/* No destination socket => dequeue skb if still there */
		skb = tipc_skb_dequeue(inputq, dport);
		if (!skb)
			return;

		/* Try secondary lookup if unresolved named message */
		err = TIPC_ERR_NO_PORT;
		if (tipc_msg_lookup_dest(net, skb, &err))
			goto xmit;

		/* Prepare for message rejection */
		if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
1852
			continue;
1853
xmit:
1854
		dnode = msg_destnode(buf_msg(skb));
1855
		tipc_node_xmit_skb(net, skb, dnode, dport);
1856
	}
P
Per Liden 已提交
1857 1858
}

Y
Ying Xue 已提交
1859 1860
static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
{
W
WANG Cong 已提交
1861
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
Y
Ying Xue 已提交
1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873
	struct sock *sk = sock->sk;
	int done;

	do {
		int err = sock_error(sk);
		if (err)
			return err;
		if (!*timeo_p)
			return -ETIMEDOUT;
		if (signal_pending(current))
			return sock_intr_errno(*timeo_p);

W
WANG Cong 已提交
1874
		add_wait_queue(sk_sleep(sk), &wait);
1875
		done = sk_wait_event(sk, timeo_p,
W
WANG Cong 已提交
1876 1877
				     sk->sk_state != TIPC_CONNECTING, &wait);
		remove_wait_queue(sk_sleep(sk), &wait);
Y
Ying Xue 已提交
1878 1879 1880 1881
	} while (!done);
	return 0;
}

P
Per Liden 已提交
1882
/**
1883
 * tipc_connect - establish a connection to another TIPC port
P
Per Liden 已提交
1884 1885 1886
 * @sock: socket structure
 * @dest: socket address for destination port
 * @destlen: size of socket address data structure
1887
 * @flags: file-related flags associated with socket
P
Per Liden 已提交
1888 1889 1890
 *
 * Returns 0 on success, errno otherwise
 */
1891 1892
static int tipc_connect(struct socket *sock, struct sockaddr *dest,
			int destlen, int flags)
P
Per Liden 已提交
1893
{
1894
	struct sock *sk = sock->sk;
1895
	struct tipc_sock *tsk = tipc_sk(sk);
1896 1897
	struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
	struct msghdr m = {NULL,};
1898
	long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
1899
	int previous;
1900
	int res = 0;
1901

1902 1903
	lock_sock(sk);

1904
	/* DGRAM/RDM connect(), just save the destaddr */
1905
	if (tipc_sk_type_connectionless(sk)) {
1906
		if (dst->family == AF_UNSPEC) {
1907
			memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
1908 1909
		} else if (destlen != sizeof(struct sockaddr_tipc)) {
			res = -EINVAL;
1910
		} else {
1911
			memcpy(&tsk->peer, dest, destlen);
1912
		}
1913 1914
		goto exit;
	}
1915 1916 1917 1918 1919 1920 1921

	/*
	 * Reject connection attempt using multicast address
	 *
	 * Note: send_msg() validates the rest of the address fields,
	 *       so there's no need to do it here
	 */
1922 1923 1924 1925 1926
	if (dst->addrtype == TIPC_ADDR_MCAST) {
		res = -EINVAL;
		goto exit;
	}

1927
	previous = sk->sk_state;
1928 1929 1930

	switch (sk->sk_state) {
	case TIPC_OPEN:
1931 1932 1933 1934 1935 1936 1937 1938 1939 1940
		/* Send a 'SYN-' to destination */
		m.msg_name = dest;
		m.msg_namelen = destlen;

		/* If connect is in non-blocking case, set MSG_DONTWAIT to
		 * indicate send_msg() is never blocked.
		 */
		if (!timeout)
			m.msg_flags = MSG_DONTWAIT;

1941
		res = __tipc_sendmsg(sock, &m, 0);
1942 1943 1944
		if ((res < 0) && (res != -EWOULDBLOCK))
			goto exit;

1945
		/* Just entered TIPC_CONNECTING state; the only
1946 1947 1948 1949
		 * difference is that return value in non-blocking
		 * case is EINPROGRESS, rather than EALREADY.
		 */
		res = -EINPROGRESS;
1950 1951 1952 1953 1954
		/* fall thru' */
	case TIPC_CONNECTING:
		if (!timeout) {
			if (previous == TIPC_CONNECTING)
				res = -EALREADY;
Y
Ying Xue 已提交
1955
			goto exit;
1956
		}
Y
Ying Xue 已提交
1957 1958 1959
		timeout = msecs_to_jiffies(timeout);
		/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
		res = tipc_wait_for_connect(sock, &timeout);
1960 1961
		break;
	case TIPC_ESTABLISHED:
1962
		res = -EISCONN;
1963 1964
		break;
	default:
1965
		res = -EINVAL;
1966
	}
1967

1968 1969
exit:
	release_sock(sk);
1970
	return res;
P
Per Liden 已提交
1971 1972
}

1973
/**
1974
 * tipc_listen - allow socket to listen for incoming connections
P
Per Liden 已提交
1975 1976
 * @sock: socket structure
 * @len: (unused)
1977
 *
P
Per Liden 已提交
1978 1979
 * Returns 0 on success, errno otherwise
 */
1980
static int tipc_listen(struct socket *sock, int len)
P
Per Liden 已提交
1981
{
1982 1983 1984 1985
	struct sock *sk = sock->sk;
	int res;

	lock_sock(sk);
1986
	res = tipc_set_sk_state(sk, TIPC_LISTEN);
1987
	release_sock(sk);
1988

1989
	return res;
P
Per Liden 已提交
1990 1991
}

Y
Ying Xue 已提交
1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005
/* tipc_wait_for_accept - block until a connection request is queued
 * @sock: listening socket
 * @timeo: maximum time to sleep, in jiffies (0 => non-blocking)
 *
 * Returns 0 when the receive queue is non-empty, -EAGAIN on timeout,
 * or a signal-derived errno if interrupted.
 */
static int tipc_wait_for_accept(struct socket *sock, long timeo)
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT(wait);
	int err;

	/* True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	*/
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
			release_sock(sk);
			timeo = schedule_timeout(timeo);
			lock_sock(sk);
		}
		if (!skb_queue_empty(&sk->sk_receive_queue)) {
			err = 0;
			break;
		}
		if (!timeo) {
			err = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}

2025
/**
2026
 * tipc_accept - wait for connection request
P
Per Liden 已提交
2027 2028 2029
 * @sock: listening socket
 * @newsock: new socket that is to be connected
 * @flags: file-related flags associated with socket
2030
 *
P
Per Liden 已提交
2031 2032
 * Returns 0 on success, errno otherwise
 */
2033 2034
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
		       bool kern)
P
Per Liden 已提交
2035
{
2036
	struct sock *new_sk, *sk = sock->sk;
P
Per Liden 已提交
2037
	struct sk_buff *buf;
2038
	struct tipc_sock *new_tsock;
2039
	struct tipc_msg *msg;
Y
Ying Xue 已提交
2040
	long timeo;
2041
	int res;
P
Per Liden 已提交
2042

2043
	lock_sock(sk);
P
Per Liden 已提交
2044

2045
	if (sk->sk_state != TIPC_LISTEN) {
2046
		res = -EINVAL;
P
Per Liden 已提交
2047 2048
		goto exit;
	}
Y
Ying Xue 已提交
2049 2050 2051 2052
	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	res = tipc_wait_for_accept(sock, timeo);
	if (res)
		goto exit;
2053 2054 2055

	buf = skb_peek(&sk->sk_receive_queue);

2056
	res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
2057 2058
	if (res)
		goto exit;
2059
	security_sk_clone(sock->sk, new_sock->sk);
P
Per Liden 已提交
2060

2061
	new_sk = new_sock->sk;
2062
	new_tsock = tipc_sk(new_sk);
2063
	msg = buf_msg(buf);
P
Per Liden 已提交
2064

2065 2066 2067 2068 2069 2070 2071
	/* we lock on new_sk; but lockdep sees the lock on sk */
	lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);

	/*
	 * Reject any stray messages received by new socket
	 * before the socket lock was taken (very, very unlikely)
	 */
2072
	tsk_rej_rx_queue(new_sk);
2073 2074

	/* Connect new socket to it's peer */
2075
	tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
2076

2077
	tsk_set_importance(new_tsock, msg_importance(msg));
2078
	if (msg_named(msg)) {
2079 2080
		new_tsock->conn_type = msg_nametype(msg);
		new_tsock->conn_instance = msg_nameinst(msg);
P
Per Liden 已提交
2081
	}
2082 2083 2084 2085 2086 2087 2088 2089

	/*
	 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
	 * Respond to 'SYN+' by queuing it on new socket.
	 */
	if (!msg_data_sz(msg)) {
		struct msghdr m = {NULL,};

2090
		tsk_advance_rx_queue(sk);
2091
		__tipc_sendstream(new_sock, &m, 0);
2092 2093 2094
	} else {
		__skb_dequeue(&sk->sk_receive_queue);
		__skb_queue_head(&new_sk->sk_receive_queue, buf);
2095
		skb_set_owner_r(buf, new_sk);
2096 2097
	}
	release_sock(new_sk);
P
Per Liden 已提交
2098
exit:
2099
	release_sock(sk);
P
Per Liden 已提交
2100 2101 2102 2103
	return res;
}

/**
2104
 * tipc_shutdown - shutdown socket connection
P
Per Liden 已提交
2105
 * @sock: socket structure
2106
 * @how: direction to close (must be SHUT_RDWR)
P
Per Liden 已提交
2107 2108
 *
 * Terminates connection (if necessary), then purges socket's receive queue.
2109
 *
P
Per Liden 已提交
2110 2111
 * Returns 0 on success, errno otherwise
 */
2112
static int tipc_shutdown(struct socket *sock, int how)
P
Per Liden 已提交
2113
{
2114
	struct sock *sk = sock->sk;
P
Per Liden 已提交
2115 2116
	int res;

2117 2118
	if (how != SHUT_RDWR)
		return -EINVAL;
P
Per Liden 已提交
2119

2120
	lock_sock(sk);
P
Per Liden 已提交
2121

2122 2123
	__tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
	sk->sk_shutdown = SEND_SHUTDOWN;
P
Per Liden 已提交
2124

2125
	if (sk->sk_state == TIPC_DISCONNECTING) {
2126
		/* Discard any unreceived messages */
2127
		__skb_queue_purge(&sk->sk_receive_queue);
2128 2129 2130

		/* Wake up anyone sleeping in poll */
		sk->sk_state_change(sk);
P
Per Liden 已提交
2131
		res = 0;
2132
	} else {
P
Per Liden 已提交
2133 2134 2135
		res = -ENOTCONN;
	}

2136
	release_sock(sk);
P
Per Liden 已提交
2137 2138 2139
	return res;
}

2140
static void tipc_sk_timeout(unsigned long data)
2141
{
2142 2143
	struct tipc_sock *tsk = (struct tipc_sock *)data;
	struct sock *sk = &tsk->sk;
2144
	struct sk_buff *skb = NULL;
2145
	u32 peer_port, peer_node;
2146
	u32 own_node = tsk_own_node(tsk);
2147

J
Jon Paul Maloy 已提交
2148
	bh_lock_sock(sk);
2149
	if (!tipc_sk_connected(sk)) {
J
Jon Paul Maloy 已提交
2150 2151
		bh_unlock_sock(sk);
		goto exit;
2152
	}
2153 2154
	peer_port = tsk_peer_port(tsk);
	peer_node = tsk_peer_node(tsk);
2155

2156
	if (tsk->probe_unacked) {
2157
		if (!sock_owned_by_user(sk)) {
2158
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2159 2160 2161 2162 2163 2164 2165 2166
			tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
					      tsk_peer_port(tsk));
			sk->sk_state_change(sk);
		} else {
			/* Try again later */
			sk_reset_timer(sk, &sk->sk_timer, (HZ / 20));
		}

2167 2168
		bh_unlock_sock(sk);
		goto exit;
2169
	}
2170 2171 2172 2173

	skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE,
			      INT_H_SIZE, 0, peer_node, own_node,
			      peer_port, tsk->portid, TIPC_OK);
2174
	tsk->probe_unacked = true;
2175
	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTERVAL);
2176
	bh_unlock_sock(sk);
2177
	if (skb)
2178
		tipc_node_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid);
J
Jon Paul Maloy 已提交
2179
exit:
2180
	sock_put(sk);
2181 2182
}

2183
/* tipc_sk_publish - bind a name sequence to this socket
 * @tsk: socket performing the publication
 * @scope: publication scope (node/cluster/zone)
 * @seq: name sequence to publish
 *
 * Returns 0 on success, -EINVAL if the socket is connected or the name
 * table rejects the entry, -EADDRINUSE if the per-socket key space wrapped.
 */
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
			   struct tipc_name_seq const *seq)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct publication *publ;
	u32 key;

	/* A connected socket cannot also be a named endpoint */
	if (tipc_sk_connected(sk))
		return -EINVAL;
	key = tsk->portid + tsk->pub_count + 1;
	if (key == tsk->portid)
		return -EADDRINUSE;

	publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
				    scope, tsk->portid, key);
	if (unlikely(!publ))
		return -EINVAL;

	list_add(&publ->pport_list, &tsk->publications);
	tsk->pub_count++;
	tsk->published = 1;
	return 0;
}

2208
/* tipc_sk_withdraw - remove publication(s) bound to this socket
 * @tsk: socket owning the publications
 * @scope: scope to match when @seq is given
 * @seq: specific name sequence to withdraw, or NULL to withdraw all
 *
 * Returns 0 if at least one publication was withdrawn, -EINVAL otherwise.
 */
static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
			    struct tipc_name_seq const *seq)
{
	struct net *net = sock_net(&tsk->sk);
	struct publication *publ;
	struct publication *safe;
	int rc = -EINVAL;

	list_for_each_entry_safe(publ, safe, &tsk->publications, pport_list) {
		if (seq) {
			/* Withdraw only the single exactly-matching entry */
			if (publ->scope != scope)
				continue;
			if (publ->type != seq->type)
				continue;
			if (publ->lower != seq->lower)
				continue;
			if (publ->upper != seq->upper)
				break;
			tipc_nametbl_withdraw(net, publ->type, publ->lower,
					      publ->ref, publ->key);
			rc = 0;
			break;
		}
		/* No filter: withdraw every publication on the socket */
		tipc_nametbl_withdraw(net, publ->type, publ->lower,
				      publ->ref, publ->key);
		rc = 0;
	}
	if (list_empty(&tsk->publications))
		tsk->published = 0;
	return rc;
}

2240 2241 2242
/* tipc_sk_reinit: set non-zero address in all existing sockets
 *                 when we go from standalone to network mode.
 */
2243
void tipc_sk_reinit(struct net *net)
2244
{
2245
	struct tipc_net *tn = net_generic(net, tipc_net_id);
2246
	struct rhashtable_iter iter;
2247
	struct tipc_sock *tsk;
2248 2249
	struct tipc_msg *msg;

2250 2251 2252 2253 2254 2255 2256 2257
	rhashtable_walk_enter(&tn->sk_rht, &iter);

	do {
		tsk = ERR_PTR(rhashtable_walk_start(&iter));
		if (tsk)
			continue;

		while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
2258 2259
			spin_lock_bh(&tsk->sk.sk_lock.slock);
			msg = &tsk->phdr;
2260 2261
			msg_set_prevnode(msg, tn->own_addr);
			msg_set_orignode(msg, tn->own_addr);
2262 2263
			spin_unlock_bh(&tsk->sk.sk_lock.slock);
		}
2264 2265 2266

		rhashtable_walk_stop(&iter);
	} while (tsk == ERR_PTR(-EAGAIN));
2267 2268
}

2269
/* tipc_sk_lookup - map a port id to its socket, taking a reference
 *
 * Returns the socket with its refcount bumped, or NULL if no match;
 * the caller must sock_put() the returned socket.
 */
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_sock *tsk;

	rcu_read_lock();
	tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
	if (tsk)
		sock_hold(&tsk->sk);
	rcu_read_unlock();

	return tsk;
}

2283
static int tipc_sk_insert(struct tipc_sock *tsk)
2284
{
2285 2286 2287
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
2288 2289
	u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
	u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;
2290

2291 2292 2293 2294 2295 2296
	while (remaining--) {
		portid++;
		if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
			portid = TIPC_MIN_PORT;
		tsk->portid = portid;
		sock_hold(&tsk->sk);
2297 2298
		if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
						   tsk_rht_params))
2299 2300
			return 0;
		sock_put(&tsk->sk);
2301 2302
	}

2303
	return -1;
2304 2305
}

2306
static void tipc_sk_remove(struct tipc_sock *tsk)
2307
{
2308
	struct sock *sk = &tsk->sk;
2309
	struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
2310

2311
	if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
2312 2313
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
2314 2315 2316
	}
}

2317 2318 2319 2320 2321 2322 2323
static const struct rhashtable_params tsk_rht_params = {
	.nelem_hint = 192,
	.head_offset = offsetof(struct tipc_sock, node),
	.key_offset = offsetof(struct tipc_sock, portid),
	.key_len = sizeof(u32), /* portid */
	.max_size = 1048576,
	.min_size = 256,
2324
	.automatic_shrinking = true,
2325 2326
};

2327
int tipc_sk_rht_init(struct net *net)
2328
{
2329
	struct tipc_net *tn = net_generic(net, tipc_net_id);
2330 2331

	return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
2332 2333
}

2334
void tipc_sk_rht_destroy(struct net *net)
2335
{
2336 2337
	struct tipc_net *tn = net_generic(net, tipc_net_id);

2338 2339
	/* Wait for socket readers to complete */
	synchronize_net();
2340

2341
	rhashtable_destroy(&tn->sk_rht);
2342 2343
}

P
Per Liden 已提交
2344
/**
2345
 * tipc_setsockopt - set socket option
P
Per Liden 已提交
2346 2347 2348 2349 2350
 * @sock: socket structure
 * @lvl: option level
 * @opt: option identifier
 * @ov: pointer to new option value
 * @ol: length of option value
2351 2352
 *
 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
P
Per Liden 已提交
2353
 * (to ease compatibility).
2354
 *
P
Per Liden 已提交
2355 2356
 * Returns 0 on success, errno otherwise
 */
2357 2358
static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
			   char __user *ov, unsigned int ol)
P
Per Liden 已提交
2359
{
2360
	struct sock *sk = sock->sk;
2361
	struct tipc_sock *tsk = tipc_sk(sk);
2362
	u32 value = 0;
2363
	int res = 0;
P
Per Liden 已提交
2364

2365 2366
	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
		return 0;
P
Per Liden 已提交
2367 2368
	if (lvl != SOL_TIPC)
		return -ENOPROTOOPT;
2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384

	switch (opt) {
	case TIPC_IMPORTANCE:
	case TIPC_SRC_DROPPABLE:
	case TIPC_DEST_DROPPABLE:
	case TIPC_CONN_TIMEOUT:
		if (ol < sizeof(value))
			return -EINVAL;
		res = get_user(value, (u32 __user *)ov);
		if (res)
			return res;
		break;
	default:
		if (ov || ol)
			return -EINVAL;
	}
P
Per Liden 已提交
2385

2386
	lock_sock(sk);
2387

P
Per Liden 已提交
2388 2389
	switch (opt) {
	case TIPC_IMPORTANCE:
2390
		res = tsk_set_importance(tsk, value);
P
Per Liden 已提交
2391 2392 2393
		break;
	case TIPC_SRC_DROPPABLE:
		if (sock->type != SOCK_STREAM)
2394
			tsk_set_unreliable(tsk, value);
2395
		else
P
Per Liden 已提交
2396 2397 2398
			res = -ENOPROTOOPT;
		break;
	case TIPC_DEST_DROPPABLE:
2399
		tsk_set_unreturnable(tsk, value);
P
Per Liden 已提交
2400 2401
		break;
	case TIPC_CONN_TIMEOUT:
2402
		tipc_sk(sk)->conn_timeout = value;
P
Per Liden 已提交
2403
		break;
2404 2405 2406 2407 2408 2409 2410 2411
	case TIPC_MCAST_BROADCAST:
		tsk->mc_method.rcast = false;
		tsk->mc_method.mandatory = true;
		break;
	case TIPC_MCAST_REPLICAST:
		tsk->mc_method.rcast = true;
		tsk->mc_method.mandatory = true;
		break;
P
Per Liden 已提交
2412 2413 2414 2415
	default:
		res = -EINVAL;
	}

2416 2417
	release_sock(sk);

P
Per Liden 已提交
2418 2419 2420 2421
	return res;
}

/**
2422
 * tipc_getsockopt - get socket option
P
Per Liden 已提交
2423 2424 2425 2426 2427
 * @sock: socket structure
 * @lvl: option level
 * @opt: option identifier
 * @ov: receptacle for option value
 * @ol: receptacle for length of option value
2428 2429
 *
 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
P
Per Liden 已提交
2430
 * (to ease compatibility).
2431
 *
P
Per Liden 已提交
2432 2433
 * Returns 0 on success, errno otherwise
 */
2434 2435
static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
			   char __user *ov, int __user *ol)
P
Per Liden 已提交
2436
{
2437
	struct sock *sk = sock->sk;
2438
	struct tipc_sock *tsk = tipc_sk(sk);
2439
	int len;
P
Per Liden 已提交
2440
	u32 value;
2441
	int res;
P
Per Liden 已提交
2442

2443 2444
	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
		return put_user(0, ol);
P
Per Liden 已提交
2445 2446
	if (lvl != SOL_TIPC)
		return -ENOPROTOOPT;
2447 2448
	res = get_user(len, ol);
	if (res)
2449
		return res;
P
Per Liden 已提交
2450

2451
	lock_sock(sk);
P
Per Liden 已提交
2452 2453 2454

	switch (opt) {
	case TIPC_IMPORTANCE:
2455
		value = tsk_importance(tsk);
P
Per Liden 已提交
2456 2457
		break;
	case TIPC_SRC_DROPPABLE:
2458
		value = tsk_unreliable(tsk);
P
Per Liden 已提交
2459 2460
		break;
	case TIPC_DEST_DROPPABLE:
2461
		value = tsk_unreturnable(tsk);
P
Per Liden 已提交
2462 2463
		break;
	case TIPC_CONN_TIMEOUT:
2464
		value = tsk->conn_timeout;
2465
		/* no need to set "res", since already 0 at this point */
P
Per Liden 已提交
2466
		break;
2467
	case TIPC_NODE_RECVQ_DEPTH:
2468
		value = 0; /* was tipc_queue_size, now obsolete */
2469
		break;
2470
	case TIPC_SOCK_RECVQ_DEPTH:
2471 2472
		value = skb_queue_len(&sk->sk_receive_queue);
		break;
P
Per Liden 已提交
2473 2474 2475 2476
	default:
		res = -EINVAL;
	}

2477 2478
	release_sock(sk);

2479 2480
	if (res)
		return res;	/* "get" failed */
P
Per Liden 已提交
2481

2482 2483 2484 2485 2486 2487 2488
	if (len < sizeof(value))
		return -EINVAL;

	if (copy_to_user(ov, &value, sizeof(value)))
		return -EFAULT;

	return put_user(sizeof(value), ol);
P
Per Liden 已提交
2489 2490
}

2491
static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
E
Erik Hugne 已提交
2492
{
2493
	struct sock *sk = sock->sk;
E
Erik Hugne 已提交
2494 2495 2496 2497 2498 2499 2500
	struct tipc_sioc_ln_req lnr;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case SIOCGETLINKNAME:
		if (copy_from_user(&lnr, argp, sizeof(lnr)))
			return -EFAULT;
2501 2502
		if (!tipc_node_get_linkname(sock_net(sk),
					    lnr.bearer_id & 0xffff, lnr.peer,
E
Erik Hugne 已提交
2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513
					    lnr.linkname, TIPC_MAX_LINK_NAME)) {
			if (copy_to_user(argp, &lnr, sizeof(lnr)))
				return -EFAULT;
			return 0;
		}
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}

2514 2515 2516 2517
static int tipc_socketpair(struct socket *sock1, struct socket *sock2)
{
	struct tipc_sock *tsk2 = tipc_sk(sock2->sk);
	struct tipc_sock *tsk1 = tipc_sk(sock1->sk);
E
Erik Hugne 已提交
2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532
	u32 onode = tipc_own_addr(sock_net(sock1->sk));

	tsk1->peer.family = AF_TIPC;
	tsk1->peer.addrtype = TIPC_ADDR_ID;
	tsk1->peer.scope = TIPC_NODE_SCOPE;
	tsk1->peer.addr.id.ref = tsk2->portid;
	tsk1->peer.addr.id.node = onode;
	tsk2->peer.family = AF_TIPC;
	tsk2->peer.addrtype = TIPC_ADDR_ID;
	tsk2->peer.scope = TIPC_NODE_SCOPE;
	tsk2->peer.addr.id.ref = tsk1->portid;
	tsk2->peer.addr.id.node = onode;

	tipc_sk_finish_conn(tsk1, tsk2->portid, onode);
	tipc_sk_finish_conn(tsk2, tsk1->portid, onode);
2533 2534 2535
	return 0;
}

2536 2537
/* Protocol switches for the various types of TIPC sockets */

2538
static const struct proto_ops msg_ops = {
2539
	.owner		= THIS_MODULE,
P
Per Liden 已提交
2540
	.family		= AF_TIPC,
2541 2542 2543
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
E
Erik Hugne 已提交
2544
	.socketpair	= tipc_socketpair,
2545
	.accept		= sock_no_accept,
2546 2547
	.getname	= tipc_getname,
	.poll		= tipc_poll,
E
Erik Hugne 已提交
2548
	.ioctl		= tipc_ioctl,
2549
	.listen		= sock_no_listen,
2550 2551 2552 2553 2554
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_sendmsg,
	.recvmsg	= tipc_recvmsg,
2555 2556
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
P
Per Liden 已提交
2557 2558
};

2559
static const struct proto_ops packet_ops = {
2560
	.owner		= THIS_MODULE,
P
Per Liden 已提交
2561
	.family		= AF_TIPC,
2562 2563 2564
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
2565
	.socketpair	= tipc_socketpair,
2566 2567 2568
	.accept		= tipc_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
E
Erik Hugne 已提交
2569
	.ioctl		= tipc_ioctl,
2570 2571 2572 2573 2574 2575
	.listen		= tipc_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_send_packet,
	.recvmsg	= tipc_recvmsg,
2576 2577
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
P
Per Liden 已提交
2578 2579
};

2580
static const struct proto_ops stream_ops = {
2581
	.owner		= THIS_MODULE,
P
Per Liden 已提交
2582
	.family		= AF_TIPC,
2583 2584 2585
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
2586
	.socketpair	= tipc_socketpair,
2587 2588 2589
	.accept		= tipc_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
E
Erik Hugne 已提交
2590
	.ioctl		= tipc_ioctl,
2591 2592 2593 2594
	.listen		= tipc_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
2595
	.sendmsg	= tipc_sendstream,
2596
	.recvmsg	= tipc_recv_stream,
2597 2598
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
P
Per Liden 已提交
2599 2600
};

2601
static const struct net_proto_family tipc_family_ops = {
2602
	.owner		= THIS_MODULE,
P
Per Liden 已提交
2603
	.family		= AF_TIPC,
2604
	.create		= tipc_sk_create
P
Per Liden 已提交
2605 2606 2607 2608 2609
};

static struct proto tipc_proto = {
	.name		= "TIPC",
	.owner		= THIS_MODULE,
2610 2611
	.obj_size	= sizeof(struct tipc_sock),
	.sysctl_rmem	= sysctl_tipc_rmem
P
Per Liden 已提交
2612 2613 2614
};

/**
2615
 * tipc_socket_init - initialize TIPC socket interface
2616
 *
P
Per Liden 已提交
2617 2618
 * Returns 0 on success, errno otherwise
 */
2619
int tipc_socket_init(void)
P
Per Liden 已提交
2620 2621 2622
{
	int res;

2623
	res = proto_register(&tipc_proto, 1);
P
Per Liden 已提交
2624
	if (res) {
2625
		pr_err("Failed to register TIPC protocol type\n");
P
Per Liden 已提交
2626 2627 2628 2629 2630
		goto out;
	}

	res = sock_register(&tipc_family_ops);
	if (res) {
2631
		pr_err("Failed to register TIPC socket type\n");
P
Per Liden 已提交
2632 2633 2634 2635 2636 2637 2638 2639
		proto_unregister(&tipc_proto);
		goto out;
	}
 out:
	return res;
}

/**
2640
 * tipc_socket_stop - stop TIPC socket interface
P
Per Liden 已提交
2641
 */
2642
void tipc_socket_stop(void)
P
Per Liden 已提交
2643 2644 2645 2646
{
	sock_unregister(tipc_family_ops.family);
	proto_unregister(&tipc_proto);
}
2647 2648

/* Caller should hold socket lock for the passed tipc socket. */
2649
static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683
{
	u32 peer_node;
	u32 peer_port;
	struct nlattr *nest;

	peer_node = tsk_peer_node(tsk);
	peer_port = tsk_peer_port(tsk);

	nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);

	if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
		goto msg_full;
	if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
		goto msg_full;

	if (tsk->conn_type != 0) {
		if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
			goto msg_full;
		if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
			goto msg_full;
		if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
			goto msg_full;
	}
	nla_nest_end(skb, nest);

	return 0;

msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

/* Caller should hold socket lock for the passed tipc socket. */
2684 2685
static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
			    struct tipc_sock *tsk)
2686 2687 2688 2689
{
	int err;
	void *hdr;
	struct nlattr *attrs;
2690 2691
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
2692
	struct sock *sk = &tsk->sk;
2693 2694

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
2695
			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
2696 2697 2698 2699 2700 2701
	if (!hdr)
		goto msg_cancel;

	attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
	if (!attrs)
		goto genlmsg_cancel;
2702
	if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid))
2703
		goto attr_msg_cancel;
2704
	if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tn->own_addr))
2705 2706
		goto attr_msg_cancel;

2707
	if (tipc_sk_connected(sk)) {
2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731
		err = __tipc_nl_add_sk_con(skb, tsk);
		if (err)
			goto attr_msg_cancel;
	} else if (!list_empty(&tsk->publications)) {
		if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
			goto attr_msg_cancel;
	}
	nla_nest_end(skb, attrs);
	genlmsg_end(skb, hdr);

	return 0;

attr_msg_cancel:
	nla_nest_cancel(skb, attrs);
genlmsg_cancel:
	genlmsg_cancel(skb, hdr);
msg_cancel:
	return -EMSGSIZE;
}

int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	struct tipc_sock *tsk;
2732 2733
	const struct bucket_table *tbl;
	struct rhash_head *pos;
2734 2735
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
2736 2737
	u32 tbl_id = cb->args[0];
	u32 prev_portid = cb->args[1];
2738

2739
	rcu_read_lock();
2740
	tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
2741 2742
	for (; tbl_id < tbl->size; tbl_id++) {
		rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) {
2743
			spin_lock_bh(&tsk->sk.sk_lock.slock);
2744 2745 2746 2747 2748
			if (prev_portid && prev_portid != tsk->portid) {
				spin_unlock_bh(&tsk->sk.sk_lock.slock);
				continue;
			}

2749
			err = __tipc_nl_add_sk(skb, cb, tsk);
2750 2751 2752 2753 2754 2755
			if (err) {
				prev_portid = tsk->portid;
				spin_unlock_bh(&tsk->sk.sk_lock.slock);
				goto out;
			}
			prev_portid = 0;
2756 2757
			spin_unlock_bh(&tsk->sk.sk_lock.slock);
		}
2758
	}
2759
out:
2760
	rcu_read_unlock();
2761 2762
	cb->args[0] = tbl_id;
	cb->args[1] = prev_portid;
2763 2764 2765

	return skb->len;
}
2766 2767

/* Caller should hold socket lock for the passed tipc socket. */
2768 2769 2770
static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct publication *publ)
2771 2772 2773 2774 2775
{
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
2776
			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806
	if (!hdr)
		goto msg_cancel;

	attrs = nla_nest_start(skb, TIPC_NLA_PUBL);
	if (!attrs)
		goto genlmsg_cancel;

	if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
		goto attr_msg_cancel;

	nla_nest_end(skb, attrs);
	genlmsg_end(skb, hdr);

	return 0;

attr_msg_cancel:
	nla_nest_cancel(skb, attrs);
genlmsg_cancel:
	genlmsg_cancel(skb, hdr);
msg_cancel:
	return -EMSGSIZE;
}

/* Caller should hold socket lock for the passed tipc socket. */
2807 2808 2809
/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
				  struct netlink_callback *cb,
				  struct tipc_sock *tsk, u32 *last_publ)
{
	struct publication *p;
	int err;

	if (*last_publ) {
		/* Resume: find the publication the previous pass stopped at */
		list_for_each_entry(p, &tsk->publications, pport_list) {
			if (p->key == *last_publ)
				break;
		}
		if (p->key != *last_publ) {
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistence check to fail in the netlink callback
			 * handler. Resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			*last_publ = 0;
			return -EPIPE;
		}
	} else {
		p = list_first_entry(&tsk->publications, struct publication,
				     pport_list);
	}

	list_for_each_entry_from(p, &tsk->publications, pport_list) {
		err = __tipc_nl_add_sk_publ(skb, cb, p);
		if (err) {
			/* Message full: remember where to resume */
			*last_publ = p->key;
			return err;
		}
	}
	*last_publ = 0;

	return 0;
}

int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
2850
	u32 tsk_portid = cb->args[0];
2851 2852
	u32 last_publ = cb->args[1];
	u32 done = cb->args[2];
2853
	struct net *net = sock_net(skb->sk);
2854 2855
	struct tipc_sock *tsk;

2856
	if (!tsk_portid) {
2857 2858 2859 2860 2861 2862 2863
		struct nlattr **attrs;
		struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];

		err = tipc_nlmsg_parse(cb->nlh, &attrs);
		if (err)
			return err;

2864 2865 2866
		if (!attrs[TIPC_NLA_SOCK])
			return -EINVAL;

2867 2868 2869 2870 2871 2872 2873 2874 2875
		err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
				       attrs[TIPC_NLA_SOCK],
				       tipc_nl_sock_policy);
		if (err)
			return err;

		if (!sock[TIPC_NLA_SOCK_REF])
			return -EINVAL;

2876
		tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
2877 2878 2879 2880 2881
	}

	if (done)
		return 0;

2882
	tsk = tipc_sk_lookup(net, tsk_portid);
2883 2884 2885 2886 2887 2888 2889 2890
	if (!tsk)
		return -EINVAL;

	lock_sock(&tsk->sk);
	err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
	if (!err)
		done = 1;
	release_sock(&tsk->sk);
2891
	sock_put(&tsk->sk);
2892

2893
	cb->args[0] = tsk_portid;
2894 2895 2896 2897 2898
	cb->args[1] = last_publ;
	cb->args[2] = done;

	return skb->len;
}