socket.c 73.5 KB
Newer Older
P
Per Liden 已提交
1
/*
2
 * net/tipc/socket.c: TIPC socket API
3
 *
4
 * Copyright (c) 2001-2007, 2012-2016, Ericsson AB
5
 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
P
Per Liden 已提交
6 7
 * All rights reserved.
 *
P
Per Liden 已提交
8
 * Redistribution and use in source and binary forms, with or without
P
Per Liden 已提交
9 10
 * modification, are permitted provided that the following conditions are met:
 *
P
Per Liden 已提交
11 12 13 14 15 16 17 18
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
P
Per Liden 已提交
19
 *
P
Per Liden 已提交
20 21 22 23 24 25 26 27 28 29 30 31 32 33
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
P
Per Liden 已提交
34 35 36
 * POSSIBILITY OF SUCH DAMAGE.
 */

37
#include <linux/rhashtable.h>
38 39
#include <linux/sched/signal.h>

P
Per Liden 已提交
40
#include "core.h"
41
#include "name_table.h"
E
Erik Hugne 已提交
42
#include "node.h"
43
#include "link.h"
44
#include "name_distr.h"
45
#include "socket.h"
46
#include "bcast.h"
47
#include "netlink.h"
48

49
#define CONN_TIMEOUT_DEFAULT	8000	/* default connect timeout = 8s */
50
#define CONN_PROBING_INTERVAL	msecs_to_jiffies(3600000)  /* [ms] => 1 h */
51 52 53
#define TIPC_FWD_MSG		1
#define TIPC_MAX_PORT		0xffffffff
#define TIPC_MIN_PORT		1
54
#define TIPC_ACK_RATE		4       /* ACK at 1/4 of rcv window size */
55

56 57
/* TIPC socket states, stored in sk->sk_state (see tipc_set_sk_state()).
 * Encoded with TCP state values; the mapping below fixes which TCP
 * constant stands for each TIPC state.
 */
enum {
	TIPC_LISTEN = TCP_LISTEN,
	TIPC_ESTABLISHED = TCP_ESTABLISHED,
	TIPC_OPEN = TCP_CLOSE,
	TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
	TIPC_CONNECTING = TCP_SYN_SENT,
};

64 65 66 67 68 69 70
/**
 * struct tipc_sock - TIPC socket structure
 * @sk: socket - interacts with 'port' and with user via the socket API
 * @conn_type: TIPC type used when connection was established
 * @conn_instance: TIPC instance used when connection was established
 * @published: non-zero if port has one or more associated names
 * @max_pkt: maximum packet size "hint" used when building messages sent by port
 * @portid: unique port identity in TIPC socket hash table
 * @phdr: preformatted message header used when sending messages
 * @cong_links: list of congested links
 * @publications: list of publications for port
 * @pub_count: total # of publications port has made during its lifetime
 * @conn_timeout: the time we can wait for an unresponded setup request
 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
 * @probe_unacked: cleared when any valid CONN protocol msg arrives from peer
 * @cong_link_cnt: number of congested links
 * @snt_unacked: # messages sent by socket, and not yet acked by peer
 * @snd_win: send window, updated from peer's advertised window (CONN_ACK)
 * @peer_caps: peer capability bits (e.g. TIPC_BLOCK_FLOWCTL)
 * @rcv_unacked: # messages read by user, but not yet acked back to peer
 * @rcv_win: receive window advertised to peer
 * @peer: 'connected' peer for dgram/rdm
 * @node: hash table node
 * @mc_method: cookie for use between socket and broadcast layer
 * @rcu: rcu struct for tipc_sock
 */
struct tipc_sock {
	struct sock sk;
	u32 conn_type;
	u32 conn_instance;
	int published;
	u32 max_pkt;
	u32 portid;
	struct tipc_msg phdr;
	struct list_head cong_links;
	struct list_head publications;
	u32 pub_count;
	uint conn_timeout;
	atomic_t dupl_rcvcnt;
	bool probe_unacked;
	u16 cong_link_cnt;
	u16 snt_unacked;
	u16 snd_win;
	u16 peer_caps;
	u16 rcv_unacked;
	u16 rcv_win;
	struct sockaddr_tipc peer;
	struct rhash_head node;
	struct tipc_mc_method mc_method;
	struct rcu_head rcu;
};
P
Per Liden 已提交
113

114
static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
115
static void tipc_data_ready(struct sock *sk);
116
static void tipc_write_space(struct sock *sk);
117
static void tipc_sock_destruct(struct sock *sk);
118
static int tipc_release(struct socket *sock);
119 120
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
		       bool kern);
121
static void tipc_sk_timeout(unsigned long data);
122
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
J
Jon Paul Maloy 已提交
123
			   struct tipc_name_seq const *seq);
124
static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
J
Jon Paul Maloy 已提交
125
			    struct tipc_name_seq const *seq);
126
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
127 128
static int tipc_sk_insert(struct tipc_sock *tsk);
static void tipc_sk_remove(struct tipc_sock *tsk);
129
static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
130
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
P
Per Liden 已提交
131

132 133 134
static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
static const struct proto_ops msg_ops;
P
Per Liden 已提交
135
static struct proto tipc_proto;
136 137
static const struct rhashtable_params tsk_rht_params;

138 139 140 141 142
/* tsk_own_node - own node address, as recorded in the socket's
 * preformatted message header
 */
static u32 tsk_own_node(struct tipc_sock *tsk)
{
	return msg_prevnode(&tsk->phdr);
}

143
/* tsk_peer_node - peer node address from the header template */
static u32 tsk_peer_node(struct tipc_sock *tsk)
{
	return msg_destnode(&tsk->phdr);
}

148
/* tsk_peer_port - peer port number from the header template */
static u32 tsk_peer_port(struct tipc_sock *tsk)
{
	return msg_destport(&tsk->phdr);
}

153
static  bool tsk_unreliable(struct tipc_sock *tsk)
154
{
155
	return msg_src_droppable(&tsk->phdr) != 0;
156 157
}

158
/* tsk_set_unreliable - set/clear the source-droppable bit in the header */
static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
{
	u32 droppable = unreliable ? 1 : 0;

	msg_set_src_droppable(&tsk->phdr, droppable);
}

163
static bool tsk_unreturnable(struct tipc_sock *tsk)
164
{
165
	return msg_dest_droppable(&tsk->phdr) != 0;
166 167
}

168
/* tsk_set_unreturnable - set/clear the dest-droppable bit in the header */
static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
{
	u32 droppable = unreturnable ? 1 : 0;

	msg_set_dest_droppable(&tsk->phdr, droppable);
}

173
/* tsk_importance - message importance level from the header template */
static int tsk_importance(struct tipc_sock *tsk)
{
	return msg_importance(&tsk->phdr);
}

178
static int tsk_set_importance(struct tipc_sock *tsk, int imp)
179 180 181
{
	if (imp > TIPC_CRITICAL_IMPORTANCE)
		return -EINVAL;
182
	msg_set_importance(&tsk->phdr, (u32)imp);
183 184
	return 0;
}
185

186 187 188 189 190
/* tipc_sk - cast a generic struct sock to its containing tipc_sock */
static struct tipc_sock *tipc_sk(const struct sock *sk)
{
	return container_of(sk, struct tipc_sock, sk);
}

191
/* tsk_conn_cong - true if the count of sent-but-unacked messages
 * exceeds the send window, i.e. the connection is congested
 */
static bool tsk_conn_cong(struct tipc_sock *tsk)
{
	return tsk->snt_unacked > tsk->snd_win;
}

/* tsk_adv_blocks(): translate a buffer size in bytes to number of
 * advertisable blocks, taking into account the ratio truesize(len)/len
 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ
 */
static u16 tsk_adv_blocks(int len)
{
	return len / FLOWCTL_BLK_SZ / 4;
}

/* tsk_inc(): increment counter for sent or received data
 * - If block based flow control is not supported by peer we
 *   fall back to message based ditto, incrementing the counter
 *   by exactly one per message regardless of size
 */
static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
{
	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
		return ((msglen / FLOWCTL_BLK_SZ) + 1);
	return 1;
}

216
/**
217
 * tsk_advance_rx_queue - discard first buffer in socket receive queue
218 219
 *
 * Caller must hold socket lock
P
Per Liden 已提交
220
 */
221
static void tsk_advance_rx_queue(struct sock *sk)
P
Per Liden 已提交
222
{
223
	kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
P
Per Liden 已提交
224 225
}

226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241
/* tipc_sk_respond() : reverse @skb into a rejection/response carrying @err
 * and transmit it back towards the original sender
 */
static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
{
	struct net *net = sock_net(sk);
	struct tipc_msg *hdr;
	u32 onode = tipc_own_addr(net);

	/* tipc_msg_reverse() consumes the buffer on failure */
	if (!tipc_msg_reverse(onode, &skb, err))
		return;

	hdr = buf_msg(skb);
	tipc_node_xmit_skb(net, skb, msg_destnode(hdr), msg_origport(hdr));
}

P
Per Liden 已提交
242
/**
 * tsk_rej_rx_queue - reject all buffers in socket receive queue
 * @sk: socket whose receive queue is flushed
 *
 * Each buffer is bounced back via tipc_sk_respond() with TIPC_ERR_NO_PORT.
 *
 * Caller must hold socket lock
 */
static void tsk_rej_rx_queue(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
		tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
}

255 256
/* tipc_sk_connected - true if the socket is in TIPC_ESTABLISHED state */
static bool tipc_sk_connected(struct sock *sk)
{
	return sk->sk_state == TIPC_ESTABLISHED;
}

260 261 262 263 264 265 266 267 268 269
/* tipc_sk_type_connectionless - check if the socket is datagram socket
 * @sk: socket
 *
 * Returns true if connection less, false otherwise
 */
static bool tipc_sk_type_connectionless(struct sock *sk)
{
	return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
}

270
/* tsk_peer_msg - verify if message was sent by connected port's peer
 *
 * Handles cases where the node's network address has changed from
 * the default of <0.0.0> to its configured setting.
 */
static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
{
	struct sock *sk = &tsk->sk;
	struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
	u32 peer_port = tsk_peer_port(tsk);
	u32 orig_node;
	u32 peer_node;

	/* Only an established connection has a valid peer to compare with */
	if (unlikely(!tipc_sk_connected(sk)))
		return false;

	if (unlikely(msg_origport(msg) != peer_port))
		return false;

	orig_node = msg_orignode(msg);
	peer_node = tsk_peer_node(tsk);

	if (likely(orig_node == peer_node))
		return true;

	/* One side may still use the pre-configuration <0.0.0> address:
	 * accept the mismatch if the other side is our own (local) node.
	 */
	if (!orig_node && (peer_node == tn->own_addr))
		return true;

	if (!peer_node && (orig_node == tn->own_addr))
		return true;

	return false;
}

304 305 306 307 308 309 310 311 312
/* tipc_set_sk_state - set the sk_state of the socket
 * @sk: socket
 *
 * Caller must hold socket lock
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_set_sk_state(struct sock *sk, int state)
{
313
	int oldsk_state = sk->sk_state;
314 315 316
	int res = -EINVAL;

	switch (state) {
317 318 319
	case TIPC_OPEN:
		res = 0;
		break;
320
	case TIPC_LISTEN:
321
	case TIPC_CONNECTING:
322
		if (oldsk_state == TIPC_OPEN)
323 324
			res = 0;
		break;
325
	case TIPC_ESTABLISHED:
326
		if (oldsk_state == TIPC_CONNECTING ||
327
		    oldsk_state == TIPC_OPEN)
328 329
			res = 0;
		break;
330
	case TIPC_DISCONNECTING:
331
		if (oldsk_state == TIPC_CONNECTING ||
332 333 334
		    oldsk_state == TIPC_ESTABLISHED)
			res = 0;
		break;
335 336 337 338 339 340 341 342
	}

	if (!res)
		sk->sk_state = state;

	return res;
}

343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364
/* tipc_sk_sock_err - check for reasons to abort a wait on this socket
 *
 * Returns a pending socket error, a connection-state errno for
 * connection-oriented types, -EAGAIN on expired timeout, a signal errno
 * if a signal is pending, or 0 if waiting may continue.
 */
static int tipc_sk_sock_err(struct socket *sock, long *timeout)
{
	struct sock *sk = sock->sk;
	int err = sock_error(sk);

	if (err)
		return err;
	if (sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET) {
		if (sk->sk_state == TIPC_DISCONNECTING)
			return -EPIPE;
		if (!tipc_sk_connected(sk))
			return -ENOTCONN;
	}
	if (!*timeout)
		return -EAGAIN;
	if (signal_pending(current))
		return sock_intr_errno(*timeout);
	return 0;
}

365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383
/* tipc_wait_for_cond - sleep until condition_ becomes true, a socket error
 * occurs (see tipc_sk_sock_err()), the timeout expires, or a signal arrives.
 * The socket lock is released while sleeping and re-taken before condition_
 * is re-evaluated.  Evaluates to 0 when the condition became true, or to
 * the (negative) errno from tipc_sk_sock_err() otherwise.
 */
#define tipc_wait_for_cond(sock_, timeo_, condition_)			       \
({                                                                             \
	struct sock *sk_;						       \
	int rc_;							       \
									       \
	while ((rc_ = !(condition_))) {					       \
		DEFINE_WAIT_FUNC(wait_, woken_wake_function);	               \
		sk_ = (sock_)->sk;					       \
		rc_ = tipc_sk_sock_err((sock_), timeo_);		       \
		if (rc_)						       \
			break;						       \
		prepare_to_wait(sk_sleep(sk_), &wait_, TASK_INTERRUPTIBLE);    \
		release_sock(sk_);					       \
		*(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
		sched_annotate_sleep();				               \
		lock_sock(sk_);						       \
		remove_wait_queue(sk_sleep(sk_), &wait_);		       \
	}								       \
	rc_;								       \
})

P
Per Liden 已提交
386
/**
 * tipc_sk_create - create a TIPC socket
 * @net: network namespace (must be default network)
 * @sock: pre-allocated socket structure
 * @protocol: protocol indicator (must be 0)
 * @kern: caused by kernel or by userspace?
 *
 * This routine creates additional data structures used by the TIPC socket,
 * initializes them, and links them together.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_sk_create(struct net *net, struct socket *sock,
			  int protocol, int kern)
{
	struct tipc_net *tn;
	const struct proto_ops *ops;
	struct sock *sk;
	struct tipc_sock *tsk;
	struct tipc_msg *msg;

	/* Validate arguments */
	if (unlikely(protocol != 0))
		return -EPROTONOSUPPORT;

	/* Pick the proto_ops table matching the socket type */
	switch (sock->type) {
	case SOCK_STREAM:
		ops = &stream_ops;
		break;
	case SOCK_SEQPACKET:
		ops = &packet_ops;
		break;
	case SOCK_DGRAM:
	case SOCK_RDM:
		ops = &msg_ops;
		break;
	default:
		return -EPROTOTYPE;
	}

	/* Allocate socket's protocol area */
	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
	if (sk == NULL)
		return -ENOMEM;

	tsk = tipc_sk(sk);
	tsk->max_pkt = MAX_PKT_DEFAULT;
	INIT_LIST_HEAD(&tsk->publications);
	INIT_LIST_HEAD(&tsk->cong_links);
	msg = &tsk->phdr;
	tn = net_generic(sock_net(sk), tipc_net_id);

	/* Finish initializing socket data structures */
	sock->ops = ops;
	sock_init_data(sock, sk);
	tipc_set_sk_state(sk, TIPC_OPEN);
	if (tipc_sk_insert(tsk)) {
		/* NOTE(review): sk is left attached to sock here; cleanup is
		 * presumably done by the socket layer releasing the socket on
		 * error — verify no sk reference is leaked on this path.
		 */
		pr_warn("Socket create failed; port number exhausted\n");
		return -EINVAL;
	}

	/* Ensure tsk is visible before we read own_addr. */
	smp_mb();

	tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
		      NAMED_H_SIZE, 0);

	msg_set_origport(msg, tsk->portid);
	setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk);
	sk->sk_shutdown = 0;
	sk->sk_backlog_rcv = tipc_backlog_rcv;
	sk->sk_rcvbuf = sysctl_tipc_rmem[1];
	sk->sk_data_ready = tipc_data_ready;
	sk->sk_write_space = tipc_write_space;
	sk->sk_destruct = tipc_sock_destruct;
	tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
	atomic_set(&tsk->dupl_rcvcnt, 0);

	/* Start out with safe limits until we receive an advertised window */
	tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
	tsk->rcv_win = tsk->snd_win;

	if (tipc_sk_type_connectionless(sk)) {
		tsk_set_unreturnable(tsk, true);
		if (sock->type == SOCK_DGRAM)
			tsk_set_unreliable(tsk, true);
	}

	return 0;
}

477 478 479 480 481 482 483
/* tipc_sk_callback - deferred (RCU grace period) drop of a socket reference */
static void tipc_sk_callback(struct rcu_head *head)
{
	struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);

	sock_put(&tsk->sk);
}

484 485 486 487 488 489
/* __tipc_shutdown - flush the receive queue and tear down any connection,
 * bouncing unread messages back to their senders with @error
 *
 * Caller should hold socket lock for the socket.
 */
static void __tipc_shutdown(struct socket *sock, int error)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct net *net = sock_net(sk);
	long timeout = CONN_TIMEOUT_DEFAULT;
	u32 dnode = tsk_peer_node(tsk);
	struct sk_buff *skb;

	/* Avoid that hi-prio shutdown msgs bypass msgs in link wakeup queue */
	tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
					    !tsk_conn_cong(tsk)));

	/* Reject all unreceived messages, except on an active connection
	 * (which disconnects locally & sends a 'FIN+' to peer).
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		/* Partially read buffers are just discarded, not bounced */
		if (TIPC_SKB_CB(skb)->bytes_read) {
			kfree_skb(skb);
			continue;
		}
		if (!tipc_sk_type_connectionless(sk) &&
		    sk->sk_state != TIPC_DISCONNECTING) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			tipc_node_remove_conn(net, dnode, tsk->portid);
		}
		tipc_sk_respond(sk, skb, error);
	}

	if (tipc_sk_type_connectionless(sk))
		return;

	/* Still connected: notify peer explicitly with a FIN message */
	if (sk->sk_state != TIPC_DISCONNECTING) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
				      TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
				      tsk_own_node(tsk), tsk_peer_port(tsk),
				      tsk->portid, error);
		if (skb)
			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
		tipc_node_remove_conn(net, dnode, tsk->portid);
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
	}
}

P
Per Liden 已提交
529
/**
 * tipc_release - destroy a TIPC socket
 * @sock: socket to destroy
 *
 * This routine cleans up any messages that are still queued on the socket.
 * For DGRAM and RDM socket types, all queued messages are rejected.
 * For SEQPACKET and STREAM socket types, the first message is rejected
 * and any others are discarded.  (If the first message on a STREAM socket
 * is partially-read, it is discarded and the next one is rejected instead.)
 *
 * NOTE: Rejected messages are not necessarily returned to the sender!  They
 * are returned or discarded according to the "destination droppable" setting
 * specified for the message by the sender.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk;

	/*
	 * Exit if socket isn't fully initialized (occurs when a failed accept()
	 * releases a pre-allocated child socket that was never used)
	 */
	if (sk == NULL)
		return 0;

	tsk = tipc_sk(sk);
	lock_sock(sk);

	__tipc_shutdown(sock, TIPC_ERR_NO_PORT);
	sk->sk_shutdown = SHUTDOWN_MASK;
	tipc_sk_withdraw(tsk, 0, NULL);
	sk_stop_timer(sk, &sk->sk_timer);
	tipc_sk_remove(tsk);

	/* Reject any messages that accumulated in backlog queue */
	release_sock(sk);
	u32_list_purge(&tsk->cong_links);
	tsk->cong_link_cnt = 0;
	/* Defer the final sock_put() until after an RCU grace period */
	call_rcu(&tsk->rcu, tipc_sk_callback);
	sock->sk = NULL;

	return 0;
}

/**
 * tipc_bind - associate or disassociate TIPC name(s) with a socket
 * @sock: socket structure
 * @uaddr: socket address describing name(s) and desired operation
 * @uaddr_len: size of socket address data structure
 *
 * Name and name sequence binding is indicated using a positive scope value;
 * a negative scope value unbinds the specified name.  Specifying no name
 * (i.e. a socket address length of 0) unbinds all names from the socket.
 *
 * Returns 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it doesn't
 *       access any non-constant socket information.
 */
static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
		     int uaddr_len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct tipc_sock *tsk = tipc_sk(sk);
	int res = -EINVAL;

	lock_sock(sk);
	/* Zero-length address: withdraw all published names */
	if (unlikely(!uaddr_len)) {
		res = tipc_sk_withdraw(tsk, 0, NULL);
		goto exit;
	}

	if (uaddr_len < sizeof(struct sockaddr_tipc)) {
		res = -EINVAL;
		goto exit;
	}
	if (addr->family != AF_TIPC) {
		res = -EAFNOSUPPORT;
		goto exit;
	}

	/* A single name is treated as a one-element name sequence */
	if (addr->addrtype == TIPC_ADDR_NAME)
		addr->addr.nameseq.upper = addr->addr.nameseq.lower;
	else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
		res = -EAFNOSUPPORT;
		goto exit;
	}

	/* Reserved types may only be bound by the internal services */
	if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
	    (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
	    (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
		res = -EACCES;
		goto exit;
	}

	res = (addr->scope > 0) ?
		tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
		tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
exit:
	release_sock(sk);
	return res;
}

636
/**
637
 * tipc_getname - get port ID of socket or peer socket
P
Per Liden 已提交
638 639 640
 * @sock: socket structure
 * @uaddr: area for returned socket address
 * @uaddr_len: area for returned length of socket address
641
 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
642
 *
P
Per Liden 已提交
643
 * Returns 0 on success, errno otherwise
644
 *
645 646
 * NOTE: This routine doesn't need to take the socket lock since it only
 *       accesses socket information that is unchanging (or which changes in
647
 *       a completely predictable manner).
P
Per Liden 已提交
648
 */
649 650
static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
			int *uaddr_len, int peer)
P
Per Liden 已提交
651 652
{
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
653 654
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
655
	struct tipc_net *tn = net_generic(sock_net(sock->sk), tipc_net_id);
P
Per Liden 已提交
656

657
	memset(addr, 0, sizeof(*addr));
658
	if (peer) {
659
		if ((!tipc_sk_connected(sk)) &&
660
		    ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
661
			return -ENOTCONN;
662 663
		addr->addr.id.ref = tsk_peer_port(tsk);
		addr->addr.id.node = tsk_peer_node(tsk);
664
	} else {
665
		addr->addr.id.ref = tsk->portid;
666
		addr->addr.id.node = tn->own_addr;
667
	}
P
Per Liden 已提交
668 669 670 671 672 673 674

	*uaddr_len = sizeof(*addr);
	addr->addrtype = TIPC_ADDR_ID;
	addr->family = AF_TIPC;
	addr->scope = 0;
	addr->addr.name.domain = 0;

675
	return 0;
P
Per Liden 已提交
676 677 678
}

/**
 * tipc_poll - read and possibly block on pollmask
 * @file: file structure associated with the socket
 * @sock: socket for which to calculate the poll bits
 * @wait: poll table to register the wait queue with
 *
 * Returns pollmask value
 *
 * COMMENTARY:
 * It appears that the usual socket locking mechanisms are not useful here
 * since the pollmask info is potentially out-of-date the moment this routine
 * exits.  TCP and other protocols seem to rely on higher level poll routines
 * to handle any preventable race conditions, so TIPC will do the same ...
 *
 * IMPORTANT: The fact that a read or write operation is indicated does NOT
 * imply that the operation will succeed, merely that it should be performed
 * and will not block.
 */
static unsigned int tipc_poll(struct file *file, struct socket *sock,
			      poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	u32 mask = 0;

	sock_poll_wait(file, sk_sleep(sk), wait);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	switch (sk->sk_state) {
	case TIPC_ESTABLISHED:
		/* Writable only while neither link nor peer is congested */
		if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
			mask |= POLLOUT;
		/* fall thru' */
	case TIPC_LISTEN:
	case TIPC_CONNECTING:
		if (!skb_queue_empty(&sk->sk_receive_queue))
			mask |= (POLLIN | POLLRDNORM);
		break;
	case TIPC_OPEN:
		if (!tsk->cong_link_cnt)
			mask |= POLLOUT;
		if (tipc_sk_type_connectionless(sk) &&
		    (!skb_queue_empty(&sk->sk_receive_queue)))
			mask |= (POLLIN | POLLRDNORM);
		break;
	case TIPC_DISCONNECTING:
		mask = (POLLIN | POLLRDNORM | POLLHUP);
		break;
	}

	return mask;
}

735 736 737 738
/**
 * tipc_sendmcast - send multicast message
 * @sock: socket structure
 * @seq: destination name sequence (type + lower/upper instance range)
 * @msg: message to send
 * @dlen: length of data to send
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_sendmcast(struct  socket *sock, struct tipc_name_seq *seq,
			  struct msghdr *msg, size_t dlen, long timeout)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	int mtu = tipc_bcast_get_mtu(net);
	struct tipc_mc_method *method = &tsk->mc_method;
	u32 domain = addr_domain(net, TIPC_CLUSTER_SCOPE);
	struct sk_buff_head pkts;
	struct tipc_nlist dsts;
	int rc;

	/* Block or return if any destination link is congested */
	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
	if (unlikely(rc))
		return rc;

	/* Lookup destination nodes */
	tipc_nlist_init(&dsts, tipc_own_addr(net));
	tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower,
				      seq->upper, domain, &dsts);
	if (!dsts.local && !dsts.remote)
		return -EHOSTUNREACH;

	/* Build message header */
	msg_set_type(hdr, TIPC_MCAST_MSG);
	msg_set_hdr_sz(hdr, MCAST_H_SIZE);
	msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
	msg_set_destport(hdr, 0);
	msg_set_destnode(hdr, 0);
	msg_set_nametype(hdr, seq->type);
	msg_set_namelower(hdr, seq->lower);
	msg_set_nameupper(hdr, seq->upper);

	/* Build message as chain of buffers */
	skb_queue_head_init(&pkts);
	rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);

	/* Send message if build was successful */
	if (unlikely(rc == dlen))
		rc = tipc_mcast_xmit(net, &pkts, method, &dsts,
				     &tsk->cong_link_cnt);

	tipc_nlist_purge(&dsts);

	return rc ? rc : dlen;
}

796 797 798 799 800 801
/**
 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
 * @net: network namespace
 * @arrvq: queue with arriving messages, to be cloned after destination lookup
 * @inputq: queue with cloned messages, delivered to socket after dest lookup
 *
 * Multi-threaded: parallel calls with reference to same queues may occur
 */
void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
		       struct sk_buff_head *inputq)
{
	struct tipc_msg *msg;
	struct list_head dports;
	u32 portid;
	u32 scope = TIPC_CLUSTER_SCOPE;
	struct sk_buff_head tmpq;
	uint hsz;
	struct sk_buff *skb, *_skb;

	__skb_queue_head_init(&tmpq);
	INIT_LIST_HEAD(&dports);

	skb = tipc_skb_peek(arrvq, &inputq->lock);
	for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
		msg = buf_msg(skb);
		hsz = skb_headroom(skb) + msg_hdr_sz(msg);

		/* Messages from the own node also match node-scope names */
		if (in_own_node(net, msg_orignode(msg)))
			scope = TIPC_NODE_SCOPE;

		/* Create destination port list and message clones: */
		tipc_nametbl_mc_translate(net,
					  msg_nametype(msg), msg_namelower(msg),
					  msg_nameupper(msg), scope, &dports);
		portid = u32_pop(&dports);
		for (; portid; portid = u32_pop(&dports)) {
			_skb = __pskb_copy(skb, hsz, GFP_ATOMIC);
			if (_skb) {
				msg_set_destport(buf_msg(_skb), portid);
				__skb_queue_tail(&tmpq, _skb);
				continue;
			}
			pr_warn("Failed to clone mcast rcv buffer\n");
		}
		/* Append to inputq if not already done by other thread */
		spin_lock_bh(&inputq->lock);
		if (skb_peek(arrvq) == skb) {
			skb_queue_splice_tail_init(&tmpq, inputq);
			kfree_skb(__skb_dequeue(arrvq));
		}
		spin_unlock_bh(&inputq->lock);
		/* Drop any clones left over when another thread delivered */
		__skb_queue_purge(&tmpq);
		kfree_skb(skb);
	}
	tipc_sk_rcv(net, inputq);
}

852 853 854
/**
 * tipc_sk_proto_rcv - receive a connection mng protocol message
 * @tsk: receiving socket
 * @skb: pointer to message buffer.  Consumed by this function.
 * @xmitq: queue for any reply message (CONN_PROBE_REPLY) to be sent
 */
static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
			      struct sk_buff_head *xmitq)
{
	struct sock *sk = &tsk->sk;
	u32 onode = tsk_own_node(tsk);
	struct tipc_msg *hdr = buf_msg(skb);
	int mtyp = msg_type(hdr);
	bool conn_cong;

	/* Ignore if connection cannot be validated: */
	if (!tsk_peer_msg(tsk, hdr))
		goto exit;

	/* An error code from the peer aborts the connection */
	if (unlikely(msg_errcode(hdr))) {
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
		tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
				      tsk_peer_port(tsk));
		sk->sk_state_change(sk);
		goto exit;
	}

	/* Any valid peer message also serves as a probe answer */
	tsk->probe_unacked = false;

	if (mtyp == CONN_PROBE) {
		msg_set_type(hdr, CONN_PROBE_REPLY);
		/* Reversed skb is handed over to xmitq; don't free it here */
		if (tipc_msg_reverse(onode, &skb, TIPC_OK))
			__skb_queue_tail(xmitq, skb);
		return;
	} else if (mtyp == CONN_ACK) {
		conn_cong = tsk_conn_cong(tsk);
		tsk->snt_unacked -= msg_conn_ack(hdr);
		if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
			tsk->snd_win = msg_adv_win(hdr);
		/* Wake up writers that were blocked on congestion */
		if (conn_cong)
			sk->sk_write_space(sk);
	} else if (mtyp != CONN_PROBE_REPLY) {
		pr_warn("Received unknown CONN_PROTO msg\n");
	}
exit:
	kfree_skb(skb);
}

899 900 901 902
/* tipc_sk_top_evt - intentionally empty: struct tipc_event notifications
 * are ignored by this handler
 */
static void tipc_sk_top_evt(struct tipc_sock *tsk, struct tipc_event *evt)
{
}

P
Per Liden 已提交
903
/**
904
 * tipc_sendmsg - send message in connectionless manner
P
Per Liden 已提交
905 906
 * @sock: socket structure
 * @m: message to send
907
 * @dsz: amount of user data to be sent
908
 *
P
Per Liden 已提交
909
 * Message must have an destination specified explicitly.
910
 * Used for SOCK_RDM and SOCK_DGRAM messages,
P
Per Liden 已提交
911 912
 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
913
 *
P
Per Liden 已提交
914 915
 * Returns the number of bytes sent on success, or errno otherwise
 */
916
static int tipc_sendmsg(struct socket *sock,
917
			struct msghdr *m, size_t dsz)
918 919 920 921 922 923 924 925 926 927 928
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
	ret = __tipc_sendmsg(sock, m, dsz);
	release_sock(sk);

	return ret;
}

929
/**
 * __tipc_sendmsg - send message in connectionless manner, socket lock held
 * @sock: socket structure
 * @m: message to send; m->msg_name may carry an explicit destination
 * @dlen: amount of user data to be sent
 *
 * Resolves the destination (named, direct or multicast), builds the packet
 * chain and transmits it, blocking on link congestion unless non-blocking
 * I/O was requested.
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	struct list_head *clinks = &tsk->cong_links;
	bool syn = !tipc_sk_type_connectionless(sk);
	struct tipc_msg *hdr = &tsk->phdr;
	struct tipc_name_seq *seq;
	struct sk_buff_head pkts;
	u32 type, inst, domain;
	u32 dnode, dport;
	int mtu, rc;

	if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
		return -EMSGSIZE;

	/* No address given: fall back to the stored default peer; only
	 * valid when sending an implicit SYN on a connection-type socket.
	 */
	if (unlikely(!dest)) {
		dest = &tsk->peer;
		if (!syn || dest->family != AF_TIPC)
			return -EDESTADDRREQ;
	}

	if (unlikely(m->msg_namelen < sizeof(*dest)))
		return -EINVAL;

	if (unlikely(dest->family != AF_TIPC))
		return -EINVAL;

	/* Connection-type socket sending an implicit SYN: validate state */
	if (unlikely(syn)) {
		if (sk->sk_state == TIPC_LISTEN)
			return -EPIPE;
		if (sk->sk_state != TIPC_OPEN)
			return -EISCONN;
		if (tsk->published)
			return -EOPNOTSUPP;
		if (dest->addrtype == TIPC_ADDR_NAME) {
			tsk->conn_type = dest->addr.name.name.type;
			tsk->conn_instance = dest->addr.name.name.instance;
		}
	}

	seq = &dest->addr.nameseq;
	if (dest->addrtype == TIPC_ADDR_MCAST)
		return tipc_sendmcast(sock, seq, m, dlen, timeout);

	if (dest->addrtype == TIPC_ADDR_NAME) {
		type = dest->addr.name.name.type;
		inst = dest->addr.name.name.instance;
		domain = dest->addr.name.domain;
		dnode = domain;
		msg_set_type(hdr, TIPC_NAMED_MSG);
		msg_set_hdr_sz(hdr, NAMED_H_SIZE);
		msg_set_nametype(hdr, type);
		msg_set_nameinst(hdr, inst);
		msg_set_lookup_scope(hdr, tipc_addr_scope(domain));
		/* Translate service name to port/node; dnode updated in place */
		dport = tipc_nametbl_translate(net, type, inst, &dnode);
		msg_set_destnode(hdr, dnode);
		msg_set_destport(hdr, dport);
		if (unlikely(!dport && !dnode))
			return -EHOSTUNREACH;

	} else if (dest->addrtype == TIPC_ADDR_ID) {
		dnode = dest->addr.id.node;
		msg_set_type(hdr, TIPC_DIRECT_MSG);
		msg_set_lookup_scope(hdr, 0);
		msg_set_destnode(hdr, dnode);
		msg_set_destport(hdr, dest->addr.id.ref);
		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
	}

	/* Block or return if destination link is congested */
	rc = tipc_wait_for_cond(sock, &timeout, !u32_find(clinks, dnode));
	if (unlikely(rc))
		return rc;

	skb_queue_head_init(&pkts);
	mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;

	rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
	/* Link congestion: remember congested destination, report success */
	if (unlikely(rc == -ELINKCONG)) {
		u32_push(clinks, dnode);
		tsk->cong_link_cnt++;
		rc = 0;
	}

	if (unlikely(syn && !rc))
		tipc_set_sk_state(sk, TIPC_CONNECTING);

	return rc ? rc : dlen;
}

1026
/**
1027
 * tipc_sendstream - send stream-oriented data
P
Per Liden 已提交
1028
 * @sock: socket structure
1029 1030
 * @m: data to send
 * @dsz: total length of data to be transmitted
1031
 *
1032
 * Used for SOCK_STREAM data.
1033
 *
1034 1035
 * Returns the number of bytes sent on success (or partial success),
 * or errno if no data sent
P
Per Liden 已提交
1036
 */
1037
static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
1038 1039 1040 1041 1042
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
1043
	ret = __tipc_sendstream(sock, m, dsz);
1044 1045 1046 1047 1048
	release_sock(sk);

	return ret;
}

1049
/**
 * __tipc_sendstream - send stream-oriented data, socket lock held
 * @sock: socket structure
 * @m: data to send
 * @dlen: total length of data to be transmitted
 *
 * Sends the user data in maximum-message-sized chunks, waiting between
 * chunks for link and connection congestion to clear. A destination
 * address present in @m triggers an implicit connection setup via
 * __tipc_sendmsg().
 *
 * Returns the number of bytes sent on success (or partial success),
 * or errno if no data sent
 */
static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	struct sk_buff_head pkts;
	u32 dnode = tsk_peer_node(tsk);
	int send, sent = 0;
	int rc = 0;

	skb_queue_head_init(&pkts);

	if (unlikely(dlen > INT_MAX))
		return -EMSGSIZE;

	/* Handle implicit connection setup */
	if (unlikely(dest)) {
		rc = __tipc_sendmsg(sock, m, dlen);
		if (dlen && (dlen == rc))
			tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
		return rc;
	}

	do {
		/* Wait until link, connection window and state permit sending */
		rc = tipc_wait_for_cond(sock, &timeout,
					(!tsk->cong_link_cnt &&
					 !tsk_conn_cong(tsk) &&
					 tipc_sk_connected(sk)));
		if (unlikely(rc))
			break;

		send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
		rc = tipc_msg_build(hdr, m, sent, send, tsk->max_pkt, &pkts);
		if (unlikely(rc != send))
			break;

		rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
		/* Congested link: note it and treat the chunk as accepted */
		if (unlikely(rc == -ELINKCONG)) {
			tsk->cong_link_cnt = 1;
			rc = 0;
		}
		if (likely(!rc)) {
			tsk->snt_unacked += tsk_inc(tsk, send + MIN_H_SIZE);
			sent += send;
		}
	} while (sent < dlen && !rc);

	return sent ? sent : rc;
}

1102
/**
1103
 * tipc_send_packet - send a connection-oriented message
P
Per Liden 已提交
1104
 * @sock: socket structure
1105 1106
 * @m: message to send
 * @dsz: length of data to be transmitted
1107
 *
1108
 * Used for SOCK_SEQPACKET messages.
1109
 *
1110
 * Returns the number of bytes sent on success, or errno otherwise
P
Per Liden 已提交
1111
 */
1112
static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
P
Per Liden 已提交
1113
{
1114 1115
	if (dsz > TIPC_MAX_USER_MSG_SIZE)
		return -EMSGSIZE;
P
Per Liden 已提交
1116

1117
	return tipc_sendstream(sock, m, dsz);
P
Per Liden 已提交
1118 1119
}

1120
/* tipc_sk_finish_conn - complete the setup of a connection
 * @tsk: socket being connected
 * @peer_port: port number of the peer socket
 * @peer_node: network address of the peer node
 *
 * Prepares the socket's prebuilt message header for connection traffic,
 * arms the connection probe timer, registers the connection with the node
 * layer and selects flow control mode from the peer's capabilities.
 */
static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
				u32 peer_node)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_msg *msg = &tsk->phdr;

	msg_set_destnode(msg, peer_node);
	msg_set_destport(msg, peer_port);
	msg_set_type(msg, TIPC_CONN_MSG);
	msg_set_lookup_scope(msg, 0);
	msg_set_hdr_sz(msg, SHORT_H_SIZE);

	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTERVAL);
	tipc_set_sk_state(sk, TIPC_ESTABLISHED);
	tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
	tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
	tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
		return;

	/* Fall back to message based flow control */
	tsk->rcv_win = FLOWCTL_MSG_WIN;
	tsk->snd_win = FLOWCTL_MSG_WIN;
}

/**
 * set_orig_addr - capture sender's address for received message
 * @m: descriptor for message info
 * @msg: received message header
1152
 *
P
Per Liden 已提交
1153 1154
 * Note: Address is not captured if not requested by receiver.
 */
S
Sam Ravnborg 已提交
1155
static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
P
Per Liden 已提交
1156
{
1157
	DECLARE_SOCKADDR(struct sockaddr_tipc *, addr, m->msg_name);
P
Per Liden 已提交
1158

1159
	if (addr) {
P
Per Liden 已提交
1160 1161
		addr->family = AF_TIPC;
		addr->addrtype = TIPC_ADDR_ID;
1162
		memset(&addr->addr, 0, sizeof(addr->addr));
P
Per Liden 已提交
1163 1164
		addr->addr.id.ref = msg_origport(msg);
		addr->addr.id.node = msg_orignode(msg);
1165 1166
		addr->addr.name.domain = 0;	/* could leave uninitialized */
		addr->scope = 0;		/* could leave uninitialized */
P
Per Liden 已提交
1167 1168 1169 1170 1171
		m->msg_namelen = sizeof(struct sockaddr_tipc);
	}
}

/**
 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
 * @m: descriptor for message info
 * @msg: received message header
 * @tsk: TIPC port associated with message
 *
 * Note: Ancillary data is not captured if not requested by receiver.
 *
 * Returns 0 if successful, otherwise errno
 */
static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
				 struct tipc_sock *tsk)
{
	u32 anc_data[3];
	u32 err;
	u32 dest_type;
	int has_name;
	int res;

	if (likely(m->msg_controllen == 0))
		return 0;

	/* Optionally capture errored message object(s) */
	err = msg ? msg_errcode(msg) : 0;
	if (unlikely(err)) {
		anc_data[0] = err;
		anc_data[1] = msg_data_sz(msg);
		res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
		if (res)
			return res;
		/* Also return the rejected payload, if any */
		if (anc_data[1]) {
			res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
				       msg_data(msg));
			if (res)
				return res;
		}
	}

	/* Optionally capture message destination object */
	dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
	switch (dest_type) {
	case TIPC_NAMED_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		/* Single-instance name: lower bound doubles as upper bound */
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_namelower(msg);
		break;
	case TIPC_MCAST_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_nameupper(msg);
		break;
	case TIPC_CONN_MSG:
		has_name = (tsk->conn_type != 0);
		anc_data[0] = tsk->conn_type;
		anc_data[1] = tsk->conn_instance;
		anc_data[2] = tsk->conn_instance;
		break;
	default:
		has_name = 0;
	}
	if (has_name) {
		res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
		if (res)
			return res;
	}

	return 0;
}

1242
static void tipc_sk_send_ack(struct tipc_sock *tsk)
1243
{
1244 1245
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
1246
	struct sk_buff *skb = NULL;
1247
	struct tipc_msg *msg;
1248 1249
	u32 peer_port = tsk_peer_port(tsk);
	u32 dnode = tsk_peer_node(tsk);
1250

1251
	if (!tipc_sk_connected(sk))
1252
		return;
1253 1254 1255
	skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
			      dnode, tsk_own_node(tsk), peer_port,
			      tsk->portid, TIPC_OK);
1256
	if (!skb)
1257
		return;
1258
	msg = buf_msg(skb);
1259 1260 1261 1262 1263 1264 1265 1266
	msg_set_conn_ack(msg, tsk->rcv_unacked);
	tsk->rcv_unacked = 0;

	/* Adjust to and advertize the correct window limit */
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
		tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
		msg_set_adv_win(msg, tsk->rcv_win);
	}
1267
	tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
1268 1269
}

1270
/* tipc_wait_for_rcvmsg - wait for data to arrive on the receive queue
 * @sock: socket structure
 * @timeop: in/out: remaining timeout in jiffies, updated on return
 *
 * Sleeps (dropping the socket lock while asleep so packets can be queued)
 * until the receive queue is non-empty, the timeout expires, a signal is
 * pending, or the socket reports an error or receive shutdown.
 *
 * Returns 0 if a message is available, otherwise errno
 */
static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT(wait);
	long timeo = *timeop;
	int err = sock_error(sk);

	if (err)
		return err;

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
			if (sk->sk_shutdown & RCV_SHUTDOWN) {
				err = -ENOTCONN;
				break;
			}
			/* Release the lock while sleeping so the rx path
			 * can deliver messages into the receive queue.
			 */
			release_sock(sk);
			timeo = schedule_timeout(timeo);
			lock_sock(sk);
		}
		err = 0;
		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;

		err = sock_error(sk);
		if (err)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	*timeop = timeo;
	return err;
}

1310
/**
 * tipc_recvmsg - receive packet-oriented message
 * @sock: socket structure
 * @m: descriptor for message info
 * @buflen: length of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
 * If the complete message doesn't fit in user area, truncate it.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
			size_t buflen,	int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	bool connected = !tipc_sk_type_connectionless(sk);
	int rc, err, hlen, dlen, copy;
	long timeout;

	/* Catch invalid receive requests */
	if (unlikely(!buflen))
		return -EINVAL;

	lock_sock(sk);
	if (unlikely(connected && sk->sk_state == TIPC_OPEN)) {
		rc = -ENOTCONN;
		goto exit;
	}
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		/* Look at first msg in receive queue; wait if necessary */
		rc = tipc_wait_for_rcvmsg(sock, &timeout);
		if (unlikely(rc))
			goto exit;
		skb = skb_peek(&sk->sk_receive_queue);
		hdr = buf_msg(skb);
		dlen = msg_data_sz(hdr);
		hlen = msg_hdr_sz(hdr);
		err = msg_errcode(hdr);
		if (likely(dlen || err))
			break;
		/* Skip empty non-errored messages and look at the next one */
		tsk_advance_rx_queue(sk);
	} while (1);

	/* Collect msg meta data, including error code and rejected data */
	set_orig_addr(m, hdr);
	rc = tipc_sk_anc_data_recv(m, hdr, tsk);
	if (unlikely(rc))
		goto exit;

	/* Capture data if non-error msg, otherwise just set return value */
	if (likely(!err)) {
		copy = min_t(int, dlen, buflen);
		if (unlikely(copy != dlen))
			m->msg_flags |= MSG_TRUNC;
		rc = skb_copy_datagram_msg(skb, hlen, m, copy);
	} else {
		copy = 0;
		rc = 0;
		if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control)
			rc = -ECONNRESET;
	}
	if (unlikely(rc))
		goto exit;

	/* Caption of data or error code/rejected data was successful */
	if (unlikely(flags & MSG_PEEK))
		goto exit;

	tsk_advance_rx_queue(sk);
	if (likely(!connected))
		goto exit;

	/* Send connection flow control ack when applicable */
	tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
	if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
		tipc_sk_send_ack(tsk);
exit:
	release_sock(sk);
	return rc ? rc : copy;
}

1396
/**
 * tipc_recvstream - receive stream-oriented data
 * @sock: socket structure
 * @m: descriptor for message info
 * @buflen: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_STREAM messages only.  If not enough data is available
 * will optionally wait for more; never truncates data.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recvstream(struct socket *sock, struct msghdr *m,
			   size_t buflen, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct tipc_skb_cb *skb_cb;
	bool peek = flags & MSG_PEEK;
	int offset, required, copy, copied = 0;
	int hlen, dlen, err, rc;
	long timeout;

	/* Catch invalid receive attempts */
	if (unlikely(!buflen))
		return -EINVAL;

	lock_sock(sk);

	if (unlikely(sk->sk_state == TIPC_OPEN)) {
		rc = -ENOTCONN;
		goto exit;
	}
	required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen);
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		/* Look at first msg in receive queue; wait if necessary */
		rc = tipc_wait_for_rcvmsg(sock, &timeout);
		if (unlikely(rc))
			break;
		skb = skb_peek(&sk->sk_receive_queue);
		skb_cb = TIPC_SKB_CB(skb);
		hdr = buf_msg(skb);
		dlen = msg_data_sz(hdr);
		hlen = msg_hdr_sz(hdr);
		err = msg_errcode(hdr);

		/* Discard any empty non-errored (SYN-) message */
		if (unlikely(!dlen && !err)) {
			tsk_advance_rx_queue(sk);
			continue;
		}

		/* Collect msg meta data, incl. error code and rejected data */
		if (!copied) {
			set_orig_addr(m, hdr);
			rc = tipc_sk_anc_data_recv(m, hdr, tsk);
			if (rc)
				break;
		}

		/* Copy data if msg ok, otherwise return error/partial data */
		if (likely(!err)) {
			offset = skb_cb->bytes_read;
			copy = min_t(int, dlen - offset, buflen - copied);
			rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
			if (unlikely(rc))
				break;
			copied += copy;
			offset += copy;
			if (unlikely(offset < dlen)) {
				/* User buffer full: remember position in the
				 * message for the next call (unless peeking).
				 */
				if (!peek)
					skb_cb->bytes_read = offset;
				break;
			}
		} else {
			rc = 0;
			if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control)
				rc = -ECONNRESET;
			if (copied || rc)
				break;
		}

		if (unlikely(peek))
			break;

		tsk_advance_rx_queue(sk);

		/* Send connection flow control advertisement when applicable */
		tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
		if (unlikely(tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE))
			tipc_sk_send_ack(tsk);

		/* Exit if all requested data or FIN/error received */
		if (copied == buflen || err)
			break;

	} while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required);

exit:
	release_sock(sk);
	return copied ? copied : rc;
}

1501 1502 1503 1504 1505 1506 1507 1508 1509 1510
/**
 * tipc_write_space - wake up thread if port congestion is released
 * @sk: socket
 */
static void tipc_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	/* RCU protects the wait queue against concurrent socket teardown */
	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
						POLLWRNORM | POLLWRBAND);
	rcu_read_unlock();
}

/**
 * tipc_data_ready - wake up threads to indicate messages have been received
 * @sk: socket
 */
static void tipc_data_ready(struct sock *sk)
{
	struct socket_wq *wq;

	/* RCU protects the wait queue against concurrent socket teardown */
	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
						POLLRDNORM | POLLRDBAND);
	rcu_read_unlock();
}

1534 1535 1536 1537 1538
/* tipc_sock_destruct - free any messages still queued when the socket dies
 * @sk: socket being destroyed
 */
static void tipc_sock_destruct(struct sock *sk)
{
	__skb_queue_purge(&sk->sk_receive_queue);
}

1539 1540
/**
 * filter_connect - Handle all incoming messages for a connection-based socket
 * @tsk: TIPC socket
 * @skb: pointer to message buffer. Set to NULL if buffer is consumed
 *
 * Applies the socket's connection state machine to the incoming message.
 *
 * Returns true if everything ok, false otherwise
 */
static bool filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_msg *hdr = buf_msg(skb);
	u32 pport = msg_origport(hdr);
	u32 pnode = msg_orignode(hdr);

	/* Multicast can never be part of a connection */
	if (unlikely(msg_mcast(hdr)))
		return false;

	switch (sk->sk_state) {
	case TIPC_CONNECTING:
		/* Accept only ACK or NACK message */
		if (unlikely(!msg_connected(hdr))) {
			/* Ignore unless it comes from the pending peer */
			if (pport != tsk_peer_port(tsk) ||
			    pnode != tsk_peer_node(tsk))
				return false;

			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			sk->sk_err = ECONNREFUSED;
			sk->sk_state_change(sk);
			return true;
		}

		/* Connection setup was rejected by the peer */
		if (unlikely(msg_errcode(hdr))) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			sk->sk_err = ECONNREFUSED;
			sk->sk_state_change(sk);
			return true;
		}

		if (unlikely(!msg_isdata(hdr))) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			sk->sk_err = EINVAL;
			sk->sk_state_change(sk);
			return true;
		}

		tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr));
		msg_set_importance(&tsk->phdr, msg_importance(hdr));

		/* If 'ACK+' message, add to socket receive queue */
		if (msg_data_sz(hdr))
			return true;

		/* If empty 'ACK-' message, wake up sleeping connect() */
		sk->sk_data_ready(sk);

		/* 'ACK-' message is neither accepted nor rejected: */
		msg_set_dest_droppable(hdr, 1);
		return false;

	case TIPC_OPEN:
	case TIPC_DISCONNECTING:
		break;
	case TIPC_LISTEN:
		/* Accept only SYN message */
		if (!msg_connected(hdr) && !(msg_errcode(hdr)))
			return true;
		break;
	case TIPC_ESTABLISHED:
		/* Accept only connection-based messages sent by peer */
		if (unlikely(!tsk_peer_msg(tsk, hdr)))
			return false;

		if (unlikely(msg_errcode(hdr))) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			/* Let timer expire on it's own */
			tipc_node_remove_conn(net, tsk_peer_node(tsk),
					      tsk->portid);
			sk->sk_state_change(sk);
		}
		return true;
	default:
		pr_err("Unknown sk_state %u\n", sk->sk_state);
	}

	return false;
}

1627 1628 1629
/**
 * rcvbuf_limit - get proper overload limit of socket receive queue
 * @sk: socket
1630
 * @skb: message
1631
 *
1632 1633
 * For connection oriented messages, irrespective of importance,
 * default queue limit is 2 MB.
1634
 *
1635 1636
 * For connectionless messages, queue limits are based on message
 * importance as follows:
1637
 *
1638 1639 1640 1641
 * TIPC_LOW_IMPORTANCE       (2 MB)
 * TIPC_MEDIUM_IMPORTANCE    (4 MB)
 * TIPC_HIGH_IMPORTANCE      (8 MB)
 * TIPC_CRITICAL_IMPORTANCE  (16 MB)
1642 1643 1644
 *
 * Returns overload limit according to corresponding message importance
 */
1645
static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
1646
{
1647 1648 1649 1650 1651
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = buf_msg(skb);

	if (unlikely(!msg_connected(hdr)))
		return sk->sk_rcvbuf << msg_importance(hdr);
1652

1653 1654
	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
		return sk->sk_rcvbuf;
1655

1656
	return FLOWCTL_MSG_LIM;
1657 1658
}

1659
/**
 * filter_rcv - validate incoming message
 * @sk: socket
 * @skb: pointer to message.
 * @xmitq: queue collecting any response/rejected messages to be sent
 *
 * Enqueues message on receive queue if acceptable; optionally handles
 * disconnect indication for a connected socket.
 *
 * Called with socket lock already taken
 *
 * Returns true if message was added to socket receive queue, otherwise false
 */
static bool filter_rcv(struct sock *sk, struct sk_buff *skb,
		       struct sk_buff_head *xmitq)
{
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = buf_msg(skb);
	unsigned int limit = rcvbuf_limit(sk, skb);
	int err = TIPC_OK;

	/* Dispatch internal protocol messages; they are never queued */
	if (unlikely(!msg_isdata(hdr))) {
		switch (msg_user(hdr)) {
		case CONN_MANAGER:
			tipc_sk_proto_rcv(tsk, skb, xmitq);
			return false;
		case SOCK_WAKEUP:
			/* Congested link released: let senders resume */
			u32_del(&tsk->cong_links, msg_orignode(hdr));
			tsk->cong_link_cnt--;
			sk->sk_write_space(sk);
			break;
		case TOP_SRV:
			tipc_sk_top_evt(tsk, (void *)msg_data(hdr));
			break;
		default:
			break;
		}
		kfree_skb(skb);
		return false;
	}

	/* Drop if illegal message type */
	if (unlikely(msg_type(hdr) > TIPC_DIRECT_MSG)) {
		kfree_skb(skb);
		return false;
	}

	/* Reject if wrong message type for current socket state */
	if (tipc_sk_type_connectionless(sk)) {
		if (msg_connected(hdr)) {
			err = TIPC_ERR_NO_PORT;
			goto reject;
		}
	} else if (unlikely(!filter_connect(tsk, skb))) {
		err = TIPC_ERR_NO_PORT;
		goto reject;
	}

	/* Reject message if there isn't room to queue it */
	if (unlikely(sk_rmem_alloc_get(sk) + skb->truesize >= limit)) {
		err = TIPC_ERR_OVERLOAD;
		goto reject;
	}

	/* Enqueue message */
	TIPC_SKB_CB(skb)->bytes_read = 0;
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	skb_set_owner_r(skb, sk);

	sk->sk_data_ready(sk);
	return true;

reject:
	/* Bounce the message back to the sender with the error code */
	if (tipc_msg_reverse(tsk_own_node(tsk), &skb, err))
		__skb_queue_tail(xmitq, skb);
	return false;
}
P
Per Liden 已提交
1735

1736
/**
1737
 * tipc_backlog_rcv - handle incoming message from backlog queue
1738
 * @sk: socket
1739
 * @skb: message
1740
 *
1741
 * Caller must hold socket lock
1742 1743 1744
 *
 * Returns 0
 */
1745
static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
1746
{
1747
	unsigned int truesize = skb->truesize;
J
Jon Paul Maloy 已提交
1748 1749
	struct sk_buff_head xmitq;
	u32 dnode, selector;
1750

J
Jon Paul Maloy 已提交
1751 1752 1753
	__skb_queue_head_init(&xmitq);

	if (likely(filter_rcv(sk, skb, &xmitq))) {
1754
		atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt);
J
Jon Paul Maloy 已提交
1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765
		return 0;
	}

	if (skb_queue_empty(&xmitq))
		return 0;

	/* Send response/rejected message */
	skb = __skb_dequeue(&xmitq);
	dnode = msg_destnode(buf_msg(skb));
	selector = msg_origport(buf_msg(skb));
	tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
1766 1767 1768
	return 0;
}

1769
/**
 * tipc_sk_enqueue - extract all buffers with destination 'dport' from
 *                   inputq and try adding them to socket or backlog queue
 * @inputq: list of incoming buffers with potentially different destinations
 * @sk: socket where the buffers should be enqueued
 * @dport: port number for the socket
 * @xmitq: queue collecting any response/rejected messages to be sent
 *
 * Caller must hold socket lock
 */
static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
			    u32 dport, struct sk_buff_head *xmitq)
{
	unsigned long time_limit = jiffies + 2;
	struct sk_buff *skb;
	unsigned int lim;
	atomic_t *dcnt;
	u32 onode;

	while (skb_queue_len(inputq)) {
		/* Bound the time spent with the lock held */
		if (unlikely(time_after_eq(jiffies, time_limit)))
			return;

		skb = tipc_skb_dequeue(inputq, dport);
		if (unlikely(!skb))
			return;

		/* Add message directly to receive queue if possible */
		if (!sock_owned_by_user(sk)) {
			filter_rcv(sk, skb, xmitq);
			continue;
		}

		/* Try backlog, compensating for double-counted bytes */
		dcnt = &tipc_sk(sk)->dupl_rcvcnt;
		if (!sk->sk_backlog.len)
			atomic_set(dcnt, 0);
		lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
		if (likely(!sk_add_backlog(sk, skb, lim)))
			continue;

		/* Overload => reject message back to sender */
		onode = tipc_own_addr(sock_net(sk));
		if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
			__skb_queue_tail(xmitq, skb);
		break;
	}
}

1817
/**
 * tipc_sk_rcv - handle a chain of incoming buffers
 * @net: network namespace the buffers belong to
 * @inputq: buffer list containing the buffers
 * Consumes all buffers in list until inputq is empty
 * Note: may be called in multiple threads referring to the same queue
 */
void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
{
	struct sk_buff_head xmitq;
	u32 dnode, dport = 0;
	int err;
	struct tipc_sock *tsk;
	struct sock *sk;
	struct sk_buff *skb;

	__skb_queue_head_init(&xmitq);
	while (skb_queue_len(inputq)) {
		dport = tipc_skb_peek_port(inputq, dport);
		tsk = tipc_sk_lookup(net, dport);

		if (likely(tsk)) {
			sk = &tsk->sk;
			/* Only enqueue if no other thread owns the socket */
			if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
				tipc_sk_enqueue(inputq, sk, dport, &xmitq);
				spin_unlock_bh(&sk->sk_lock.slock);
			}
			/* Send pending response/rejected messages, if any */
			while ((skb = __skb_dequeue(&xmitq))) {
				dnode = msg_destnode(buf_msg(skb));
				tipc_node_xmit_skb(net, skb, dnode, dport);
			}
			sock_put(sk);
			continue;
		}

		/* No destination socket => dequeue skb if still there */
		skb = tipc_skb_dequeue(inputq, dport);
		if (!skb)
			return;

		/* Try secondary lookup if unresolved named message */
		err = TIPC_ERR_NO_PORT;
		if (tipc_msg_lookup_dest(net, skb, &err))
			goto xmit;

		/* Prepare for message rejection */
		if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
			continue;
xmit:
		dnode = msg_destnode(buf_msg(skb));
		tipc_node_xmit_skb(net, skb, dnode, dport);
	}
}

Y
Ying Xue 已提交
1871 1872
/* tipc_wait_for_connect - wait until the socket leaves TIPC_CONNECTING
 * @sock: socket structure
 * @timeo_p: in/out: remaining timeout in jiffies
 *
 * Returns 0 when the connection attempt has concluded, otherwise errno
 * (socket error, -ETIMEDOUT on expiry, or signal-derived errno)
 */
static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct sock *sk = sock->sk;
	int done;

	do {
		int err = sock_error(sk);
		if (err)
			return err;
		if (!*timeo_p)
			return -ETIMEDOUT;
		if (signal_pending(current))
			return sock_intr_errno(*timeo_p);

		add_wait_queue(sk_sleep(sk), &wait);
		done = sk_wait_event(sk, timeo_p,
				     sk->sk_state != TIPC_CONNECTING, &wait);
		remove_wait_queue(sk_sleep(sk), &wait);
	} while (!done);
	return 0;
}

/**
 * tipc_connect - establish a connection to another TIPC port
 * @sock: socket structure
 * @dest: socket address for destination port
 * @destlen: size of socket address data structure
 * @flags: file-related flags associated with socket
 *
 * For connectionless (DGRAM/RDM) sockets this only records the destination
 * address (or clears it for AF_UNSPEC); for connection-oriented sockets it
 * sends a SYN and optionally waits for the handshake to complete.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_connect(struct socket *sock, struct sockaddr *dest,
			int destlen, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
	struct msghdr m = {NULL,};
	/* Non-blocking connect uses a zero timeout */
	long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
	int previous;
	int res = 0;

	if (destlen != sizeof(struct sockaddr_tipc))
		return -EINVAL;

	lock_sock(sk);

	if (dst->family == AF_UNSPEC) {
		/* AF_UNSPEC "disconnects" a connectionless socket */
		memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
		if (!tipc_sk_type_connectionless(sk))
			res = -EINVAL;
		goto exit;
	} else if (dst->family != AF_TIPC) {
		res = -EINVAL;
	}
	if (dst->addrtype != TIPC_ADDR_ID && dst->addrtype != TIPC_ADDR_NAME)
		res = -EINVAL;
	if (res)
		goto exit;

	/* DGRAM/RDM connect(), just save the destaddr */
	if (tipc_sk_type_connectionless(sk)) {
		memcpy(&tsk->peer, dest, destlen);
		goto exit;
	}

	previous = sk->sk_state;

	switch (sk->sk_state) {
	case TIPC_OPEN:
		/* Send a 'SYN-' to destination */
		m.msg_name = dest;
		m.msg_namelen = destlen;

		/* If connect is in non-blocking case, set MSG_DONTWAIT to
		 * indicate send_msg() is never blocked.
		 */
		if (!timeout)
			m.msg_flags = MSG_DONTWAIT;

		res = __tipc_sendmsg(sock, &m, 0);
		if ((res < 0) && (res != -EWOULDBLOCK))
			goto exit;

		/* Just entered TIPC_CONNECTING state; the only
		 * difference is that return value in non-blocking
		 * case is EINPROGRESS, rather than EALREADY.
		 */
		res = -EINPROGRESS;
		/* fall thru' */
	case TIPC_CONNECTING:
		if (!timeout) {
			if (previous == TIPC_CONNECTING)
				res = -EALREADY;
			goto exit;
		}
		timeout = msecs_to_jiffies(timeout);
		/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
		res = tipc_wait_for_connect(sock, &timeout);
		break;
	case TIPC_ESTABLISHED:
		res = -EISCONN;
		break;
	default:
		res = -EINVAL;
	}

exit:
	release_sock(sk);
	return res;
}

1984
/**
1985
 * tipc_listen - allow socket to listen for incoming connections
P
Per Liden 已提交
1986 1987
 * @sock: socket structure
 * @len: (unused)
1988
 *
P
Per Liden 已提交
1989 1990
 * Returns 0 on success, errno otherwise
 */
1991
static int tipc_listen(struct socket *sock, int len)
P
Per Liden 已提交
1992
{
1993 1994 1995 1996
	struct sock *sk = sock->sk;
	int res;

	lock_sock(sk);
1997
	res = tipc_set_sk_state(sk, TIPC_LISTEN);
1998
	release_sock(sk);
1999

2000
	return res;
P
Per Liden 已提交
2001 2002
}

/* Block (up to @timeo jiffies) until a connection request is queued on
 * the listening socket's receive queue. Called with the socket locked;
 * the lock is dropped while sleeping. Returns 0, -EAGAIN on timeout,
 * or a signal-derived errno.
 */
static int tipc_wait_for_accept(struct socket *sock, long timeo)
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT(wait);
	int err;

	/* True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	*/
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
			release_sock(sk);
			timeo = schedule_timeout(timeo);
			lock_sock(sk);
		}
		err = 0;
		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}

/**
 * tipc_accept - wait for connection request
 * @sock: listening socket
 * @new_sock: new socket that is to be connected
 * @flags: file-related flags associated with socket
 * @kern: caller is in-kernel (passed through to socket creation)
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
		       bool kern)
{
	struct sock *new_sk, *sk = sock->sk;
	struct sk_buff *buf;
	struct tipc_sock *new_tsock;
	struct tipc_msg *msg;
	long timeo;
	int res;

	lock_sock(sk);

	if (sk->sk_state != TIPC_LISTEN) {
		res = -EINVAL;
		goto exit;
	}
	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	res = tipc_wait_for_accept(sock, timeo);
	if (res)
		goto exit;

	/* Peek (don't consume) the SYN; it is dequeued or answered below */
	buf = skb_peek(&sk->sk_receive_queue);

	res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
	if (res)
		goto exit;
	security_sk_clone(sock->sk, new_sock->sk);

	new_sk = new_sock->sk;
	new_tsock = tipc_sk(new_sk);
	msg = buf_msg(buf);

	/* we lock on new_sk; but lockdep sees the lock on sk */
	lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);

	/*
	 * Reject any stray messages received by new socket
	 * before the socket lock was taken (very, very unlikely)
	 */
	tsk_rej_rx_queue(new_sk);

	/* Connect new socket to it's peer */
	tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));

	/* New connection inherits importance and (if named) type/instance */
	tsk_set_importance(new_tsock, msg_importance(msg));
	if (msg_named(msg)) {
		new_tsock->conn_type = msg_nametype(msg);
		new_tsock->conn_instance = msg_nameinst(msg);
	}

	/*
	 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
	 * Respond to 'SYN+' by queuing it on new socket.
	 */
	if (!msg_data_sz(msg)) {
		struct msghdr m = {NULL,};

		tsk_advance_rx_queue(sk);
		__tipc_sendstream(new_sock, &m, 0);
	} else {
		__skb_dequeue(&sk->sk_receive_queue);
		__skb_queue_head(&new_sk->sk_receive_queue, buf);
		skb_set_owner_r(buf, new_sk);
	}
	release_sock(new_sk);
exit:
	release_sock(sk);
	return res;
}

/**
 * tipc_shutdown - shutdown socket connection
 * @sock: socket structure
 * @how: direction to close (must be SHUT_RDWR)
 *
 * Terminates connection (if necessary), then purges socket's receive queue.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int res;

	if (how != SHUT_RDWR)
		return -EINVAL;

	lock_sock(sk);

	__tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
	sk->sk_shutdown = SEND_SHUTDOWN;

	if (sk->sk_state == TIPC_DISCONNECTING) {
		/* Discard any unreceived messages */
		__skb_queue_purge(&sk->sk_receive_queue);

		/* Wake up anyone sleeping in poll */
		sk->sk_state_change(sk);
		res = 0;
	} else {
		res = -ENOTCONN;
	}

	release_sock(sk);
	return res;
}

/* Connection supervision timer: if the previous probe was never acked,
 * tear the connection down (or retry shortly if the socket is owned by
 * user context); otherwise send a new CONN_PROBE and re-arm the timer.
 * Runs in BH context; @data is the tipc_sock pointer armed with the timer.
 */
static void tipc_sk_timeout(unsigned long data)
{
	struct tipc_sock *tsk = (struct tipc_sock *)data;
	struct sock *sk = &tsk->sk;
	struct sk_buff *skb = NULL;
	u32 peer_port, peer_node;
	u32 own_node = tsk_own_node(tsk);

	bh_lock_sock(sk);
	if (!tipc_sk_connected(sk)) {
		bh_unlock_sock(sk);
		goto exit;
	}
	peer_port = tsk_peer_port(tsk);
	peer_node = tsk_peer_node(tsk);

	if (tsk->probe_unacked) {
		if (!sock_owned_by_user(sk)) {
			/* Peer did not answer the last probe: disconnect */
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
					      tsk_peer_port(tsk));
			sk->sk_state_change(sk);
		} else {
			/* Try again later */
			sk_reset_timer(sk, &sk->sk_timer, (HZ / 20));
		}

		bh_unlock_sock(sk);
		goto exit;
	}

	skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE,
			      INT_H_SIZE, 0, peer_node, own_node,
			      peer_port, tsk->portid, TIPC_OK);
	tsk->probe_unacked = true;
	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTERVAL);
	bh_unlock_sock(sk);
	/* Transmit outside the socket spinlock */
	if (skb)
		tipc_node_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid);
exit:
	/* Drop the reference taken when the timer was armed */
	sock_put(sk);
}

/* Publish a name sequence binding for this socket in the name table.
 * Not allowed on a connected socket. Returns 0, -EINVAL, or -EADDRINUSE.
 */
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
			   struct tipc_name_seq const *seq)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct publication *publ;
	u32 key;

	if (tipc_sk_connected(sk))
		return -EINVAL;
	/* Key derived from portid + per-socket publication counter;
	 * wrapping back onto portid itself is rejected.
	 */
	key = tsk->portid + tsk->pub_count + 1;
	if (key == tsk->portid)
		return -EADDRINUSE;

	publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
				    scope, tsk->portid, key);
	if (unlikely(!publ))
		return -EINVAL;

	list_add(&publ->pport_list, &tsk->publications);
	tsk->pub_count++;
	tsk->published = 1;
	return 0;
}

/* Withdraw one publication matching @scope/@seq, or all of the socket's
 * publications when @seq is NULL. Returns 0 if anything was withdrawn,
 * -EINVAL otherwise.
 */
static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
			    struct tipc_name_seq const *seq)
{
	struct net *net = sock_net(&tsk->sk);
	struct publication *publ;
	struct publication *safe;
	int rc = -EINVAL;

	list_for_each_entry_safe(publ, safe, &tsk->publications, pport_list) {
		if (seq) {
			if (publ->scope != scope)
				continue;
			if (publ->type != seq->type)
				continue;
			if (publ->lower != seq->lower)
				continue;
			if (publ->upper != seq->upper)
				break;
			tipc_nametbl_withdraw(net, publ->type, publ->lower,
					      publ->ref, publ->key);
			rc = 0;
			break;
		}
		/* seq == NULL: withdraw every publication */
		tipc_nametbl_withdraw(net, publ->type, publ->lower,
				      publ->ref, publ->key);
		rc = 0;
	}
	if (list_empty(&tsk->publications))
		tsk->published = 0;
	return rc;
}

/* tipc_sk_reinit: set non-zero address in all existing sockets
 *                 when we go from standalone to network mode.
 *
 * Walks the socket rhashtable and rewrites each socket's cached message
 * header with the node's own address. The walk is restarted on -EAGAIN
 * (table resize during iteration).
 */
void tipc_sk_reinit(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct rhashtable_iter iter;
	struct tipc_sock *tsk;
	struct tipc_msg *msg;

	rhashtable_walk_enter(&tn->sk_rht, &iter);

	do {
		tsk = ERR_PTR(rhashtable_walk_start(&iter));
		if (IS_ERR(tsk))
			goto walk_stop;

		while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
			spin_lock_bh(&tsk->sk.sk_lock.slock);
			msg = &tsk->phdr;
			msg_set_prevnode(msg, tn->own_addr);
			msg_set_orignode(msg, tn->own_addr);
			spin_unlock_bh(&tsk->sk.sk_lock.slock);
		}
walk_stop:
		rhashtable_walk_stop(&iter);
	} while (tsk == ERR_PTR(-EAGAIN));
}

/* Look up a socket by port id in the rhashtable. On success a reference
 * is taken (sock_hold) before leaving the RCU read section; the caller
 * must drop it with sock_put().
 */
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_sock *tsk;

	rcu_read_lock();
	tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
	if (tsk)
		sock_hold(&tsk->sk);
	rcu_read_unlock();

	return tsk;
}

/* Assign the socket a free port id in [TIPC_MIN_PORT, TIPC_MAX_PORT]
 * (starting from a random offset) and insert it into the rhashtable.
 * The table holds a socket reference. Returns 0 on success, -1 if the
 * whole port range is exhausted.
 */
static int tipc_sk_insert(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
	u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;

	while (remaining--) {
		portid++;
		if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
			portid = TIPC_MIN_PORT;
		tsk->portid = portid;
		sock_hold(&tsk->sk);
		if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
						   tsk_rht_params))
			return 0;
		/* portid already taken: drop the ref and try the next one */
		sock_put(&tsk->sk);
	}

	return -1;
}

/* Remove the socket from the rhashtable and drop the table's reference.
 * The WARN_ON catches the case where that reference was already the last.
 */
static void tipc_sk_remove(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);

	if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
}

/* Parameters for the per-netns socket rhashtable, keyed by 32-bit portid */
static const struct rhashtable_params tsk_rht_params = {
	.nelem_hint = 192,
	.head_offset = offsetof(struct tipc_sock, node),
	.key_offset = offsetof(struct tipc_sock, portid),
	.key_len = sizeof(u32), /* portid */
	.max_size = 1048576,
	.min_size = 256,
	.automatic_shrinking = true,
};

2338
int tipc_sk_rht_init(struct net *net)
2339
{
2340
	struct tipc_net *tn = net_generic(net, tipc_net_id);
2341 2342

	return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
2343 2344
}

2345
void tipc_sk_rht_destroy(struct net *net)
2346
{
2347 2348
	struct tipc_net *tn = net_generic(net, tipc_net_id);

2349 2350
	/* Wait for socket readers to complete */
	synchronize_net();
2351

2352
	rhashtable_destroy(&tn->sk_rht);
2353 2354
}

/**
 * tipc_setsockopt - set socket option
 * @sock: socket structure
 * @lvl: option level
 * @opt: option identifier
 * @ov: pointer to new option value
 * @ol: length of option value
 *
 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
 * (to ease compatibility).
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
			   char __user *ov, unsigned int ol)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	u32 value = 0;
	int res = 0;

	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
		return 0;
	if (lvl != SOL_TIPC)
		return -ENOPROTOOPT;

	/* Copy in the value for options that take one; the remaining
	 * options (e.g. the multicast method switches) take none.
	 */
	switch (opt) {
	case TIPC_IMPORTANCE:
	case TIPC_SRC_DROPPABLE:
	case TIPC_DEST_DROPPABLE:
	case TIPC_CONN_TIMEOUT:
		if (ol < sizeof(value))
			return -EINVAL;
		res = get_user(value, (u32 __user *)ov);
		if (res)
			return res;
		break;
	default:
		if (ov || ol)
			return -EINVAL;
	}

	lock_sock(sk);

	switch (opt) {
	case TIPC_IMPORTANCE:
		res = tsk_set_importance(tsk, value);
		break;
	case TIPC_SRC_DROPPABLE:
		if (sock->type != SOCK_STREAM)
			tsk_set_unreliable(tsk, value);
		else
			res = -ENOPROTOOPT;
		break;
	case TIPC_DEST_DROPPABLE:
		tsk_set_unreturnable(tsk, value);
		break;
	case TIPC_CONN_TIMEOUT:
		tipc_sk(sk)->conn_timeout = value;
		break;
	case TIPC_MCAST_BROADCAST:
		tsk->mc_method.rcast = false;
		tsk->mc_method.mandatory = true;
		break;
	case TIPC_MCAST_REPLICAST:
		tsk->mc_method.rcast = true;
		tsk->mc_method.mandatory = true;
		break;
	default:
		res = -EINVAL;
	}

	release_sock(sk);

	return res;
}

/**
 * tipc_getsockopt - get socket option
 * @sock: socket structure
 * @lvl: option level
 * @opt: option identifier
 * @ov: receptacle for option value
 * @ol: receptacle for length of option value
 *
 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
 * (to ease compatibility).
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
			   char __user *ov, int __user *ol)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	int len;
	u32 value;
	int res;

	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
		return put_user(0, ol);
	if (lvl != SOL_TIPC)
		return -ENOPROTOOPT;
	res = get_user(len, ol);
	if (res)
		return res;

	lock_sock(sk);

	switch (opt) {
	case TIPC_IMPORTANCE:
		value = tsk_importance(tsk);
		break;
	case TIPC_SRC_DROPPABLE:
		value = tsk_unreliable(tsk);
		break;
	case TIPC_DEST_DROPPABLE:
		value = tsk_unreturnable(tsk);
		break;
	case TIPC_CONN_TIMEOUT:
		value = tsk->conn_timeout;
		/* no need to set "res", since already 0 at this point */
		break;
	case TIPC_NODE_RECVQ_DEPTH:
		value = 0; /* was tipc_queue_size, now obsolete */
		break;
	case TIPC_SOCK_RECVQ_DEPTH:
		value = skb_queue_len(&sk->sk_receive_queue);
		break;
	default:
		res = -EINVAL;
	}

	release_sock(sk);

	if (res)
		return res;	/* "get" failed */

	if (len < sizeof(value))
		return -EINVAL;

	if (copy_to_user(ov, &value, sizeof(value)))
		return -EFAULT;

	return put_user(sizeof(value), ol);
}

/* ioctl handler: SIOCGETLINKNAME resolves (bearer_id, peer) to a link
 * name copied back to user space; all other commands are rejected.
 */
static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct tipc_sioc_ln_req lnr;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case SIOCGETLINKNAME:
		if (copy_from_user(&lnr, argp, sizeof(lnr)))
			return -EFAULT;
		if (!tipc_node_get_linkname(sock_net(sk),
					    lnr.bearer_id & 0xffff, lnr.peer,
					    lnr.linkname, TIPC_MAX_LINK_NAME)) {
			if (copy_to_user(argp, &lnr, sizeof(lnr)))
				return -EFAULT;
			return 0;
		}
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}

/* Connect two local sockets to each other: cross-link their peer
 * addresses (both on our own node) and establish both connections.
 */
static int tipc_socketpair(struct socket *sock1, struct socket *sock2)
{
	struct tipc_sock *tsk2 = tipc_sk(sock2->sk);
	struct tipc_sock *tsk1 = tipc_sk(sock1->sk);
	u32 onode = tipc_own_addr(sock_net(sock1->sk));

	tsk1->peer.family = AF_TIPC;
	tsk1->peer.addrtype = TIPC_ADDR_ID;
	tsk1->peer.scope = TIPC_NODE_SCOPE;
	tsk1->peer.addr.id.ref = tsk2->portid;
	tsk1->peer.addr.id.node = onode;
	tsk2->peer.family = AF_TIPC;
	tsk2->peer.addrtype = TIPC_ADDR_ID;
	tsk2->peer.scope = TIPC_NODE_SCOPE;
	tsk2->peer.addr.id.ref = tsk1->portid;
	tsk2->peer.addr.id.node = onode;

	tipc_sk_finish_conn(tsk1, tsk2->portid, onode);
	tipc_sk_finish_conn(tsk2, tsk1->portid, onode);
	return 0;
}

/* Protocol switches for the various types of TIPC sockets */

/* Connectionless ops (accept/listen are disabled via sock_no_*) */
static const struct proto_ops msg_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= tipc_socketpair,
	.accept		= sock_no_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
	.ioctl		= tipc_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_sendmsg,
	.recvmsg	= tipc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

/* Connection-oriented packet ops (message-boundary-preserving send) */
static const struct proto_ops packet_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= tipc_socketpair,
	.accept		= tipc_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
	.ioctl		= tipc_ioctl,
	.listen		= tipc_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_send_packet,
	.recvmsg	= tipc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

/* Byte-stream ops (uses the stream send/receive paths) */
static const struct proto_ops stream_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= tipc_socketpair,
	.accept		= tipc_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
	.ioctl		= tipc_ioctl,
	.listen		= tipc_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_sendstream,
	.recvmsg	= tipc_recvstream,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

/* AF_TIPC address-family registration entry */
static const struct net_proto_family tipc_family_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.create		= tipc_sk_create
};

/* Protocol descriptor registered with the networking core */
static struct proto tipc_proto = {
	.name		= "TIPC",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct tipc_sock),
	.sysctl_rmem	= sysctl_tipc_rmem
};

/**
 * tipc_socket_init - initialize TIPC socket interface
 *
 * Registers the TIPC protocol and the AF_TIPC socket family;
 * the protocol registration is rolled back if the family fails.
 *
 * Returns 0 on success, errno otherwise
 */
int tipc_socket_init(void)
{
	int res;

	res = proto_register(&tipc_proto, 1);
	if (res) {
		pr_err("Failed to register TIPC protocol type\n");
		goto out;
	}

	res = sock_register(&tipc_family_ops);
	if (res) {
		pr_err("Failed to register TIPC socket type\n");
		proto_unregister(&tipc_proto);
		goto out;
	}
 out:
	return res;
}

/**
 * tipc_socket_stop - stop TIPC socket interface
 *
 * Unregisters the socket family and the protocol (reverse of init order).
 */
void tipc_socket_stop(void)
{
	sock_unregister(tipc_family_ops.family);
	proto_unregister(&tipc_proto);
}
/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
{
	u32 peer_node;
	u32 peer_port;
	struct nlattr *nest;

	peer_node = tsk_peer_node(tsk);
	peer_port = tsk_peer_port(tsk);

	/* Nest the connection attributes under TIPC_NLA_SOCK_CON */
	nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);

	if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
		goto msg_full;
	if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
		goto msg_full;

	if (tsk->conn_type != 0) {
		if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
			goto msg_full;
		if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
			goto msg_full;
		if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
			goto msg_full;
	}
	nla_nest_end(skb, nest);

	return 0;

msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
			    struct tipc_sock *tsk)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct sock *sk = &tsk->sk;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
	if (!hdr)
		goto msg_cancel;

	attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
	if (!attrs)
		goto genlmsg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tn->own_addr))
		goto attr_msg_cancel;

	/* Add connection attributes, or flag existing publications */
	if (tipc_sk_connected(sk)) {
		err = __tipc_nl_add_sk_con(skb, tsk);
		if (err)
			goto attr_msg_cancel;
	} else if (!list_empty(&tsk->publications)) {
		if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
			goto attr_msg_cancel;
	}
	nla_nest_end(skb, attrs);
	genlmsg_end(skb, hdr);

	return 0;

attr_msg_cancel:
	nla_nest_cancel(skb, attrs);
genlmsg_cancel:
	genlmsg_cancel(skb, hdr);
msg_cancel:
	return -EMSGSIZE;
}

/* Netlink dump of all TIPC sockets. Resumable: cb->args[0] holds the
 * current hash bucket, cb->args[1] the portid to resume at after a
 * partially filled skb.
 */
int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	struct tipc_sock *tsk;
	const struct bucket_table *tbl;
	struct rhash_head *pos;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	u32 tbl_id = cb->args[0];
	u32 prev_portid = cb->args[1];

	rcu_read_lock();
	tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
	for (; tbl_id < tbl->size; tbl_id++) {
		rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) {
			spin_lock_bh(&tsk->sk.sk_lock.slock);
			/* Skip entries until the resume point is reached */
			if (prev_portid && prev_portid != tsk->portid) {
				spin_unlock_bh(&tsk->sk.sk_lock.slock);
				continue;
			}

			err = __tipc_nl_add_sk(skb, cb, tsk);
			if (err) {
				/* skb full: remember where to resume */
				prev_portid = tsk->portid;
				spin_unlock_bh(&tsk->sk.sk_lock.slock);
				goto out;
			}
			prev_portid = 0;
			spin_unlock_bh(&tsk->sk.sk_lock.slock);
		}
	}
out:
	rcu_read_unlock();
	cb->args[0] = tbl_id;
	cb->args[1] = prev_portid;

	return skb->len;
}
/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct publication *publ)
{
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
	if (!hdr)
		goto msg_cancel;

	attrs = nla_nest_start(skb, TIPC_NLA_PUBL);
	if (!attrs)
		goto genlmsg_cancel;

	if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
		goto attr_msg_cancel;

	nla_nest_end(skb, attrs);
	genlmsg_end(skb, hdr);

	return 0;

attr_msg_cancel:
	nla_nest_cancel(skb, attrs);
genlmsg_cancel:
	genlmsg_cancel(skb, hdr);
msg_cancel:
	return -EMSGSIZE;
}

/* Caller should hold socket lock for the passed tipc socket.
 * Dumps the socket's publications, resuming after *last_publ if set;
 * on a full skb, *last_publ records the key to resume from.
 */
static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
				  struct netlink_callback *cb,
				  struct tipc_sock *tsk, u32 *last_publ)
{
	int err;
	struct publication *p;

	if (*last_publ) {
		/* Find the publication we stopped at last time */
		list_for_each_entry(p, &tsk->publications, pport_list) {
			if (p->key == *last_publ)
				break;
		}
		if (p->key != *last_publ) {
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistence check to fail in the netlink callback
			 * handler. Resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			*last_publ = 0;
			return -EPIPE;
		}
	} else {
		p = list_first_entry(&tsk->publications, struct publication,
				     pport_list);
	}

	list_for_each_entry_from(p, &tsk->publications, pport_list) {
		err = __tipc_nl_add_sk_publ(skb, cb, p);
		if (err) {
			*last_publ = p->key;
			return err;
		}
	}
	*last_publ = 0;

	return 0;
}

/* Netlink dump of one socket's publications. Resumable state in
 * cb->args: [0] target portid, [1] last dumped publication key,
 * [2] done flag.
 */
int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	u32 tsk_portid = cb->args[0];
	u32 last_publ = cb->args[1];
	u32 done = cb->args[2];
	struct net *net = sock_net(skb->sk);
	struct tipc_sock *tsk;

	if (!tsk_portid) {
		/* First pass: extract the target socket from the request */
		struct nlattr **attrs;
		struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];

		err = tipc_nlmsg_parse(cb->nlh, &attrs);
		if (err)
			return err;

		if (!attrs[TIPC_NLA_SOCK])
			return -EINVAL;

		err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
				       attrs[TIPC_NLA_SOCK],
				       tipc_nl_sock_policy, NULL);
		if (err)
			return err;

		if (!sock[TIPC_NLA_SOCK_REF])
			return -EINVAL;

		tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
	}

	if (done)
		return 0;

	tsk = tipc_sk_lookup(net, tsk_portid);
	if (!tsk)
		return -EINVAL;

	lock_sock(&tsk->sk);
	err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
	if (!err)
		done = 1;
	release_sock(&tsk->sk);
	sock_put(&tsk->sk);	/* drop ref taken by tipc_sk_lookup() */

	cb->args[0] = tsk_portid;
	cb->args[1] = last_publ;
	cb->args[2] = done;

	return skb->len;
}