socket.c 73.5 KB
Newer Older
P
Per Liden 已提交
1
/*
2
 * net/tipc/socket.c: TIPC socket API
3
 *
4
 * Copyright (c) 2001-2007, 2012-2016, Ericsson AB
5
 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
P
Per Liden 已提交
6 7
 * All rights reserved.
 *
P
Per Liden 已提交
8
 * Redistribution and use in source and binary forms, with or without
P
Per Liden 已提交
9 10
 * modification, are permitted provided that the following conditions are met:
 *
P
Per Liden 已提交
11 12 13 14 15 16 17 18
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
P
Per Liden 已提交
19
 *
P
Per Liden 已提交
20 21 22 23 24 25 26 27 28 29 30 31 32 33
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
P
Per Liden 已提交
34 35 36
 * POSSIBILITY OF SUCH DAMAGE.
 */

37
#include <linux/rhashtable.h>
38 39
#include <linux/sched/signal.h>

P
Per Liden 已提交
40
#include "core.h"
41
#include "name_table.h"
E
Erik Hugne 已提交
42
#include "node.h"
43
#include "link.h"
44
#include "name_distr.h"
45
#include "socket.h"
46
#include "bcast.h"
47
#include "netlink.h"
48

49
#define CONN_TIMEOUT_DEFAULT	8000	/* default connect timeout = 8s */
50
#define CONN_PROBING_INTERVAL	msecs_to_jiffies(3600000)  /* [ms] => 1 h */
51 52 53
#define TIPC_FWD_MSG		1
#define TIPC_MAX_PORT		0xffffffff
#define TIPC_MIN_PORT		1
54
#define TIPC_ACK_RATE		4       /* ACK at 1/4 of of rcv window size */
55

56 57
enum {
	TIPC_LISTEN = TCP_LISTEN,
58
	TIPC_ESTABLISHED = TCP_ESTABLISHED,
59
	TIPC_OPEN = TCP_CLOSE,
60
	TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
61
	TIPC_CONNECTING = TCP_SYN_SENT,
62 63
};

64 65 66 67 68 69 70
/**
 * struct tipc_sock - TIPC socket structure
 * @sk: socket - interacts with 'port' and with user via the socket API
 * @conn_type: TIPC type used when connection was established
 * @conn_instance: TIPC instance used when connection was established
 * @published: non-zero if port has one or more associated names
 * @max_pkt: maximum packet size "hint" used when building messages sent by port
71
 * @portid: unique port identity in TIPC socket hash table
72
 * @phdr: preformatted message header used when sending messages
73
 * #cong_links: list of congested links
74
 * @publications: list of publications for port
75
 * @blocking_link: address of the congested link we are currently sleeping on
76 77 78 79
 * @pub_count: total # of publications port has made during its lifetime
 * @probing_state:
 * @conn_timeout: the time we can wait for an unresponded setup request
 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
80
 * @cong_link_cnt: number of congested links
81 82
 * @sent_unacked: # messages sent by socket, and not yet acked by peer
 * @rcv_unacked: # messages read by user, but not yet acked back to peer
83
 * @peer: 'connected' peer for dgram/rdm
84
 * @node: hash table node
85
 * @mc_method: cookie for use between socket and broadcast layer
86
 * @rcu: rcu struct for tipc_sock
87 88 89 90 91 92 93
 */
struct tipc_sock {
	struct sock sk;
	u32 conn_type;
	u32 conn_instance;
	int published;
	u32 max_pkt;
94
	u32 portid;
95
	struct tipc_msg phdr;
96
	struct list_head cong_links;
97 98 99 100
	struct list_head publications;
	u32 pub_count;
	uint conn_timeout;
	atomic_t dupl_rcvcnt;
101
	bool probe_unacked;
102
	u16 cong_link_cnt;
103 104
	u16 snt_unacked;
	u16 snd_win;
105
	u16 peer_caps;
106 107
	u16 rcv_unacked;
	u16 rcv_win;
108
	struct sockaddr_tipc peer;
109
	struct rhash_head node;
110
	struct tipc_mc_method mc_method;
111
	struct rcu_head rcu;
112
};
P
Per Liden 已提交
113

J
Jon Maloy 已提交
114
static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
115
static void tipc_data_ready(struct sock *sk);
116
static void tipc_write_space(struct sock *sk);
117
static void tipc_sock_destruct(struct sock *sk);
118
static int tipc_release(struct socket *sock);
119 120
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
		       bool kern);
121
static void tipc_sk_timeout(unsigned long data);
122
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
J
Jon Paul Maloy 已提交
123
			   struct tipc_name_seq const *seq);
124
static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
J
Jon Paul Maloy 已提交
125
			    struct tipc_name_seq const *seq);
126
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
127 128
static int tipc_sk_insert(struct tipc_sock *tsk);
static void tipc_sk_remove(struct tipc_sock *tsk);
129
static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
130
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
P
Per Liden 已提交
131

132 133 134
static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
static const struct proto_ops msg_ops;
P
Per Liden 已提交
135
static struct proto tipc_proto;
136 137
static const struct rhashtable_params tsk_rht_params;

138 139 140 141 142
/* tsk_own_node - own node address as recorded in the preformatted header */
static u32 tsk_own_node(struct tipc_sock *tsk)
{
	struct tipc_msg *hdr = &tsk->phdr;

	return msg_prevnode(hdr);
}

143
static u32 tsk_peer_node(struct tipc_sock *tsk)
144
{
145
	return msg_destnode(&tsk->phdr);
146 147
}

148
static u32 tsk_peer_port(struct tipc_sock *tsk)
149
{
150
	return msg_destport(&tsk->phdr);
151 152
}

153
static  bool tsk_unreliable(struct tipc_sock *tsk)
154
{
155
	return msg_src_droppable(&tsk->phdr) != 0;
156 157
}

158
/* tsk_set_unreliable - set the source-droppable bit in the header template */
static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
{
	msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
}

163
static bool tsk_unreturnable(struct tipc_sock *tsk)
164
{
165
	return msg_dest_droppable(&tsk->phdr) != 0;
166 167
}

168
/* tsk_set_unreturnable - set the dest-droppable bit in the header template */
static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
{
	msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
}

173
static int tsk_importance(struct tipc_sock *tsk)
174
{
175
	return msg_importance(&tsk->phdr);
176 177
}

178
static int tsk_set_importance(struct tipc_sock *tsk, int imp)
179 180 181
{
	if (imp > TIPC_CRITICAL_IMPORTANCE)
		return -EINVAL;
182
	msg_set_importance(&tsk->phdr, (u32)imp);
183 184
	return 0;
}
185

186 187 188 189 190
/* tipc_sk - convert a generic socket pointer to its enclosing TIPC socket.
 * Valid because struct tipc_sock embeds struct sock as its first member.
 */
static struct tipc_sock *tipc_sk(const struct sock *sk)
{
	return container_of(sk, struct tipc_sock, sk);
}

191
static bool tsk_conn_cong(struct tipc_sock *tsk)
192
{
193
	return tsk->snt_unacked > tsk->snd_win;
194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213
}

/* tsk_adv_blocks(): translate a buffer size in bytes to the number of
 * advertisable blocks, taking into account the ratio truesize(len)/len.
 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ.
 */
static u16 tsk_adv_blocks(int len)
{
	int blks = len / FLOWCTL_BLK_SZ;

	return blks / 4;
}

/* tsk_inc(): increment counter for sent or received data
 * - If block based flow control is not supported by peer we
 *   fall back to message based ditto, incrementing the counter
 */
static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
{
	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
		return ((msglen / FLOWCTL_BLK_SZ) + 1);
	return 1;
}

216
/**
217
 * tsk_advance_rx_queue - discard first buffer in socket receive queue
218 219
 *
 * Caller must hold socket lock
P
Per Liden 已提交
220
 */
221
static void tsk_advance_rx_queue(struct sock *sk)
P
Per Liden 已提交
222
{
223
	kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
P
Per Liden 已提交
224 225
}

226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241
/* tipc_sk_respond() : send response message back to sender
 */
static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
{
	u32 onode = tipc_own_addr(sock_net(sk));
	u32 dnode, selector;

	/* Reverse the message; nothing to send if that fails */
	if (!tipc_msg_reverse(onode, &skb, err))
		return;

	dnode = msg_destnode(buf_msg(skb));
	selector = msg_origport(buf_msg(skb));
	tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
}

P
Per Liden 已提交
242
/**
243
 * tsk_rej_rx_queue - reject all buffers in socket receive queue
244 245
 *
 * Caller must hold socket lock
P
Per Liden 已提交
246
 */
247
static void tsk_rej_rx_queue(struct sock *sk)
P
Per Liden 已提交
248
{
249
	struct sk_buff *skb;
250

251 252
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
		tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
P
Per Liden 已提交
253 254
}

255 256
static bool tipc_sk_connected(struct sock *sk)
{
257
	return sk->sk_state == TIPC_ESTABLISHED;
258 259
}

260 261 262 263 264 265 266 267 268 269
/* tipc_sk_type_connectionless - check if the socket is datagram socket
 * @sk: socket
 *
 * Returns true if connection less, false otherwise
 */
static bool tipc_sk_type_connectionless(struct sock *sk)
{
	int type = sk->sk_type;

	return type == SOCK_RDM || type == SOCK_DGRAM;
}

270
/* tsk_peer_msg - verify if message was sent by connected port's peer
J
Jon Paul Maloy 已提交
271 272 273 274
 *
 * Handles cases where the node's network address has changed from
 * the default of <0.0.0> to its configured setting.
 */
275
static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
J
Jon Paul Maloy 已提交
276
{
277 278
	struct sock *sk = &tsk->sk;
	struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
279
	u32 peer_port = tsk_peer_port(tsk);
J
Jon Paul Maloy 已提交
280 281 282
	u32 orig_node;
	u32 peer_node;

283
	if (unlikely(!tipc_sk_connected(sk)))
J
Jon Paul Maloy 已提交
284 285 286 287 288 289
		return false;

	if (unlikely(msg_origport(msg) != peer_port))
		return false;

	orig_node = msg_orignode(msg);
290
	peer_node = tsk_peer_node(tsk);
J
Jon Paul Maloy 已提交
291 292 293 294

	if (likely(orig_node == peer_node))
		return true;

295
	if (!orig_node && (peer_node == tn->own_addr))
J
Jon Paul Maloy 已提交
296 297
		return true;

298
	if (!peer_node && (orig_node == tn->own_addr))
J
Jon Paul Maloy 已提交
299 300 301 302 303
		return true;

	return false;
}

304 305 306 307 308 309 310 311 312
/* tipc_set_sk_state - set the sk_state of the socket
 * @sk: socket
 *
 * Caller must hold socket lock
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_set_sk_state(struct sock *sk, int state)
{
313
	int oldsk_state = sk->sk_state;
314 315 316
	int res = -EINVAL;

	switch (state) {
317 318 319
	case TIPC_OPEN:
		res = 0;
		break;
320
	case TIPC_LISTEN:
321
	case TIPC_CONNECTING:
322
		if (oldsk_state == TIPC_OPEN)
323 324
			res = 0;
		break;
325
	case TIPC_ESTABLISHED:
326
		if (oldsk_state == TIPC_CONNECTING ||
327
		    oldsk_state == TIPC_OPEN)
328 329
			res = 0;
		break;
330
	case TIPC_DISCONNECTING:
331
		if (oldsk_state == TIPC_CONNECTING ||
332 333 334
		    oldsk_state == TIPC_ESTABLISHED)
			res = 0;
		break;
335 336 337 338 339 340 341 342
	}

	if (!res)
		sk->sk_state = state;

	return res;
}

343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364
/* tipc_sk_sock_err - check for conditions that abort a blocking wait.
 * Returns 0 if waiting may continue, otherwise a negative errno.
 */
static int tipc_sk_sock_err(struct socket *sock, long *timeout)
{
	struct sock *sk = sock->sk;
	int err = sock_error(sk);

	if (err)
		return err;
	if (sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET) {
		if (sk->sk_state == TIPC_DISCONNECTING)
			return -EPIPE;
		if (!tipc_sk_connected(sk))
			return -ENOTCONN;
	}
	if (!*timeout)
		return -EAGAIN;
	if (signal_pending(current))
		return sock_intr_errno(*timeout);
	return 0;
}

365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383
/* tipc_wait_for_cond - sleep with the socket lock released until
 * condition_ is true, a socket error occurs, or the timeout expires.
 * Evaluates to 0 when condition_ became true, otherwise the negative
 * errno returned by tipc_sk_sock_err().
 */
#define tipc_wait_for_cond(sock_, timeo_, condition_)			       \
({                                                                             \
	struct sock *sk_;						       \
	int rc_;							       \
									       \
	while ((rc_ = !(condition_))) {					       \
		DEFINE_WAIT_FUNC(wait_, woken_wake_function);	               \
		sk_ = (sock_)->sk;					       \
		rc_ = tipc_sk_sock_err((sock_), timeo_);		       \
		if (rc_)						       \
			break;						       \
		prepare_to_wait(sk_sleep(sk_), &wait_, TASK_INTERRUPTIBLE);    \
		release_sock(sk_);					       \
		*(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
		sched_annotate_sleep();				               \
		lock_sock(sk_);						       \
		remove_wait_queue(sk_sleep(sk_), &wait_);		       \
	}								       \
	rc_;								       \
})

P
Per Liden 已提交
386
/**
387
 * tipc_sk_create - create a TIPC socket
388
 * @net: network namespace (must be default network)
P
Per Liden 已提交
389 390
 * @sock: pre-allocated socket structure
 * @protocol: protocol indicator (must be 0)
391
 * @kern: caused by kernel or by userspace?
392
 *
393 394
 * This routine creates additional data structures used by the TIPC socket,
 * initializes them, and links them together.
P
Per Liden 已提交
395 396 397
 *
 * Returns 0 on success, errno otherwise
 */
398 399
static int tipc_sk_create(struct net *net, struct socket *sock,
			  int protocol, int kern)
P
Per Liden 已提交
400
{
401
	struct tipc_net *tn;
402
	const struct proto_ops *ops;
P
Per Liden 已提交
403
	struct sock *sk;
404
	struct tipc_sock *tsk;
405
	struct tipc_msg *msg;
406 407

	/* Validate arguments */
P
Per Liden 已提交
408 409 410 411 412
	if (unlikely(protocol != 0))
		return -EPROTONOSUPPORT;

	switch (sock->type) {
	case SOCK_STREAM:
413
		ops = &stream_ops;
P
Per Liden 已提交
414 415
		break;
	case SOCK_SEQPACKET:
416
		ops = &packet_ops;
P
Per Liden 已提交
417 418 419
		break;
	case SOCK_DGRAM:
	case SOCK_RDM:
420
		ops = &msg_ops;
P
Per Liden 已提交
421
		break;
422 423
	default:
		return -EPROTOTYPE;
P
Per Liden 已提交
424 425
	}

426
	/* Allocate socket's protocol area */
427
	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
428
	if (sk == NULL)
P
Per Liden 已提交
429 430
		return -ENOMEM;

431
	tsk = tipc_sk(sk);
432 433
	tsk->max_pkt = MAX_PKT_DEFAULT;
	INIT_LIST_HEAD(&tsk->publications);
434
	INIT_LIST_HEAD(&tsk->cong_links);
435
	msg = &tsk->phdr;
436
	tn = net_generic(sock_net(sk), tipc_net_id);
P
Per Liden 已提交
437

438 439 440
	/* Finish initializing socket data structures */
	sock->ops = ops;
	sock_init_data(sock, sk);
441
	tipc_set_sk_state(sk, TIPC_OPEN);
442
	if (tipc_sk_insert(tsk)) {
M
Masanari Iida 已提交
443
		pr_warn("Socket create failed; port number exhausted\n");
444 445
		return -EINVAL;
	}
446 447 448 449 450 451 452

	/* Ensure tsk is visible before we read own_addr. */
	smp_mb();

	tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
		      NAMED_H_SIZE, 0);

453
	msg_set_origport(msg, tsk->portid);
454
	setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk);
455
	sk->sk_shutdown = 0;
J
Jon Maloy 已提交
456
	sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
457
	sk->sk_rcvbuf = sysctl_tipc_rmem[1];
458 459
	sk->sk_data_ready = tipc_data_ready;
	sk->sk_write_space = tipc_write_space;
460
	sk->sk_destruct = tipc_sock_destruct;
461 462
	tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
	atomic_set(&tsk->dupl_rcvcnt, 0);
463

464 465 466 467
	/* Start out with safe limits until we receive an advertised window */
	tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
	tsk->rcv_win = tsk->snd_win;

468
	if (tipc_sk_type_connectionless(sk)) {
469
		tsk_set_unreturnable(tsk, true);
470
		if (sock->type == SOCK_DGRAM)
471
			tsk_set_unreliable(tsk, true);
472
	}
473

P
Per Liden 已提交
474 475 476
	return 0;
}

477 478 479 480 481 482 483
/* tipc_sk_callback - RCU callback dropping the socket reference; scheduled
 * via call_rcu() from tipc_release() once the socket is unhashed.
 */
static void tipc_sk_callback(struct rcu_head *head)
{
	struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);

	sock_put(&tsk->sk);
}

484 485 486 487 488 489
/* Caller should hold socket lock for the socket. */
static void __tipc_shutdown(struct socket *sock, int error)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct net *net = sock_net(sk);
490
	long timeout = CONN_TIMEOUT_DEFAULT;
491 492 493
	u32 dnode = tsk_peer_node(tsk);
	struct sk_buff *skb;

494 495 496 497
	/* Avoid that hi-prio shutdown msgs bypass msgs in link wakeup queue */
	tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
					    !tsk_conn_cong(tsk)));

498 499 500 501 502 503
	/* Reject all unreceived messages, except on an active connection
	 * (which disconnects locally & sends a 'FIN+' to peer).
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (TIPC_SKB_CB(skb)->bytes_read) {
			kfree_skb(skb);
504
			continue;
505
		}
506 507 508 509 510 511
		if (!tipc_sk_type_connectionless(sk) &&
		    sk->sk_state != TIPC_DISCONNECTING) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			tipc_node_remove_conn(net, dnode, tsk->portid);
		}
		tipc_sk_respond(sk, skb, error);
512
	}
513 514 515 516

	if (tipc_sk_type_connectionless(sk))
		return;

517 518 519 520 521 522 523
	if (sk->sk_state != TIPC_DISCONNECTING) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
				      TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
				      tsk_own_node(tsk), tsk_peer_port(tsk),
				      tsk->portid, error);
		if (skb)
			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
524 525
		tipc_node_remove_conn(net, dnode, tsk->portid);
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
526 527 528
	}
}

P
Per Liden 已提交
529
/**
530
 * tipc_release - destroy a TIPC socket
P
Per Liden 已提交
531 532 533 534 535 536 537
 * @sock: socket to destroy
 *
 * This routine cleans up any messages that are still queued on the socket.
 * For DGRAM and RDM socket types, all queued messages are rejected.
 * For SEQPACKET and STREAM socket types, the first message is rejected
 * and any others are discarded.  (If the first message on a STREAM socket
 * is partially-read, it is discarded and the next one is rejected instead.)
538
 *
P
Per Liden 已提交
539 540 541 542 543 544
 * NOTE: Rejected messages are not necessarily returned to the sender!  They
 * are returned or discarded according to the "destination droppable" setting
 * specified for the message by the sender.
 *
 * Returns 0 on success, errno otherwise
 */
545
static int tipc_release(struct socket *sock)
P
Per Liden 已提交
546 547
{
	struct sock *sk = sock->sk;
548
	struct tipc_sock *tsk;
P
Per Liden 已提交
549

550 551 552 553 554
	/*
	 * Exit if socket isn't fully initialized (occurs when a failed accept()
	 * releases a pre-allocated child socket that was never used)
	 */
	if (sk == NULL)
P
Per Liden 已提交
555
		return 0;
556

557
	tsk = tipc_sk(sk);
558 559
	lock_sock(sk);

560 561
	__tipc_shutdown(sock, TIPC_ERR_NO_PORT);
	sk->sk_shutdown = SHUTDOWN_MASK;
562
	tipc_sk_withdraw(tsk, 0, NULL);
563
	sk_stop_timer(sk, &sk->sk_timer);
564
	tipc_sk_remove(tsk);
P
Per Liden 已提交
565

566 567
	/* Reject any messages that accumulated in backlog queue */
	release_sock(sk);
568 569
	u32_list_purge(&tsk->cong_links);
	tsk->cong_link_cnt = 0;
570
	call_rcu(&tsk->rcu, tipc_sk_callback);
571
	sock->sk = NULL;
P
Per Liden 已提交
572

573
	return 0;
P
Per Liden 已提交
574 575 576
}

/**
577
 * tipc_bind - associate or disassocate TIPC name(s) with a socket
P
Per Liden 已提交
578 579 580
 * @sock: socket structure
 * @uaddr: socket address describing name(s) and desired operation
 * @uaddr_len: size of socket address data structure
581
 *
P
Per Liden 已提交
582 583 584
 * Name and name sequence binding is indicated using a positive scope value;
 * a negative scope value unbinds the specified name.  Specifying no name
 * (i.e. a socket address length of 0) unbinds all names from the socket.
585
 *
P
Per Liden 已提交
586
 * Returns 0 on success, errno otherwise
587 588 589
 *
 * NOTE: This routine doesn't need to take the socket lock since it doesn't
 *       access any non-constant socket information.
P
Per Liden 已提交
590
 */
591 592
static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
		     int uaddr_len)
P
Per Liden 已提交
593
{
594
	struct sock *sk = sock->sk;
P
Per Liden 已提交
595
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
596
	struct tipc_sock *tsk = tipc_sk(sk);
597
	int res = -EINVAL;
P
Per Liden 已提交
598

599 600
	lock_sock(sk);
	if (unlikely(!uaddr_len)) {
601
		res = tipc_sk_withdraw(tsk, 0, NULL);
602 603
		goto exit;
	}
604

605 606 607 608 609 610 611 612
	if (uaddr_len < sizeof(struct sockaddr_tipc)) {
		res = -EINVAL;
		goto exit;
	}
	if (addr->family != AF_TIPC) {
		res = -EAFNOSUPPORT;
		goto exit;
	}
P
Per Liden 已提交
613 614 615

	if (addr->addrtype == TIPC_ADDR_NAME)
		addr->addr.nameseq.upper = addr->addr.nameseq.lower;
616 617 618 619
	else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
		res = -EAFNOSUPPORT;
		goto exit;
	}
620

621
	if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
622
	    (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
623 624 625 626
	    (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
		res = -EACCES;
		goto exit;
	}
627

628
	res = (addr->scope > 0) ?
629 630
		tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
		tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
631 632 633
exit:
	release_sock(sk);
	return res;
P
Per Liden 已提交
634 635
}

636
/**
637
 * tipc_getname - get port ID of socket or peer socket
P
Per Liden 已提交
638 639 640
 * @sock: socket structure
 * @uaddr: area for returned socket address
 * @uaddr_len: area for returned length of socket address
641
 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
642
 *
P
Per Liden 已提交
643
 * Returns 0 on success, errno otherwise
644
 *
645 646
 * NOTE: This routine doesn't need to take the socket lock since it only
 *       accesses socket information that is unchanging (or which changes in
647
 *       a completely predictable manner).
P
Per Liden 已提交
648
 */
649 650
static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
			int *uaddr_len, int peer)
P
Per Liden 已提交
651 652
{
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
653 654
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
655
	struct tipc_net *tn = net_generic(sock_net(sock->sk), tipc_net_id);
P
Per Liden 已提交
656

657
	memset(addr, 0, sizeof(*addr));
658
	if (peer) {
659
		if ((!tipc_sk_connected(sk)) &&
660
		    ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
661
			return -ENOTCONN;
662 663
		addr->addr.id.ref = tsk_peer_port(tsk);
		addr->addr.id.node = tsk_peer_node(tsk);
664
	} else {
665
		addr->addr.id.ref = tsk->portid;
666
		addr->addr.id.node = tn->own_addr;
667
	}
P
Per Liden 已提交
668 669 670 671 672 673 674

	*uaddr_len = sizeof(*addr);
	addr->addrtype = TIPC_ADDR_ID;
	addr->family = AF_TIPC;
	addr->scope = 0;
	addr->addr.name.domain = 0;

675
	return 0;
P
Per Liden 已提交
676 677 678
}

/**
679
 * tipc_poll - read and possibly block on pollmask
P
Per Liden 已提交
680 681 682 683
 * @file: file structure associated with the socket
 * @sock: socket for which to calculate the poll bits
 * @wait: ???
 *
684 685 686 687 688 689 690 691
 * Returns pollmask value
 *
 * COMMENTARY:
 * It appears that the usual socket locking mechanisms are not useful here
 * since the pollmask info is potentially out-of-date the moment this routine
 * exits.  TCP and other protocols seem to rely on higher level poll routines
 * to handle any preventable race conditions, so TIPC will do the same ...
 *
692 693 694
 * IMPORTANT: The fact that a read or write operation is indicated does NOT
 * imply that the operation will succeed, merely that it should be performed
 * and will not block.
P
Per Liden 已提交
695
 */
696 697
static unsigned int tipc_poll(struct file *file, struct socket *sock,
			      poll_table *wait)
P
Per Liden 已提交
698
{
699
	struct sock *sk = sock->sk;
700
	struct tipc_sock *tsk = tipc_sk(sk);
701
	u32 mask = 0;
702

703
	sock_poll_wait(file, sk_sleep(sk), wait);
704

705 706 707 708 709
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

710 711
	switch (sk->sk_state) {
	case TIPC_ESTABLISHED:
712
		if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
713
			mask |= POLLOUT;
714 715 716
		/* fall thru' */
	case TIPC_LISTEN:
	case TIPC_CONNECTING:
717 718
		if (!skb_queue_empty(&sk->sk_receive_queue))
			mask |= (POLLIN | POLLRDNORM);
719 720
		break;
	case TIPC_OPEN:
721
		if (!tsk->cong_link_cnt)
722 723 724 725 726 727 728 729
			mask |= POLLOUT;
		if (tipc_sk_type_connectionless(sk) &&
		    (!skb_queue_empty(&sk->sk_receive_queue)))
			mask |= (POLLIN | POLLRDNORM);
		break;
	case TIPC_DISCONNECTING:
		mask = (POLLIN | POLLRDNORM | POLLHUP);
		break;
730
	}
731 732

	return mask;
P
Per Liden 已提交
733 734
}

735 736 737 738
/**
 * tipc_sendmcast - send multicast message
 * @sock: socket structure
 * @seq: destination address
 * @msg: message to send
 * @dlen: length of data to send
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
			  struct msghdr *msg, size_t dlen, long timeout)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	int mtu = tipc_bcast_get_mtu(net);
	struct tipc_mc_method *method = &tsk->mc_method;
	u32 domain = addr_domain(net, TIPC_CLUSTER_SCOPE);
	struct sk_buff_head pkts;
	struct tipc_nlist dsts;
	int rc;

	/* Block or return if any destination link is congested */
	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
	if (unlikely(rc))
		return rc;

	/* Lookup destination nodes */
	tipc_nlist_init(&dsts, tipc_own_addr(net));
	tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower,
				      seq->upper, domain, &dsts);
	if (!dsts.local && !dsts.remote)
		return -EHOSTUNREACH;

	/* Build message header */
	msg_set_type(hdr, TIPC_MCAST_MSG);
	msg_set_hdr_sz(hdr, MCAST_H_SIZE);
	msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
	msg_set_destport(hdr, 0);
	msg_set_destnode(hdr, 0);
	msg_set_nametype(hdr, seq->type);
	msg_set_namelower(hdr, seq->lower);
	msg_set_nameupper(hdr, seq->upper);

	/* Build message as chain of buffers */
	skb_queue_head_init(&pkts);
	rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);

	/* Send message if build was successful */
	if (unlikely(rc == dlen))
		rc = tipc_mcast_xmit(net, &pkts, method, &dsts,
				     &tsk->cong_link_cnt);

	tipc_nlist_purge(&dsts);

	return rc ? rc : dlen;
}

796 797 798 799 800 801
/**
 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
 * @arrvq: queue with arriving messages, to be cloned after destination lookup
 * @inputq: queue with cloned messages, delivered to socket after dest lookup
 *
 * Multi-threaded: parallel calls with reference to same queues may occur
802
 */
803 804
void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
		       struct sk_buff_head *inputq)
805
{
806
	struct tipc_msg *msg;
807
	struct list_head dports;
808
	u32 portid;
809
	u32 scope = TIPC_CLUSTER_SCOPE;
810 811 812
	struct sk_buff_head tmpq;
	uint hsz;
	struct sk_buff *skb, *_skb;
813

814
	__skb_queue_head_init(&tmpq);
815
	INIT_LIST_HEAD(&dports);
816

817 818 819 820 821 822 823 824 825 826 827 828
	skb = tipc_skb_peek(arrvq, &inputq->lock);
	for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
		msg = buf_msg(skb);
		hsz = skb_headroom(skb) + msg_hdr_sz(msg);

		if (in_own_node(net, msg_orignode(msg)))
			scope = TIPC_NODE_SCOPE;

		/* Create destination port list and message clones: */
		tipc_nametbl_mc_translate(net,
					  msg_nametype(msg), msg_namelower(msg),
					  msg_nameupper(msg), scope, &dports);
829 830
		portid = u32_pop(&dports);
		for (; portid; portid = u32_pop(&dports)) {
831 832 833 834 835 836 837
			_skb = __pskb_copy(skb, hsz, GFP_ATOMIC);
			if (_skb) {
				msg_set_destport(buf_msg(_skb), portid);
				__skb_queue_tail(&tmpq, _skb);
				continue;
			}
			pr_warn("Failed to clone mcast rcv buffer\n");
838
		}
839 840 841 842 843 844 845 846 847
		/* Append to inputq if not already done by other thread */
		spin_lock_bh(&inputq->lock);
		if (skb_peek(arrvq) == skb) {
			skb_queue_splice_tail_init(&tmpq, inputq);
			kfree_skb(__skb_dequeue(arrvq));
		}
		spin_unlock_bh(&inputq->lock);
		__skb_queue_purge(&tmpq);
		kfree_skb(skb);
848
	}
849
	tipc_sk_rcv(net, inputq);
850 851
}

852
/**
J
Jon Maloy 已提交
853
 * tipc_sk_conn_proto_rcv - receive a connection mng protocol message
854
 * @tsk: receiving socket
855
 * @skb: pointer to message buffer.
856
 */
J
Jon Maloy 已提交
857 858
static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
				   struct sk_buff_head *xmitq)
859
{
860
	struct tipc_msg *hdr = buf_msg(skb);
J
Jon Maloy 已提交
861 862
	u32 onode = tsk_own_node(tsk);
	struct sock *sk = &tsk->sk;
863
	int mtyp = msg_type(hdr);
864
	bool conn_cong;
865

866
	/* Ignore if connection cannot be validated: */
867
	if (!tsk_peer_msg(tsk, hdr))
868 869
		goto exit;

870 871 872 873 874 875 876 877
	if (unlikely(msg_errcode(hdr))) {
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
		tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
				      tsk_peer_port(tsk));
		sk->sk_state_change(sk);
		goto exit;
	}

878
	tsk->probe_unacked = false;
879

880 881
	if (mtyp == CONN_PROBE) {
		msg_set_type(hdr, CONN_PROBE_REPLY);
J
Jon Paul Maloy 已提交
882 883
		if (tipc_msg_reverse(onode, &skb, TIPC_OK))
			__skb_queue_tail(xmitq, skb);
884 885
		return;
	} else if (mtyp == CONN_ACK) {
886
		conn_cong = tsk_conn_cong(tsk);
887 888 889
		tsk->snt_unacked -= msg_conn_ack(hdr);
		if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
			tsk->snd_win = msg_adv_win(hdr);
890
		if (conn_cong)
891 892 893
			sk->sk_write_space(sk);
	} else if (mtyp != CONN_PROBE_REPLY) {
		pr_warn("Received unknown CONN_PROTO msg\n");
894 895
	}
exit:
896
	kfree_skb(skb);
897 898
}

899 900 901 902
/* tipc_sk_top_evt - handle a topology event for this socket.
 * Currently a no-op; the event is ignored at this layer.
 */
static void tipc_sk_top_evt(struct tipc_sock *tsk, struct tipc_event *evt)
{
}

P
Per Liden 已提交
903
/**
904
 * tipc_sendmsg - send message in connectionless manner
P
Per Liden 已提交
905 906
 * @sock: socket structure
 * @m: message to send
907
 * @dsz: amount of user data to be sent
908
 *
P
Per Liden 已提交
909
 * Message must have an destination specified explicitly.
910
 * Used for SOCK_RDM and SOCK_DGRAM messages,
P
Per Liden 已提交
911 912
 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
913
 *
P
Per Liden 已提交
914 915
 * Returns the number of bytes sent on success, or errno otherwise
 */
916
static int tipc_sendmsg(struct socket *sock,
917
			struct msghdr *m, size_t dsz)
918 919 920 921 922 923 924 925 926 927 928
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
	ret = __tipc_sendmsg(sock, m, dsz);
	release_sock(sk);

	return ret;
}

929
/* __tipc_sendmsg - send a message in connectionless manner; socket locked.
 *
 * Resolves the destination (name lookup or direct port id), builds the
 * packet chain and transmits it, handling link congestion by queueing the
 * destination on the socket's congested-links list.
 *
 * Returns number of bytes sent (dlen) on success, or negative errno.
 */
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	struct list_head *clinks = &tsk->cong_links;
	/* SOCK_SEQPACKET/SOCK_STREAM: this datagram acts as an implicit SYN */
	bool syn = !tipc_sk_type_connectionless(sk);
	struct tipc_msg *hdr = &tsk->phdr;
	struct tipc_name_seq *seq;
	struct sk_buff_head pkts;
	u32 type, inst, domain;
	u32 dnode, dport;
	int mtu, rc;

	if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
		return -EMSGSIZE;

	/* No explicit destination: fall back to addr saved by connect() */
	if (unlikely(!dest)) {
		dest = &tsk->peer;
		if (!syn || dest->family != AF_TIPC)
			return -EDESTADDRREQ;
	}

	if (unlikely(m->msg_namelen < sizeof(*dest)))
		return -EINVAL;

	if (unlikely(dest->family != AF_TIPC))
		return -EINVAL;

	/* Implicit connection setup checks for connection-oriented types */
	if (unlikely(syn)) {
		if (sk->sk_state == TIPC_LISTEN)
			return -EPIPE;
		if (sk->sk_state != TIPC_OPEN)
			return -EISCONN;
		if (tsk->published)
			return -EOPNOTSUPP;
		if (dest->addrtype == TIPC_ADDR_NAME) {
			tsk->conn_type = dest->addr.name.name.type;
			tsk->conn_instance = dest->addr.name.name.instance;
		}
	}

	seq = &dest->addr.nameseq;
	if (dest->addrtype == TIPC_ADDR_MCAST)
		return tipc_sendmcast(sock, seq, m, dlen, timeout);

	if (dest->addrtype == TIPC_ADDR_NAME) {
		/* Named destination: translate name to node/port */
		type = dest->addr.name.name.type;
		inst = dest->addr.name.name.instance;
		domain = dest->addr.name.domain;
		dnode = domain;
		msg_set_type(hdr, TIPC_NAMED_MSG);
		msg_set_hdr_sz(hdr, NAMED_H_SIZE);
		msg_set_nametype(hdr, type);
		msg_set_nameinst(hdr, inst);
		msg_set_lookup_scope(hdr, tipc_addr_scope(domain));
		dport = tipc_nametbl_translate(net, type, inst, &dnode);
		msg_set_destnode(hdr, dnode);
		msg_set_destport(hdr, dport);
		if (unlikely(!dport && !dnode))
			return -EHOSTUNREACH;

	} else if (dest->addrtype == TIPC_ADDR_ID) {
		/* Direct destination: node/port given explicitly */
		dnode = dest->addr.id.node;
		msg_set_type(hdr, TIPC_DIRECT_MSG);
		msg_set_lookup_scope(hdr, 0);
		msg_set_destnode(hdr, dnode);
		msg_set_destport(hdr, dest->addr.id.ref);
		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
	}

	/* Block or return if destination link is congested */
	rc = tipc_wait_for_cond(sock, &timeout, !u32_find(clinks, dnode));
	if (unlikely(rc))
		return rc;

	skb_queue_head_init(&pkts);
	mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;

	rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
	if (unlikely(rc == -ELINKCONG)) {
		/* Link congested: remember it and report success to caller */
		u32_push(clinks, dnode);
		tsk->cong_link_cnt++;
		rc = 0;
	}

	if (unlikely(syn && !rc))
		tipc_set_sk_state(sk, TIPC_CONNECTING);

	return rc ? rc : dlen;
}

1026
/**
1027
 * tipc_sendstream - send stream-oriented data
P
Per Liden 已提交
1028
 * @sock: socket structure
1029 1030
 * @m: data to send
 * @dsz: total length of data to be transmitted
1031
 *
1032
 * Used for SOCK_STREAM data.
1033
 *
1034 1035
 * Returns the number of bytes sent on success (or partial success),
 * or errno if no data sent
P
Per Liden 已提交
1036
 */
1037
static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
1038 1039 1040 1041 1042
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
1043
	ret = __tipc_sendstream(sock, m, dsz);
1044 1045 1046 1047 1048
	release_sock(sk);

	return ret;
}

1049
/* __tipc_sendstream - send stream data in chunks; socket locked by caller.
 *
 * If a destination address is supplied the call is treated as an implicit
 * connection setup (SYN) and delegated to __tipc_sendmsg(). Otherwise data
 * is cut into TIPC_MAX_USER_MSG_SIZE chunks, each transmitted when neither
 * the link nor the peer connection is congested.
 *
 * Returns bytes sent so far if any were sent, else the error code.
 */
static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	struct sk_buff_head pkts;
	u32 dnode = tsk_peer_node(tsk);
	int send, sent = 0;
	int rc = 0;

	skb_queue_head_init(&pkts);

	if (unlikely(dlen > INT_MAX))
		return -EMSGSIZE;

	/* Handle implicit connection setup */
	if (unlikely(dest)) {
		rc = __tipc_sendmsg(sock, m, dlen);
		/* Account the SYN payload as unacked if fully sent */
		if (dlen && (dlen == rc))
			tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
		return rc;
	}

	do {
		/* Wait until link uncongested, peer window open, connected */
		rc = tipc_wait_for_cond(sock, &timeout,
					(!tsk->cong_link_cnt &&
					 !tsk_conn_cong(tsk) &&
					 tipc_sk_connected(sk)));
		if (unlikely(rc))
			break;

		send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
		rc = tipc_msg_build(hdr, m, sent, send, tsk->max_pkt, &pkts);
		if (unlikely(rc != send))
			break;

		rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
		if (unlikely(rc == -ELINKCONG)) {
			/* Congestion is not an error; retry on next loop */
			tsk->cong_link_cnt = 1;
			rc = 0;
		}
		if (likely(!rc)) {
			tsk->snt_unacked += tsk_inc(tsk, send + MIN_H_SIZE);
			sent += send;
		}
	} while (sent < dlen && !rc);

	return sent ? sent : rc;
}

1102
/**
1103
 * tipc_send_packet - send a connection-oriented message
P
Per Liden 已提交
1104
 * @sock: socket structure
1105 1106
 * @m: message to send
 * @dsz: length of data to be transmitted
1107
 *
1108
 * Used for SOCK_SEQPACKET messages.
1109
 *
1110
 * Returns the number of bytes sent on success, or errno otherwise
P
Per Liden 已提交
1111
 */
1112
static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
P
Per Liden 已提交
1113
{
1114 1115
	if (dsz > TIPC_MAX_USER_MSG_SIZE)
		return -EMSGSIZE;
P
Per Liden 已提交
1116

1117
	return tipc_sendstream(sock, m, dsz);
P
Per Liden 已提交
1118 1119
}

1120
/* tipc_sk_finish_conn - complete the setup of a connection
 * @tsk: socket whose connection is being finalized
 * @peer_port: port reference of the peer socket
 * @peer_node: network address of the peer node
 *
 * Points the socket's prebuilt message header at the peer, arms the
 * connection probe timer, registers the connection with the node layer,
 * and selects the flow control mode based on the peer's capabilities.
 */
static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
				u32 peer_node)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_msg *msg = &tsk->phdr;

	msg_set_destnode(msg, peer_node);
	msg_set_destport(msg, peer_port);
	msg_set_type(msg, TIPC_CONN_MSG);
	msg_set_lookup_scope(msg, 0);
	msg_set_hdr_sz(msg, SHORT_H_SIZE);

	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTERVAL);
	tipc_set_sk_state(sk, TIPC_ESTABLISHED);
	tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
	tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
	tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
	/* Peer supports block-based flow control: keep current windows */
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
		return;

	/* Fall back to message based flow control */
	tsk->rcv_win = FLOWCTL_MSG_WIN;
	tsk->snd_win = FLOWCTL_MSG_WIN;
}

/**
 * set_orig_addr - capture sender's address for received message
 * @m: descriptor for message info
 * @msg: received message header
1152
 *
P
Per Liden 已提交
1153 1154
 * Note: Address is not captured if not requested by receiver.
 */
S
Sam Ravnborg 已提交
1155
static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
P
Per Liden 已提交
1156
{
1157
	DECLARE_SOCKADDR(struct sockaddr_tipc *, addr, m->msg_name);
P
Per Liden 已提交
1158

1159
	if (addr) {
P
Per Liden 已提交
1160 1161
		addr->family = AF_TIPC;
		addr->addrtype = TIPC_ADDR_ID;
1162
		memset(&addr->addr, 0, sizeof(addr->addr));
P
Per Liden 已提交
1163 1164
		addr->addr.id.ref = msg_origport(msg);
		addr->addr.id.node = msg_orignode(msg);
1165 1166
		addr->addr.name.domain = 0;	/* could leave uninitialized */
		addr->scope = 0;		/* could leave uninitialized */
P
Per Liden 已提交
1167 1168 1169 1170 1171
		m->msg_namelen = sizeof(struct sockaddr_tipc);
	}
}

/**
 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
 * @m: descriptor for message info
 * @msg: received message header
 * @tsk: TIPC port associated with message
 *
 * Note: Ancillary data is not captured if not requested by receiver.
 *
 * Returns 0 if successful, otherwise errno
 */
static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
				 struct tipc_sock *tsk)
{
	u32 anc_data[3];
	u32 err;
	u32 dest_type;
	int has_name;
	int res;

	/* Receiver supplied no control buffer: nothing to do */
	if (likely(m->msg_controllen == 0))
		return 0;

	/* Optionally capture errored message object(s) */
	err = msg ? msg_errcode(msg) : 0;
	if (unlikely(err)) {
		/* TIPC_ERRINFO carries {error code, returned data length} */
		anc_data[0] = err;
		anc_data[1] = msg_data_sz(msg);
		res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
		if (res)
			return res;
		if (anc_data[1]) {
			/* TIPC_RETDATA carries the rejected payload itself */
			res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
				       msg_data(msg));
			if (res)
				return res;
		}
	}

	/* Optionally capture message destination object */
	dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
	switch (dest_type) {
	case TIPC_NAMED_MSG:
		/* Single-instance name: lower bound == upper bound */
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_namelower(msg);
		break;
	case TIPC_MCAST_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_nameupper(msg);
		break;
	case TIPC_CONN_MSG:
		/* Use the name captured at connection setup, if any */
		has_name = (tsk->conn_type != 0);
		anc_data[0] = tsk->conn_type;
		anc_data[1] = tsk->conn_instance;
		anc_data[2] = tsk->conn_instance;
		break;
	default:
		has_name = 0;
	}
	if (has_name) {
		res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
		if (res)
			return res;
	}

	return 0;
}

1242
/* tipc_sk_send_ack - send a flow control acknowledge to the peer
 * @tsk: connected socket whose read progress should be acknowledged
 *
 * Builds a CONN_ACK message carrying the number of unacked units read so
 * far and, for block-flow-control peers, the newly advertised window.
 * Silently does nothing if the socket is not connected or the message
 * cannot be allocated.
 */
static void tipc_sk_send_ack(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct sk_buff *skb = NULL;
	struct tipc_msg *msg;
	u32 peer_port = tsk_peer_port(tsk);
	u32 dnode = tsk_peer_node(tsk);

	if (!tipc_sk_connected(sk))
		return;
	skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
			      dnode, tsk_own_node(tsk), peer_port,
			      tsk->portid, TIPC_OK);
	if (!skb)
		return;
	msg = buf_msg(skb);
	msg_set_conn_ack(msg, tsk->rcv_unacked);
	tsk->rcv_unacked = 0;

	/* Adjust to and advertize the correct window limit */
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
		tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
		msg_set_adv_win(msg, tsk->rcv_win);
	}
	tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
}

1270
/* tipc_wait_for_rcvmsg - block until the receive queue is non-empty
 * @sock: socket to wait on (locked by caller)
 * @timeop: in/out remaining timeout in jiffies; updated on return
 *
 * Sleeps with the socket lock released, rechecking the receive queue,
 * shutdown state, pending signals and socket errors each iteration.
 *
 * Returns 0 when a message is available, otherwise a negative errno
 * (-ENOTCONN on RCV_SHUTDOWN, -EAGAIN on timeout, signal or sock error).
 */
static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT(wait);
	long timeo = *timeop;
	int err = sock_error(sk);

	if (err)
		return err;

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
			if (sk->sk_shutdown & RCV_SHUTDOWN) {
				err = -ENOTCONN;
				break;
			}
			/* Drop the lock while sleeping so senders can run */
			release_sock(sk);
			timeo = schedule_timeout(timeo);
			lock_sock(sk);
		}
		err = 0;
		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;

		err = sock_error(sk);
		if (err)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	*timeop = timeo;
	return err;
}

1310
/**
 * tipc_recvmsg - receive packet-oriented message
 * @sock: socket structure
 * @m: descriptor for message info
 * @buflen: length of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
 * If the complete message doesn't fit in user area, truncate it.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
			size_t buflen,	int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	bool connected = !tipc_sk_type_connectionless(sk);
	int rc, err, hlen, dlen, copy;
	long timeout;

	/* Catch invalid receive requests */
	if (unlikely(!buflen))
		return -EINVAL;

	lock_sock(sk);
	if (unlikely(connected && sk->sk_state == TIPC_OPEN)) {
		rc = -ENOTCONN;
		goto exit;
	}
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		/* Look at first msg in receive queue; wait if necessary */
		rc = tipc_wait_for_rcvmsg(sock, &timeout);
		if (unlikely(rc))
			goto exit;
		skb = skb_peek(&sk->sk_receive_queue);
		hdr = buf_msg(skb);
		dlen = msg_data_sz(hdr);
		hlen = msg_hdr_sz(hdr);
		err = msg_errcode(hdr);
		if (likely(dlen || err))
			break;
		/* Skip empty non-errored messages (e.g. connection setup) */
		tsk_advance_rx_queue(sk);
	} while (1);

	/* Collect msg meta data, including error code and rejected data */
	set_orig_addr(m, hdr);
	rc = tipc_sk_anc_data_recv(m, hdr, tsk);
	if (unlikely(rc))
		goto exit;

	/* Capture data if non-error msg, otherwise just set return value */
	if (likely(!err)) {
		copy = min_t(int, dlen, buflen);
		if (unlikely(copy != dlen))
			m->msg_flags |= MSG_TRUNC;
		rc = skb_copy_datagram_msg(skb, hlen, m, copy);
	} else {
		copy = 0;
		rc = 0;
		/* Error without control buffer to report it through */
		if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control)
			rc = -ECONNRESET;
	}
	if (unlikely(rc))
		goto exit;

	/* Capture of data or error code/rejected data was successful */
	if (unlikely(flags & MSG_PEEK))
		goto exit;

	tsk_advance_rx_queue(sk);
	if (likely(!connected))
		goto exit;

	/* Send connection flow control ack when applicable */
	tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
	if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
		tipc_sk_send_ack(tsk);
exit:
	release_sock(sk);
	return rc ? rc : copy;
}

1396
/**
 * tipc_recvstream - receive stream-oriented data
 * @sock: socket structure
 * @m: descriptor for message info
 * @buflen: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_STREAM messages only.  If not enough data is available
 * will optionally wait for more; never truncates data.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recvstream(struct socket *sock, struct msghdr *m,
			   size_t buflen, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct tipc_skb_cb *skb_cb;
	bool peek = flags & MSG_PEEK;
	int offset, required, copy, copied = 0;
	int hlen, dlen, err, rc;
	long timeout;

	/* Catch invalid receive attempts */
	if (unlikely(!buflen))
		return -EINVAL;

	lock_sock(sk);

	if (unlikely(sk->sk_state == TIPC_OPEN)) {
		rc = -ENOTCONN;
		goto exit;
	}
	/* Minimum amount to return before the loop may stop early */
	required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen);
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		/* Look at first msg in receive queue; wait if necessary */
		rc = tipc_wait_for_rcvmsg(sock, &timeout);
		if (unlikely(rc))
			break;
		skb = skb_peek(&sk->sk_receive_queue);
		skb_cb = TIPC_SKB_CB(skb);
		hdr = buf_msg(skb);
		dlen = msg_data_sz(hdr);
		hlen = msg_hdr_sz(hdr);
		err = msg_errcode(hdr);

		/* Discard any empty non-errored (SYN-) message */
		if (unlikely(!dlen && !err)) {
			tsk_advance_rx_queue(sk);
			continue;
		}

		/* Collect msg meta data, incl. error code and rejected data */
		if (!copied) {
			set_orig_addr(m, hdr);
			rc = tipc_sk_anc_data_recv(m, hdr, tsk);
			if (rc)
				break;
		}

		/* Copy data if msg ok, otherwise return error/partial data */
		if (likely(!err)) {
			/* bytes_read remembers progress within a message */
			offset = skb_cb->bytes_read;
			copy = min_t(int, dlen - offset, buflen - copied);
			rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
			if (unlikely(rc))
				break;
			copied += copy;
			offset += copy;
			if (unlikely(offset < dlen)) {
				/* Buffer full before message end: save spot */
				if (!peek)
					skb_cb->bytes_read = offset;
				break;
			}
		} else {
			rc = 0;
			if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control)
				rc = -ECONNRESET;
			if (copied || rc)
				break;
		}

		if (unlikely(peek))
			break;

		tsk_advance_rx_queue(sk);

		/* Send connection flow control advertisement when applicable */
		tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
		if (unlikely(tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE))
			tipc_sk_send_ack(tsk);

		/* Exit if all requested data or FIN/error received */
		if (copied == buflen || err)
			break;

	} while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required);
exit:
	release_sock(sk);
	return copied ? copied : rc;
}

1501 1502 1503 1504 1505 1506 1507 1508 1509 1510
/**
 * tipc_write_space - wake up thread if port congestion is released
 * @sk: socket
 *
 * Wakes POLLOUT sleepers on the socket's wait queue; the wait queue is
 * accessed under RCU since sk_wq may be swapped concurrently.
 */
static void tipc_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
						POLLWRNORM | POLLWRBAND);
	rcu_read_unlock();
}

/**
 * tipc_data_ready - wake up threads to indicate messages have been received
 * @sk: socket
 *
 * Wakes POLLIN sleepers on the socket's wait queue; mirrors
 * tipc_write_space() but for the read direction.
 */
static void tipc_data_ready(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
						POLLRDNORM | POLLRDBAND);
	rcu_read_unlock();
}

1534 1535 1536 1537 1538
/* tipc_sock_destruct - final cleanup when the sock is destroyed;
 * releases any buffers still sitting in the receive queue.
 */
static void tipc_sock_destruct(struct sock *sk)
{
	__skb_queue_purge(&sk->sk_receive_queue);
}

J
Jon Maloy 已提交
1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565
/* tipc_sk_proto_rcv - dispatch an incoming non-data (protocol) message
 * @sk: destination socket
 * @inputq: queue holding the message as its head
 * @xmitq: queue for any responses to be transmitted by the caller
 *
 * CONN_MANAGER messages are consumed by the connection protocol handler
 * (which owns the skb from then on); all other users are handled here and
 * the skb is freed before returning.
 */
static void tipc_sk_proto_rcv(struct sock *sk,
			      struct sk_buff_head *inputq,
			      struct sk_buff_head *xmitq)
{
	struct sk_buff *skb = __skb_dequeue(inputq);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = buf_msg(skb);

	switch (msg_user(hdr)) {
	case CONN_MANAGER:
		tipc_sk_conn_proto_rcv(tsk, skb, xmitq);
		return;
	case SOCK_WAKEUP:
		/* A congested link has cleared: drop it from our list */
		u32_del(&tsk->cong_links, msg_orignode(hdr));
		tsk->cong_link_cnt--;
		sk->sk_write_space(sk);
		break;
	case TOP_SRV:
		tipc_sk_top_evt(tsk, (void *)msg_data(hdr));
		break;
	default:
		break;
	}

	kfree_skb(skb);
}

1566
/**
 * tipc_sk_filter_connect - handle incoming message for a connection-based socket
 * @tsk: TIPC socket
 * @skb: pointer to message buffer
 *
 * Decides, based on the socket state machine, whether the message should
 * be accepted into the receive queue.
 *
 * Returns true if the message is acceptable ("everything ok"),
 * false otherwise (caller will reject/drop it)
 */
static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_msg *hdr = buf_msg(skb);
	u32 pport = msg_origport(hdr);
	u32 pnode = msg_orignode(hdr);

	/* Multicast never belongs on a connection-oriented socket */
	if (unlikely(msg_mcast(hdr)))
		return false;

	switch (sk->sk_state) {
	case TIPC_CONNECTING:
		/* Accept only ACK or NACK message */
		if (unlikely(!msg_connected(hdr))) {
			/* Unconnected msg must at least come from our peer */
			if (pport != tsk_peer_port(tsk) ||
			    pnode != tsk_peer_node(tsk))
				return false;

			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			sk->sk_err = ECONNREFUSED;
			sk->sk_state_change(sk);
			return true;
		}

		if (unlikely(msg_errcode(hdr))) {
			/* Peer rejected the connection attempt (NACK) */
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			sk->sk_err = ECONNREFUSED;
			sk->sk_state_change(sk);
			return true;
		}

		if (unlikely(!msg_isdata(hdr))) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			sk->sk_err = EINVAL;
			sk->sk_state_change(sk);
			return true;
		}

		tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr));
		msg_set_importance(&tsk->phdr, msg_importance(hdr));

		/* If 'ACK+' message, add to socket receive queue */
		if (msg_data_sz(hdr))
			return true;

		/* If empty 'ACK-' message, wake up sleeping connect() */
		sk->sk_data_ready(sk);

		/* 'ACK-' message is neither accepted nor rejected: */
		msg_set_dest_droppable(hdr, 1);
		return false;

	case TIPC_OPEN:
	case TIPC_DISCONNECTING:
		break;
	case TIPC_LISTEN:
		/* Accept only SYN message */
		if (!msg_connected(hdr) && !(msg_errcode(hdr)))
			return true;
		break;
	case TIPC_ESTABLISHED:
		/* Accept only connection-based messages sent by peer */
		if (unlikely(!tsk_peer_msg(tsk, hdr)))
			return false;

		if (unlikely(msg_errcode(hdr))) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			/* Let timer expire on it's own */
			tipc_node_remove_conn(net, tsk_peer_node(tsk),
					      tsk->portid);
			sk->sk_state_change(sk);
		}
		return true;
	default:
		pr_err("Unknown sk_state %u\n", sk->sk_state);
	}

	return false;
}

1654 1655 1656
/**
 * rcvbuf_limit - get proper overload limit of socket receive queue
 * @sk: socket
1657
 * @skb: message
1658
 *
1659 1660
 * For connection oriented messages, irrespective of importance,
 * default queue limit is 2 MB.
1661
 *
1662 1663
 * For connectionless messages, queue limits are based on message
 * importance as follows:
1664
 *
1665 1666 1667 1668
 * TIPC_LOW_IMPORTANCE       (2 MB)
 * TIPC_MEDIUM_IMPORTANCE    (4 MB)
 * TIPC_HIGH_IMPORTANCE      (8 MB)
 * TIPC_CRITICAL_IMPORTANCE  (16 MB)
1669 1670 1671
 *
 * Returns overload limit according to corresponding message importance
 */
1672
static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
1673
{
1674 1675 1676 1677 1678
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = buf_msg(skb);

	if (unlikely(!msg_connected(hdr)))
		return sk->sk_rcvbuf << msg_importance(hdr);
1679

1680 1681
	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
		return sk->sk_rcvbuf;
1682

1683
	return FLOWCTL_MSG_LIM;
1684 1685
}

1686
/**
 * tipc_sk_filter_rcv - validate incoming message
 * @sk: socket
 * @skb: pointer to message.
 * @xmitq: queue for responses/rejections to be sent by the caller
 *
 * Enqueues message on receive queue if acceptable; optionally handles
 * disconnect indication for a connected socket.
 *
 * Called with socket lock already taken
 */
static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
			       struct sk_buff_head *xmitq)
{
	bool sk_conn = !tipc_sk_type_connectionless(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = buf_msg(skb);
	struct net *net = sock_net(sk);
	struct sk_buff_head inputq;
	int limit, err = TIPC_OK;

	TIPC_SKB_CB(skb)->bytes_read = 0;
	__skb_queue_head_init(&inputq);
	__skb_queue_tail(&inputq, skb);

	/* Protocol messages may refill inputq with data to deliver */
	if (unlikely(!msg_isdata(hdr)))
		tipc_sk_proto_rcv(sk, &inputq, xmitq);
	else if (unlikely(msg_type(hdr) > TIPC_DIRECT_MSG))
		return kfree_skb(skb);

	/* Validate and add to receive buffer if there is space */
	while ((skb = __skb_dequeue(&inputq))) {
		hdr = buf_msg(skb);
		limit = rcvbuf_limit(sk, skb);
		if ((sk_conn && !tipc_sk_filter_connect(tsk, skb)) ||
		    (!sk_conn && msg_connected(hdr)))
			err = TIPC_ERR_NO_PORT;
		else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit)
			err = TIPC_ERR_OVERLOAD;

		if (unlikely(err)) {
			/* Bounce the message back to sender via xmitq */
			tipc_skb_reject(net, err, skb, xmitq);
			err = TIPC_OK;
			continue;
		}
		__skb_queue_tail(&sk->sk_receive_queue, skb);
		skb_set_owner_r(skb, sk);
		sk->sk_data_ready(sk);
	}
}
P
Per Liden 已提交
1736

1737
/**
 * tipc_sk_backlog_rcv - handle incoming message from backlog queue
 * @sk: socket
 * @skb: message
 *
 * Caller must hold socket lock
 *
 * Returns 0 (the backlog contract ignores the skb afterwards)
 */
static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	unsigned int before = sk_rmem_alloc_get(sk);
	struct sk_buff_head xmitq;
	unsigned int added;

	__skb_queue_head_init(&xmitq);

	tipc_sk_filter_rcv(sk, skb, &xmitq);
	/* Track bytes counted both in backlog and in rmem_alloc */
	added = sk_rmem_alloc_get(sk) - before;
	atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt);

	/* Send pending response/rejected messages, if any */
	tipc_node_distr_xmit(sock_net(sk), &xmitq);
	return 0;
}

1761
/**
 * tipc_sk_enqueue - extract all buffers with destination 'dport' from
 *                   inputq and try adding them to socket or backlog queue
 * @inputq: list of incoming buffers with potentially different destinations
 * @sk: socket where the buffers should be enqueued
 * @dport: port number for the socket
 * @xmitq: queue for rejection messages to be sent by the caller
 *
 * Caller must hold socket lock
 */
static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
			    u32 dport, struct sk_buff_head *xmitq)
{
	/* Bound the time spent here so other ports in inputq get served */
	unsigned long time_limit = jiffies + 2;
	struct sk_buff *skb;
	unsigned int lim;
	atomic_t *dcnt;
	u32 onode;

	while (skb_queue_len(inputq)) {
		if (unlikely(time_after_eq(jiffies, time_limit)))
			return;

		skb = tipc_skb_dequeue(inputq, dport);
		if (unlikely(!skb))
			return;

		/* Add message directly to receive queue if possible */
		if (!sock_owned_by_user(sk)) {
			tipc_sk_filter_rcv(sk, skb, xmitq);
			continue;
		}

		/* Try backlog, compensating for double-counted bytes */
		dcnt = &tipc_sk(sk)->dupl_rcvcnt;
		if (!sk->sk_backlog.len)
			atomic_set(dcnt, 0);
		lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
		if (likely(!sk_add_backlog(sk, skb, lim)))
			continue;

		/* Overload => reject message back to sender */
		onode = tipc_own_addr(sock_net(sk));
		if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
			__skb_queue_tail(xmitq, skb);
		break;
	}
}

1809
/**
 * tipc_sk_rcv - handle a chain of incoming buffers
 * @net: network namespace the buffers belong to
 * @inputq: buffer list containing the buffers
 * Consumes all buffers in list until inputq is empty
 * Note: may be called in multiple threads referring to the same queue
 */
void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
{
	struct sk_buff_head xmitq;
	u32 dnode, dport = 0;
	int err;
	struct tipc_sock *tsk;
	struct sock *sk;
	struct sk_buff *skb;

	__skb_queue_head_init(&xmitq);
	while (skb_queue_len(inputq)) {
		dport = tipc_skb_peek_port(inputq, dport);
		tsk = tipc_sk_lookup(net, dport);

		if (likely(tsk)) {
			sk = &tsk->sk;
			/* trylock: a concurrent thread holding the lock will
			 * drain this port's messages via its own backlog
			 */
			if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
				tipc_sk_enqueue(inputq, sk, dport, &xmitq);
				spin_unlock_bh(&sk->sk_lock.slock);
			}
			/* Send pending response/rejected messages, if any */
			tipc_node_distr_xmit(sock_net(sk), &xmitq);
			sock_put(sk);
			continue;
		}

		/* No destination socket => dequeue skb if still there */
		skb = tipc_skb_dequeue(inputq, dport);
		if (!skb)
			return;

		/* Try secondary lookup if unresolved named message */
		err = TIPC_ERR_NO_PORT;
		if (tipc_msg_lookup_dest(net, skb, &err))
			goto xmit;

		/* Prepare for message rejection */
		if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
			continue;
xmit:
		dnode = msg_destnode(buf_msg(skb));
		tipc_node_xmit_skb(net, skb, dnode, dport);
	}
}

Y
Ying Xue 已提交
1860 1861
/* tipc_wait_for_connect - block until the socket leaves TIPC_CONNECTING
 * @sock: socket performing the connect
 * @timeo_p: in/out remaining timeout in jiffies
 *
 * Returns 0 once the state changed, or a negative errno on socket error,
 * timeout (-ETIMEDOUT) or pending signal.
 */
static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct sock *sk = sock->sk;
	int done;

	do {
		int err = sock_error(sk);
		if (err)
			return err;
		if (!*timeo_p)
			return -ETIMEDOUT;
		if (signal_pending(current))
			return sock_intr_errno(*timeo_p);

		add_wait_queue(sk_sleep(sk), &wait);
		done = sk_wait_event(sk, timeo_p,
				     sk->sk_state != TIPC_CONNECTING, &wait);
		remove_wait_queue(sk_sleep(sk), &wait);
	} while (!done);
	return 0;
}

/**
 * tipc_connect - establish a connection to another TIPC port
 * @sock: socket structure
 * @dest: socket address for destination port
 * @destlen: size of socket address data structure
 * @flags: file-related flags associated with socket
 *
 * For SOCK_RDM/SOCK_DGRAM sockets this only records (or, for AF_UNSPEC,
 * clears) a default destination address; for connection-oriented sockets
 * it runs the SYN handshake.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_connect(struct socket *sock, struct sockaddr *dest,
			int destlen, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
	struct msghdr m = {NULL,};
	/* Non-blocking connect gets a zero timeout */
	long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
	int previous;
	int res = 0;

	if (destlen != sizeof(struct sockaddr_tipc))
		return -EINVAL;

	lock_sock(sk);

	/* AF_UNSPEC "disconnects" a connectionless socket's saved peer */
	if (dst->family == AF_UNSPEC) {
		memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
		if (!tipc_sk_type_connectionless(sk))
			res = -EINVAL;
		goto exit;
	} else if (dst->family != AF_TIPC) {
		res = -EINVAL;
	}
	if (dst->addrtype != TIPC_ADDR_ID && dst->addrtype != TIPC_ADDR_NAME)
		res = -EINVAL;
	if (res)
		goto exit;

	/* DGRAM/RDM connect(), just save the destaddr */
	if (tipc_sk_type_connectionless(sk)) {
		memcpy(&tsk->peer, dest, destlen);
		goto exit;
	}

	previous = sk->sk_state;

	switch (sk->sk_state) {
	case TIPC_OPEN:
		/* Send a 'SYN-' to destination */
		m.msg_name = dest;
		m.msg_namelen = destlen;

		/* If connect is in non-blocking case, set MSG_DONTWAIT to
		 * indicate send_msg() is never blocked.
		 */
		if (!timeout)
			m.msg_flags = MSG_DONTWAIT;

		res = __tipc_sendmsg(sock, &m, 0);
		if ((res < 0) && (res != -EWOULDBLOCK))
			goto exit;

		/* Just entered TIPC_CONNECTING state; the only
		 * difference is that return value in non-blocking
		 * case is EINPROGRESS, rather than EALREADY.
		 */
		res = -EINPROGRESS;
		/* fall thru' */
	case TIPC_CONNECTING:
		if (!timeout) {
			if (previous == TIPC_CONNECTING)
				res = -EALREADY;
			goto exit;
		}
		timeout = msecs_to_jiffies(timeout);
		/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
		res = tipc_wait_for_connect(sock, &timeout);
		break;
	case TIPC_ESTABLISHED:
		res = -EISCONN;
		break;
	default:
		res = -EINVAL;
	}

exit:
	release_sock(sk);

	return res;
}

1973
/**
1974
 * tipc_listen - allow socket to listen for incoming connections
P
Per Liden 已提交
1975 1976
 * @sock: socket structure
 * @len: (unused)
1977
 *
P
Per Liden 已提交
1978 1979
 * Returns 0 on success, errno otherwise
 */
1980
static int tipc_listen(struct socket *sock, int len)
P
Per Liden 已提交
1981
{
1982 1983 1984 1985
	struct sock *sk = sock->sk;
	int res;

	lock_sock(sk);
1986
	res = tipc_set_sk_state(sk, TIPC_LISTEN);
1987
	release_sock(sk);
1988

1989
	return res;
P
Per Liden 已提交
1990 1991
}

/**
 * tipc_wait_for_accept - block until a connection request is queued
 * @sock: listening socket
 * @timeo: maximum wait time in jiffies (0 = don't block)
 *
 * Called with the socket lock held; drops it while sleeping.
 * Returns 0 when the receive queue is non-empty, -EAGAIN on timeout,
 * or a signal-dependent errno if interrupted.
 */
static int tipc_wait_for_accept(struct socket *sock, long timeo)
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT(wait);
	int err;

	/* True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	*/
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
			/* Drop the lock while sleeping so senders can queue */
			release_sock(sk);
			timeo = schedule_timeout(timeo);
			lock_sock(sk);
		}
		err = 0;
		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}

/**
 * tipc_accept - wait for connection request
 * @sock: listening socket
 * @new_sock: new socket that is to be connected
 * @flags: file-related flags associated with socket
 * @kern: true if the new socket is created by the kernel
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
		       bool kern)
{
	struct sock *new_sk, *sk = sock->sk;
	struct sk_buff *buf;
	struct tipc_sock *new_tsock;
	struct tipc_msg *msg;
	long timeo;
	int res;

	lock_sock(sk);

	if (sk->sk_state != TIPC_LISTEN) {
		res = -EINVAL;
		goto exit;
	}
	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	res = tipc_wait_for_accept(sock, timeo);
	if (res)
		goto exit;

	/* Peek (don't consume yet) the SYN at the head of the queue */
	buf = skb_peek(&sk->sk_receive_queue);

	res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
	if (res)
		goto exit;
	security_sk_clone(sock->sk, new_sock->sk);

	new_sk = new_sock->sk;
	new_tsock = tipc_sk(new_sk);
	msg = buf_msg(buf);

	/* we lock on new_sk; but lockdep sees the lock on sk */
	lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);

	/*
	 * Reject any stray messages received by new socket
	 * before the socket lock was taken (very, very unlikely)
	 */
	tsk_rej_rx_queue(new_sk);

	/* Connect new socket to it's peer */
	tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));

	/* New connection inherits importance/naming from the SYN message */
	tsk_set_importance(new_tsock, msg_importance(msg));
	if (msg_named(msg)) {
		new_tsock->conn_type = msg_nametype(msg);
		new_tsock->conn_instance = msg_nameinst(msg);
	}

	/*
	 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
	 * Respond to 'SYN+' by queuing it on new socket.
	 */
	if (!msg_data_sz(msg)) {
		struct msghdr m = {NULL,};

		tsk_advance_rx_queue(sk);
		__tipc_sendstream(new_sock, &m, 0);
	} else {
		/* Move the data-carrying SYN over to the new socket */
		__skb_dequeue(&sk->sk_receive_queue);
		__skb_queue_head(&new_sk->sk_receive_queue, buf);
		skb_set_owner_r(buf, new_sk);
	}
	release_sock(new_sk);
exit:
	release_sock(sk);
	return res;
}

/**
 * tipc_shutdown - shutdown socket connection
 * @sock: socket structure
 * @how: direction to close (must be SHUT_RDWR)
 *
 * Terminates connection (if necessary), then purges socket's receive queue.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int res;

	/* TIPC only supports shutting down both directions at once */
	if (how != SHUT_RDWR)
		return -EINVAL;

	lock_sock(sk);

	__tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
	sk->sk_shutdown = SEND_SHUTDOWN;

	if (sk->sk_state == TIPC_DISCONNECTING) {
		/* Discard any unreceived messages */
		__skb_queue_purge(&sk->sk_receive_queue);

		/* Wake up anyone sleeping in poll */
		sk->sk_state_change(sk);
		res = 0;
	} else {
		res = -ENOTCONN;
	}

	release_sock(sk);
	return res;
}

2140
static void tipc_sk_timeout(unsigned long data)
2141
{
2142 2143
	struct tipc_sock *tsk = (struct tipc_sock *)data;
	struct sock *sk = &tsk->sk;
2144
	struct sk_buff *skb = NULL;
2145
	u32 peer_port, peer_node;
2146
	u32 own_node = tsk_own_node(tsk);
2147

J
Jon Paul Maloy 已提交
2148
	bh_lock_sock(sk);
2149
	if (!tipc_sk_connected(sk)) {
J
Jon Paul Maloy 已提交
2150 2151
		bh_unlock_sock(sk);
		goto exit;
2152
	}
2153 2154
	peer_port = tsk_peer_port(tsk);
	peer_node = tsk_peer_node(tsk);
2155

2156
	if (tsk->probe_unacked) {
2157
		if (!sock_owned_by_user(sk)) {
2158
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2159 2160 2161 2162 2163 2164 2165 2166
			tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
					      tsk_peer_port(tsk));
			sk->sk_state_change(sk);
		} else {
			/* Try again later */
			sk_reset_timer(sk, &sk->sk_timer, (HZ / 20));
		}

2167 2168
		bh_unlock_sock(sk);
		goto exit;
2169
	}
2170 2171 2172 2173

	skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE,
			      INT_H_SIZE, 0, peer_node, own_node,
			      peer_port, tsk->portid, TIPC_OK);
2174
	tsk->probe_unacked = true;
2175
	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTERVAL);
2176
	bh_unlock_sock(sk);
2177
	if (skb)
2178
		tipc_node_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid);
J
Jon Paul Maloy 已提交
2179
exit:
2180
	sock_put(sk);
2181 2182
}

/**
 * tipc_sk_publish - bind a name sequence to this socket
 * @tsk: socket to publish for
 * @scope: publication scope (node/cluster/zone)
 * @seq: name sequence (type/lower/upper) to publish
 *
 * Returns 0 on success, -EINVAL if the socket is connected or the name
 * table rejects the publication, -EADDRINUSE on key wrap-around.
 */
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
			   struct tipc_name_seq const *seq)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct publication *publ;
	u32 key;

	/* A connected socket cannot publish names */
	if (tipc_sk_connected(sk))
		return -EINVAL;
	key = tsk->portid + tsk->pub_count + 1;
	if (key == tsk->portid)
		return -EADDRINUSE;

	publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
				    scope, tsk->portid, key);
	if (unlikely(!publ))
		return -EINVAL;

	/* Track the publication on the socket for later withdrawal */
	list_add(&publ->pport_list, &tsk->publications);
	tsk->pub_count++;
	tsk->published = 1;
	return 0;
}

/**
 * tipc_sk_withdraw - withdraw name publication(s) from this socket
 * @tsk: socket to withdraw from
 * @scope: publication scope to match (when @seq is non-NULL)
 * @seq: specific name sequence to withdraw, or NULL to withdraw all
 *
 * Returns 0 if at least one publication was withdrawn, -EINVAL otherwise.
 */
static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
			    struct tipc_name_seq const *seq)
{
	struct net *net = sock_net(&tsk->sk);
	struct publication *publ;
	struct publication *safe;
	int rc = -EINVAL;

	list_for_each_entry_safe(publ, safe, &tsk->publications, pport_list) {
		if (seq) {
			/* Withdraw only the exactly matching publication */
			if (publ->scope != scope)
				continue;
			if (publ->type != seq->type)
				continue;
			if (publ->lower != seq->lower)
				continue;
			if (publ->upper != seq->upper)
				break;
			tipc_nametbl_withdraw(net, publ->type, publ->lower,
					      publ->ref, publ->key);
			rc = 0;
			break;
		}
		/* seq == NULL: withdraw every publication on the socket */
		tipc_nametbl_withdraw(net, publ->type, publ->lower,
				      publ->ref, publ->key);
		rc = 0;
	}
	if (list_empty(&tsk->publications))
		tsk->published = 0;
	return rc;
}

2240 2241 2242
/* tipc_sk_reinit: set non-zero address in all existing sockets
 *                 when we go from standalone to network mode.
 */
2243
void tipc_sk_reinit(struct net *net)
2244
{
2245
	struct tipc_net *tn = net_generic(net, tipc_net_id);
2246
	struct rhashtable_iter iter;
2247
	struct tipc_sock *tsk;
2248 2249
	struct tipc_msg *msg;

2250 2251 2252 2253
	rhashtable_walk_enter(&tn->sk_rht, &iter);

	do {
		tsk = ERR_PTR(rhashtable_walk_start(&iter));
2254 2255
		if (IS_ERR(tsk))
			goto walk_stop;
2256 2257

		while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
2258 2259
			spin_lock_bh(&tsk->sk.sk_lock.slock);
			msg = &tsk->phdr;
2260 2261
			msg_set_prevnode(msg, tn->own_addr);
			msg_set_orignode(msg, tn->own_addr);
2262 2263
			spin_unlock_bh(&tsk->sk.sk_lock.slock);
		}
2264
walk_stop:
2265 2266
		rhashtable_walk_stop(&iter);
	} while (tsk == ERR_PTR(-EAGAIN));
2267 2268
}

2269
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
2270
{
2271
	struct tipc_net *tn = net_generic(net, tipc_net_id);
2272
	struct tipc_sock *tsk;
2273

2274
	rcu_read_lock();
2275
	tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
2276 2277 2278
	if (tsk)
		sock_hold(&tsk->sk);
	rcu_read_unlock();
2279

2280
	return tsk;
2281 2282
}

/* tipc_sk_insert - assign a free port number to @tsk and insert it into
 * the per-netns socket hash table.
 *
 * Starts from a random port and probes linearly, wrapping within
 * [TIPC_MIN_PORT, TIPC_MAX_PORT]. Returns 0 on success, -1 if every
 * port in the range is taken.
 */
static int tipc_sk_insert(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
	u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;

	while (remaining--) {
		portid++;
		if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
			portid = TIPC_MIN_PORT;
		tsk->portid = portid;
		/* Hold a table reference; dropped again if insert fails */
		sock_hold(&tsk->sk);
		if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
						   tsk_rht_params))
			return 0;
		sock_put(&tsk->sk);
	}

	return -1;
}

/* tipc_sk_remove - remove @tsk from the socket hash table and drop the
 * reference the table held (taken in tipc_sk_insert()).
 */
static void tipc_sk_remove(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);

	if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
		/* Someone else must still hold a reference at this point */
		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
}

/* Hash table parameters for the portid -> tipc_sock lookup table */
static const struct rhashtable_params tsk_rht_params = {
	.nelem_hint = 192,
	.head_offset = offsetof(struct tipc_sock, node),
	.key_offset = offsetof(struct tipc_sock, portid),
	.key_len = sizeof(u32), /* portid */
	.max_size = 1048576,
	.min_size = 256,
	.automatic_shrinking = true,
};

/* tipc_sk_rht_init - initialize the per-netns socket hash table */
int tipc_sk_rht_init(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
}

/* tipc_sk_rht_destroy - tear down the per-netns socket hash table */
void tipc_sk_rht_destroy(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	/* Wait for socket readers to complete */
	synchronize_net();

	rhashtable_destroy(&tn->sk_rht);
}

/**
 * tipc_setsockopt - set socket option
 * @sock: socket structure
 * @lvl: option level
 * @opt: option identifier
 * @ov: pointer to new option value
 * @ol: length of option value
 *
 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
 * (to ease compatibility).
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
			   char __user *ov, unsigned int ol)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	u32 value = 0;
	int res = 0;

	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
		return 0;
	if (lvl != SOL_TIPC)
		return -ENOPROTOOPT;

	/* Copy in the option value (only for options that take one) */
	switch (opt) {
	case TIPC_IMPORTANCE:
	case TIPC_SRC_DROPPABLE:
	case TIPC_DEST_DROPPABLE:
	case TIPC_CONN_TIMEOUT:
		if (ol < sizeof(value))
			return -EINVAL;
		res = get_user(value, (u32 __user *)ov);
		if (res)
			return res;
		break;
	default:
		if (ov || ol)
			return -EINVAL;
	}

	lock_sock(sk);

	switch (opt) {
	case TIPC_IMPORTANCE:
		res = tsk_set_importance(tsk, value);
		break;
	case TIPC_SRC_DROPPABLE:
		if (sock->type != SOCK_STREAM)
			tsk_set_unreliable(tsk, value);
		else
			res = -ENOPROTOOPT;
		break;
	case TIPC_DEST_DROPPABLE:
		tsk_set_unreturnable(tsk, value);
		break;
	case TIPC_CONN_TIMEOUT:
		tipc_sk(sk)->conn_timeout = value;
		break;
	case TIPC_MCAST_BROADCAST:
		/* Force broadcast for multicast from this socket */
		tsk->mc_method.rcast = false;
		tsk->mc_method.mandatory = true;
		break;
	case TIPC_MCAST_REPLICAST:
		/* Force replicated unicast for multicast from this socket */
		tsk->mc_method.rcast = true;
		tsk->mc_method.mandatory = true;
		break;
	default:
		res = -EINVAL;
	}

	release_sock(sk);

	return res;
}

/**
 * tipc_getsockopt - get socket option
 * @sock: socket structure
 * @lvl: option level
 * @opt: option identifier
 * @ov: receptacle for option value
 * @ol: receptacle for length of option value
 *
 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
 * (to ease compatibility).
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
			   char __user *ov, int __user *ol)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	int len;
	u32 value;
	int res;

	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
		return put_user(0, ol);
	if (lvl != SOL_TIPC)
		return -ENOPROTOOPT;
	res = get_user(len, ol);
	if (res)
		return res;

	lock_sock(sk);

	/* res is 0 here; only the default case sets an error */
	switch (opt) {
	case TIPC_IMPORTANCE:
		value = tsk_importance(tsk);
		break;
	case TIPC_SRC_DROPPABLE:
		value = tsk_unreliable(tsk);
		break;
	case TIPC_DEST_DROPPABLE:
		value = tsk_unreturnable(tsk);
		break;
	case TIPC_CONN_TIMEOUT:
		value = tsk->conn_timeout;
		/* no need to set "res", since already 0 at this point */
		break;
	case TIPC_NODE_RECVQ_DEPTH:
		value = 0; /* was tipc_queue_size, now obsolete */
		break;
	case TIPC_SOCK_RECVQ_DEPTH:
		value = skb_queue_len(&sk->sk_receive_queue);
		break;
	default:
		res = -EINVAL;
	}

	release_sock(sk);

	if (res)
		return res;	/* "get" failed */

	if (len < sizeof(value))
		return -EINVAL;

	if (copy_to_user(ov, &value, sizeof(value)))
		return -EFAULT;

	return put_user(sizeof(value), ol);
}

/* tipc_ioctl - handle TIPC-specific ioctls.
 * Currently only SIOCGETLINKNAME: resolve a (bearer, peer) pair to a
 * link name, copied back to user space in the same request struct.
 */
static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct tipc_sioc_ln_req lnr;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case SIOCGETLINKNAME:
		if (copy_from_user(&lnr, argp, sizeof(lnr)))
			return -EFAULT;
		if (!tipc_node_get_linkname(sock_net(sk),
					    lnr.bearer_id & 0xffff, lnr.peer,
					    lnr.linkname, TIPC_MAX_LINK_NAME)) {
			if (copy_to_user(argp, &lnr, sizeof(lnr)))
				return -EFAULT;
			return 0;
		}
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}

/* tipc_socketpair - connect two local sockets to each other.
 * Fills in each socket's peer address with the other's portid on this
 * node, then finalizes both connections.
 */
static int tipc_socketpair(struct socket *sock1, struct socket *sock2)
{
	struct tipc_sock *tsk2 = tipc_sk(sock2->sk);
	struct tipc_sock *tsk1 = tipc_sk(sock1->sk);
	u32 onode = tipc_own_addr(sock_net(sock1->sk));

	tsk1->peer.family = AF_TIPC;
	tsk1->peer.addrtype = TIPC_ADDR_ID;
	tsk1->peer.scope = TIPC_NODE_SCOPE;
	tsk1->peer.addr.id.ref = tsk2->portid;
	tsk1->peer.addr.id.node = onode;
	tsk2->peer.family = AF_TIPC;
	tsk2->peer.addrtype = TIPC_ADDR_ID;
	tsk2->peer.scope = TIPC_NODE_SCOPE;
	tsk2->peer.addr.id.ref = tsk1->portid;
	tsk2->peer.addr.id.node = onode;

	tipc_sk_finish_conn(tsk1, tsk2->portid, onode);
	tipc_sk_finish_conn(tsk2, tsk1->portid, onode);
	return 0;
}

/* Protocol switches for the various types of TIPC sockets */

/* SOCK_RDM/SOCK_DGRAM: connectionless, no accept/listen */
static const struct proto_ops msg_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= tipc_socketpair,
	.accept		= sock_no_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
	.ioctl		= tipc_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_sendmsg,
	.recvmsg	= tipc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

/* SOCK_SEQPACKET: connection-oriented, message-preserving */
static const struct proto_ops packet_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= tipc_socketpair,
	.accept		= tipc_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
	.ioctl		= tipc_ioctl,
	.listen		= tipc_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_send_packet,
	.recvmsg	= tipc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

/* SOCK_STREAM: connection-oriented byte stream */
static const struct proto_ops stream_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= tipc_socketpair,
	.accept		= tipc_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
	.ioctl		= tipc_ioctl,
	.listen		= tipc_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_sendstream,
	.recvmsg	= tipc_recvstream,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

/* AF_TIPC address family registration */
static const struct net_proto_family tipc_family_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.create		= tipc_sk_create
};

/* Core protocol descriptor shared by all TIPC socket types */
static struct proto tipc_proto = {
	.name		= "TIPC",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct tipc_sock),
	.sysctl_rmem	= sysctl_tipc_rmem
};

/**
 * tipc_socket_init - initialize TIPC socket interface
 *
 * Registers the TIPC protocol and the AF_TIPC socket family; on partial
 * failure the protocol registration is rolled back.
 *
 * Returns 0 on success, errno otherwise
 */
int tipc_socket_init(void)
{
	int res;

	res = proto_register(&tipc_proto, 1);
	if (res) {
		pr_err("Failed to register TIPC protocol type\n");
		goto out;
	}

	res = sock_register(&tipc_family_ops);
	if (res) {
		pr_err("Failed to register TIPC socket type\n");
		proto_unregister(&tipc_proto);
		goto out;
	}
 out:
	return res;
}

/**
 * tipc_socket_stop - stop TIPC socket interface
 *
 * Unregisters the AF_TIPC family and the TIPC protocol (reverse order
 * of tipc_socket_init()).
 */
void tipc_socket_stop(void)
{
	sock_unregister(tipc_family_ops.family);
	proto_unregister(&tipc_proto);
}
2647 2648

/* Caller should hold socket lock for the passed tipc socket. */
/* Nest the connection attributes (peer node/port, optional name type and
 * instance) into a netlink dump message. Returns 0 or -EMSGSIZE.
 */
static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
{
	u32 peer_node;
	u32 peer_port;
	struct nlattr *nest;

	peer_node = tsk_peer_node(tsk);
	peer_port = tsk_peer_port(tsk);

	nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);

	if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
		goto msg_full;
	if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
		goto msg_full;

	if (tsk->conn_type != 0) {
		if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
			goto msg_full;
		if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
			goto msg_full;
		if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
			goto msg_full;
	}
	nla_nest_end(skb, nest);

	return 0;

msg_full:
	/* Roll back the partially-built nest on overflow */
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

/* Caller should hold socket lock for the passed tipc socket. */
/* Emit one socket entry (ref, node address, connection or publication
 * info) into the netlink dump. Returns 0 or -EMSGSIZE with all partial
 * output cancelled.
 */
static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
			    struct tipc_sock *tsk)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct sock *sk = &tsk->sk;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
	if (!hdr)
		goto msg_cancel;

	attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
	if (!attrs)
		goto genlmsg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tn->own_addr))
		goto attr_msg_cancel;

	if (tipc_sk_connected(sk)) {
		err = __tipc_nl_add_sk_con(skb, tsk);
		if (err)
			goto attr_msg_cancel;
	} else if (!list_empty(&tsk->publications)) {
		if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
			goto attr_msg_cancel;
	}
	nla_nest_end(skb, attrs);
	genlmsg_end(skb, hdr);

	return 0;

attr_msg_cancel:
	nla_nest_cancel(skb, attrs);
genlmsg_cancel:
	genlmsg_cancel(skb, hdr);
msg_cancel:
	return -EMSGSIZE;
}

/* tipc_nl_sk_dump - netlink dump of all TIPC sockets.
 * Walks the socket hash table under RCU, resuming from the bucket and
 * portid saved in cb->args[] on the previous pass.
 */
int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	struct tipc_sock *tsk;
	const struct bucket_table *tbl;
	struct rhash_head *pos;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	u32 tbl_id = cb->args[0];
	u32 prev_portid = cb->args[1];

	rcu_read_lock();
	tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
	for (; tbl_id < tbl->size; tbl_id++) {
		rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) {
			spin_lock_bh(&tsk->sk.sk_lock.slock);
			/* Skip entries until we reach the resume point */
			if (prev_portid && prev_portid != tsk->portid) {
				spin_unlock_bh(&tsk->sk.sk_lock.slock);
				continue;
			}

			err = __tipc_nl_add_sk(skb, cb, tsk);
			if (err) {
				/* skb full: remember where to resume */
				prev_portid = tsk->portid;
				spin_unlock_bh(&tsk->sk.sk_lock.slock);
				goto out;
			}
			prev_portid = 0;
			spin_unlock_bh(&tsk->sk.sk_lock.slock);
		}
	}
out:
	rcu_read_unlock();
	cb->args[0] = tbl_id;
	cb->args[1] = prev_portid;

	return skb->len;
}
2766 2767

/* Caller should hold socket lock for the passed tipc socket. */
/* Emit one publication (key, type, lower, upper) into the netlink dump.
 * Returns 0 or -EMSGSIZE with partial output cancelled.
 */
static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct publication *publ)
{
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
	if (!hdr)
		goto msg_cancel;

	attrs = nla_nest_start(skb, TIPC_NLA_PUBL);
	if (!attrs)
		goto genlmsg_cancel;

	if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
		goto attr_msg_cancel;

	nla_nest_end(skb, attrs);
	genlmsg_end(skb, hdr);

	return 0;

attr_msg_cancel:
	nla_nest_cancel(skb, attrs);
genlmsg_cancel:
	genlmsg_cancel(skb, hdr);
msg_cancel:
	return -EMSGSIZE;
}

/* Caller should hold socket lock for the passed tipc socket. */
/* Dump the socket's publications, resuming after the publication whose
 * key is *last_publ (0 = start from the beginning). On skb overflow the
 * key of the first un-dumped publication is stored back in *last_publ.
 */
static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
				  struct netlink_callback *cb,
				  struct tipc_sock *tsk, u32 *last_publ)
{
	int err;
	struct publication *p;

	if (*last_publ) {
		/* Find the resume point from the previous dump pass */
		list_for_each_entry(p, &tsk->publications, pport_list) {
			if (p->key == *last_publ)
				break;
		}
		if (p->key != *last_publ) {
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistence check to fail in the netlink callback
			 * handler. Resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			*last_publ = 0;
			return -EPIPE;
		}
	} else {
		p = list_first_entry(&tsk->publications, struct publication,
				     pport_list);
	}

	list_for_each_entry_from(p, &tsk->publications, pport_list) {
		err = __tipc_nl_add_sk_publ(skb, cb, p);
		if (err) {
			*last_publ = p->key;
			return err;
		}
	}
	*last_publ = 0;

	return 0;
}

/* tipc_nl_publ_dump - netlink dump of one socket's publications.
 * The target socket ref is taken from the request attributes on the
 * first pass and cached in cb->args[0]; cb->args[1]/[2] hold the
 * resume key and completion flag across passes.
 */
int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	u32 tsk_portid = cb->args[0];
	u32 last_publ = cb->args[1];
	u32 done = cb->args[2];
	struct net *net = sock_net(skb->sk);
	struct tipc_sock *tsk;

	if (!tsk_portid) {
		/* First pass: parse the socket ref from the request */
		struct nlattr **attrs;
		struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];

		err = tipc_nlmsg_parse(cb->nlh, &attrs);
		if (err)
			return err;

		if (!attrs[TIPC_NLA_SOCK])
			return -EINVAL;

		err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
				       attrs[TIPC_NLA_SOCK],
				       tipc_nl_sock_policy, NULL);
		if (err)
			return err;

		if (!sock[TIPC_NLA_SOCK_REF])
			return -EINVAL;

		tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
	}

	if (done)
		return 0;

	tsk = tipc_sk_lookup(net, tsk_portid);
	if (!tsk)
		return -EINVAL;

	lock_sock(&tsk->sk);
	err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
	if (!err)
		done = 1;
	release_sock(&tsk->sk);
	sock_put(&tsk->sk);	/* drop ref taken by tipc_sk_lookup() */

	cb->args[0] = tsk_portid;
	cb->args[1] = last_publ;
	cb->args[2] = done;

	return skb->len;
}