socket.c 72.0 KB
Newer Older
P
Per Liden 已提交
1
/*
2
 * net/tipc/socket.c: TIPC socket API
3
 *
4
 * Copyright (c) 2001-2007, 2012-2015, Ericsson AB
5
 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
P
Per Liden 已提交
6 7
 * All rights reserved.
 *
P
Per Liden 已提交
8
 * Redistribution and use in source and binary forms, with or without
P
Per Liden 已提交
9 10
 * modification, are permitted provided that the following conditions are met:
 *
P
Per Liden 已提交
11 12 13 14 15 16 17 18
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
P
Per Liden 已提交
19
 *
P
Per Liden 已提交
20 21 22 23 24 25 26 27 28 29 30 31 32 33
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
P
Per Liden 已提交
34 35 36
 * POSSIBILITY OF SUCH DAMAGE.
 */

37
#include <linux/rhashtable.h>
P
Per Liden 已提交
38
#include "core.h"
39
#include "name_table.h"
E
Erik Hugne 已提交
40
#include "node.h"
41
#include "link.h"
42
#include "name_distr.h"
43
#include "socket.h"
44
#include "bcast.h"
45

46 47
#define SS_LISTENING		-1	/* socket is listening */
#define SS_READY		-2	/* socket is connectionless */
P
Per Liden 已提交
48

49
#define CONN_TIMEOUT_DEFAULT	8000	/* default connect timeout = 8s */
50
#define CONN_PROBING_INTERVAL	msecs_to_jiffies(3600000)  /* [ms] => 1 h */
51 52 53 54 55
#define TIPC_FWD_MSG		1
#define TIPC_CONN_OK		0
#define TIPC_CONN_PROBING	1
#define TIPC_MAX_PORT		0xffffffff
#define TIPC_MIN_PORT		1
56 57 58 59 60 61 62 63 64

/**
 * struct tipc_sock - TIPC socket structure
 * @sk: socket - interacts with 'port' and with user via the socket API
 * @connected: non-zero if port is currently connected to a peer port
 * @conn_type: TIPC type used when connection was established
 * @conn_instance: TIPC instance used when connection was established
 * @published: non-zero if port has one or more associated names
 * @max_pkt: maximum packet size "hint" used when building messages sent by port
65
 * @portid: unique port identity in TIPC socket hash table
66 67 68 69 70
 * @phdr: preformatted message header used when sending messages
 * @port_list: adjacent ports in TIPC's global list of ports
 * @publications: list of publications for port
 * @pub_count: total # of publications port has made during its lifetime
 * @probing_state:
71
 * @probing_intv:
72 73 74 75 76
 * @conn_timeout: the time we can wait for an unresponded setup request
 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
 * @link_cong: non-zero if owner must sleep because of link congestion
 * @sent_unacked: # messages sent by socket, and not yet acked by peer
 * @rcv_unacked: # messages read by user, but not yet acked back to peer
77
 * @remote: 'connected' peer for dgram/rdm
78 79
 * @node: hash table node
 * @rcu: rcu struct for tipc_sock
80 81 82 83 84 85 86 87
 */
struct tipc_sock {
	struct sock sk;
	int connected;
	u32 conn_type;
	u32 conn_instance;
	int published;
	u32 max_pkt;
88
	u32 portid;
89 90 91 92 93
	struct tipc_msg phdr;
	struct list_head sock_list;
	struct list_head publications;
	u32 pub_count;
	u32 probing_state;
94
	unsigned long probing_intv;
95 96 97 98 99
	uint conn_timeout;
	atomic_t dupl_rcvcnt;
	bool link_cong;
	uint sent_unacked;
	uint rcv_unacked;
100
	struct sockaddr_tipc remote;
101 102
	struct rhash_head node;
	struct rcu_head rcu;
103
};
P
Per Liden 已提交
104

105
static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
106
static void tipc_data_ready(struct sock *sk);
107
static void tipc_write_space(struct sock *sk);
108 109
static int tipc_release(struct socket *sock);
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
110
static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p);
111
static void tipc_sk_timeout(unsigned long data);
112
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
J
Jon Paul Maloy 已提交
113
			   struct tipc_name_seq const *seq);
114
static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
J
Jon Paul Maloy 已提交
115
			    struct tipc_name_seq const *seq);
116
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
117 118
static int tipc_sk_insert(struct tipc_sock *tsk);
static void tipc_sk_remove(struct tipc_sock *tsk);
119 120 121
static int __tipc_send_stream(struct socket *sock, struct msghdr *m,
			      size_t dsz);
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
P
Per Liden 已提交
122

123 124 125
static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
static const struct proto_ops msg_ops;
P
Per Liden 已提交
126 127
static struct proto tipc_proto;

128 129 130 131 132 133 134 135
static const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = {
	[TIPC_NLA_SOCK_UNSPEC]		= { .type = NLA_UNSPEC },
	[TIPC_NLA_SOCK_ADDR]		= { .type = NLA_U32 },
	[TIPC_NLA_SOCK_REF]		= { .type = NLA_U32 },
	[TIPC_NLA_SOCK_CON]		= { .type = NLA_NESTED },
	[TIPC_NLA_SOCK_HAS_PUBL]	= { .type = NLA_FLAG }
};

136 137
static const struct rhashtable_params tsk_rht_params;

138
/*
139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182
 * Revised TIPC socket locking policy:
 *
 * Most socket operations take the standard socket lock when they start
 * and hold it until they finish (or until they need to sleep).  Acquiring
 * this lock grants the owner exclusive access to the fields of the socket
 * data structures, with the exception of the backlog queue.  A few socket
 * operations can be done without taking the socket lock because they only
 * read socket information that never changes during the life of the socket.
 *
 * Socket operations may acquire the lock for the associated TIPC port if they
 * need to perform an operation on the port.  If any routine needs to acquire
 * both the socket lock and the port lock it must take the socket lock first
 * to avoid the risk of deadlock.
 *
 * The dispatcher handling incoming messages cannot grab the socket lock in
 * the standard fashion, since invoked it runs at the BH level and cannot block.
 * Instead, it checks to see if the socket lock is currently owned by someone,
 * and either handles the message itself or adds it to the socket's backlog
 * queue; in the latter case the queued message is processed once the process
 * owning the socket lock releases it.
 *
 * NOTE: Releasing the socket lock while an operation is sleeping overcomes
 * the problem of a blocked socket operation preventing any other operations
 * from occurring.  However, applications must be careful if they have
 * multiple threads trying to send (or receive) on the same socket, as these
 * operations might interfere with each other.  For example, doing a connect
 * and a receive at the same time might allow the receive to consume the
 * ACK message meant for the connect.  While additional work could be done
 * to try and overcome this, it doesn't seem to be worthwhile at the present.
 *
 * NOTE: Releasing the socket lock while an operation is sleeping also ensures
 * that another operation that must be performed in a non-blocking manner is
 * not delayed for very long because the lock has already been taken.
 *
 * NOTE: This code assumes that certain fields of a port/socket pair are
 * constant over its lifetime; such fields can be examined without taking
 * the socket lock and/or port lock, and do not need to be re-read even
 * after resuming processing after waiting.  These fields include:
 *   - socket type
 *   - pointer to socket sk structure (aka tipc_sock structure)
 *   - pointer to port structure
 *   - port reference
 */

183 184 185 186 187
static u32 tsk_own_node(struct tipc_sock *tsk)
{
	return msg_prevnode(&tsk->phdr);
}

188
static u32 tsk_peer_node(struct tipc_sock *tsk)
189
{
190
	return msg_destnode(&tsk->phdr);
191 192
}

193
static u32 tsk_peer_port(struct tipc_sock *tsk)
194
{
195
	return msg_destport(&tsk->phdr);
196 197
}

198
static  bool tsk_unreliable(struct tipc_sock *tsk)
199
{
200
	return msg_src_droppable(&tsk->phdr) != 0;
201 202
}

203
static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
204
{
205
	msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
206 207
}

208
static bool tsk_unreturnable(struct tipc_sock *tsk)
209
{
210
	return msg_dest_droppable(&tsk->phdr) != 0;
211 212
}

213
static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
214
{
215
	msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
216 217
}

218
static int tsk_importance(struct tipc_sock *tsk)
219
{
220
	return msg_importance(&tsk->phdr);
221 222
}

223
static int tsk_set_importance(struct tipc_sock *tsk, int imp)
224 225 226
{
	if (imp > TIPC_CRITICAL_IMPORTANCE)
		return -EINVAL;
227
	msg_set_importance(&tsk->phdr, (u32)imp);
228 229
	return 0;
}
230

231 232 233 234 235 236 237 238 239 240
static struct tipc_sock *tipc_sk(const struct sock *sk)
{
	return container_of(sk, struct tipc_sock, sk);
}

static int tsk_conn_cong(struct tipc_sock *tsk)
{
	return tsk->sent_unacked >= TIPC_FLOWCTRL_WIN;
}

241
/**
242
 * tsk_advance_rx_queue - discard first buffer in socket receive queue
243 244
 *
 * Caller must hold socket lock
P
Per Liden 已提交
245
 */
246
static void tsk_advance_rx_queue(struct sock *sk)
P
Per Liden 已提交
247
{
248
	kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
P
Per Liden 已提交
249 250
}

251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266
/* tipc_sk_respond() : send response message back to sender
 */
static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
{
	u32 selector;
	u32 dnode;
	u32 onode = tipc_own_addr(sock_net(sk));

	if (!tipc_msg_reverse(onode, &skb, err))
		return;

	dnode = msg_destnode(buf_msg(skb));
	selector = msg_origport(buf_msg(skb));
	tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
}

P
Per Liden 已提交
267
/**
268
 * tsk_rej_rx_queue - reject all buffers in socket receive queue
269 270
 *
 * Caller must hold socket lock
P
Per Liden 已提交
271
 */
272
static void tsk_rej_rx_queue(struct sock *sk)
P
Per Liden 已提交
273
{
274
	struct sk_buff *skb;
275

276 277
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
		tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
P
Per Liden 已提交
278 279
}

280
/* tsk_peer_msg - verify if message was sent by connected port's peer
J
Jon Paul Maloy 已提交
281 282 283 284
 *
 * Handles cases where the node's network address has changed from
 * the default of <0.0.0> to its configured setting.
 */
285
static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
J
Jon Paul Maloy 已提交
286
{
287
	struct tipc_net *tn = net_generic(sock_net(&tsk->sk), tipc_net_id);
288
	u32 peer_port = tsk_peer_port(tsk);
J
Jon Paul Maloy 已提交
289 290 291
	u32 orig_node;
	u32 peer_node;

292
	if (unlikely(!tsk->connected))
J
Jon Paul Maloy 已提交
293 294 295 296 297 298
		return false;

	if (unlikely(msg_origport(msg) != peer_port))
		return false;

	orig_node = msg_orignode(msg);
299
	peer_node = tsk_peer_node(tsk);
J
Jon Paul Maloy 已提交
300 301 302 303

	if (likely(orig_node == peer_node))
		return true;

304
	if (!orig_node && (peer_node == tn->own_addr))
J
Jon Paul Maloy 已提交
305 306
		return true;

307
	if (!peer_node && (orig_node == tn->own_addr))
J
Jon Paul Maloy 已提交
308 309 310 311 312
		return true;

	return false;
}

P
Per Liden 已提交
313
/**
314
 * tipc_sk_create - create a TIPC socket
315
 * @net: network namespace (must be default network)
P
Per Liden 已提交
316 317
 * @sock: pre-allocated socket structure
 * @protocol: protocol indicator (must be 0)
318
 * @kern: caused by kernel or by userspace?
319
 *
320 321
 * This routine creates additional data structures used by the TIPC socket,
 * initializes them, and links them together.
P
Per Liden 已提交
322 323 324
 *
 * Returns 0 on success, errno otherwise
 */
325 326
static int tipc_sk_create(struct net *net, struct socket *sock,
			  int protocol, int kern)
P
Per Liden 已提交
327
{
328
	struct tipc_net *tn;
329 330
	const struct proto_ops *ops;
	socket_state state;
P
Per Liden 已提交
331
	struct sock *sk;
332
	struct tipc_sock *tsk;
333
	struct tipc_msg *msg;
334 335

	/* Validate arguments */
P
Per Liden 已提交
336 337 338 339 340
	if (unlikely(protocol != 0))
		return -EPROTONOSUPPORT;

	switch (sock->type) {
	case SOCK_STREAM:
341 342
		ops = &stream_ops;
		state = SS_UNCONNECTED;
P
Per Liden 已提交
343 344
		break;
	case SOCK_SEQPACKET:
345 346
		ops = &packet_ops;
		state = SS_UNCONNECTED;
P
Per Liden 已提交
347 348 349
		break;
	case SOCK_DGRAM:
	case SOCK_RDM:
350 351
		ops = &msg_ops;
		state = SS_READY;
P
Per Liden 已提交
352
		break;
353 354
	default:
		return -EPROTOTYPE;
P
Per Liden 已提交
355 356
	}

357
	/* Allocate socket's protocol area */
358
	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
359
	if (sk == NULL)
P
Per Liden 已提交
360 361
		return -ENOMEM;

362
	tsk = tipc_sk(sk);
363 364 365
	tsk->max_pkt = MAX_PKT_DEFAULT;
	INIT_LIST_HEAD(&tsk->publications);
	msg = &tsk->phdr;
366 367
	tn = net_generic(sock_net(sk), tipc_net_id);
	tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
368
		      NAMED_H_SIZE, 0);
P
Per Liden 已提交
369

370 371 372 373
	/* Finish initializing socket data structures */
	sock->ops = ops;
	sock->state = state;
	sock_init_data(sock, sk);
374 375 376 377 378
	if (tipc_sk_insert(tsk)) {
		pr_warn("Socket create failed; port numbrer exhausted\n");
		return -EINVAL;
	}
	msg_set_origport(msg, tsk->portid);
379
	setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk);
380
	sk->sk_backlog_rcv = tipc_backlog_rcv;
381
	sk->sk_rcvbuf = sysctl_tipc_rmem[1];
382 383
	sk->sk_data_ready = tipc_data_ready;
	sk->sk_write_space = tipc_write_space;
384
	tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
385
	tsk->sent_unacked = 0;
386
	atomic_set(&tsk->dupl_rcvcnt, 0);
387

388
	if (sock->state == SS_READY) {
389
		tsk_set_unreturnable(tsk, true);
390
		if (sock->type == SOCK_DGRAM)
391
			tsk_set_unreliable(tsk, true);
392
	}
P
Per Liden 已提交
393 394 395
	return 0;
}

396 397 398 399 400 401 402
static void tipc_sk_callback(struct rcu_head *head)
{
	struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);

	sock_put(&tsk->sk);
}

P
Per Liden 已提交
403
/**
404
 * tipc_release - destroy a TIPC socket
P
Per Liden 已提交
405 406 407 408 409 410 411
 * @sock: socket to destroy
 *
 * This routine cleans up any messages that are still queued on the socket.
 * For DGRAM and RDM socket types, all queued messages are rejected.
 * For SEQPACKET and STREAM socket types, the first message is rejected
 * and any others are discarded.  (If the first message on a STREAM socket
 * is partially-read, it is discarded and the next one is rejected instead.)
412
 *
P
Per Liden 已提交
413 414 415 416 417 418
 * NOTE: Rejected messages are not necessarily returned to the sender!  They
 * are returned or discarded according to the "destination droppable" setting
 * specified for the message by the sender.
 *
 * Returns 0 on success, errno otherwise
 */
419
static int tipc_release(struct socket *sock)
P
Per Liden 已提交
420 421
{
	struct sock *sk = sock->sk;
422
	struct net *net;
423
	struct tipc_sock *tsk;
424
	struct sk_buff *skb;
425
	u32 dnode;
P
Per Liden 已提交
426

427 428 429 430 431
	/*
	 * Exit if socket isn't fully initialized (occurs when a failed accept()
	 * releases a pre-allocated child socket that was never used)
	 */
	if (sk == NULL)
P
Per Liden 已提交
432
		return 0;
433

434
	net = sock_net(sk);
435
	tsk = tipc_sk(sk);
436 437 438 439 440 441
	lock_sock(sk);

	/*
	 * Reject all unreceived messages, except on an active connection
	 * (which disconnects locally & sends a 'FIN+' to peer)
	 */
442
	dnode = tsk_peer_node(tsk);
P
Per Liden 已提交
443
	while (sock->state != SS_DISCONNECTING) {
444 445
		skb = __skb_dequeue(&sk->sk_receive_queue);
		if (skb == NULL)
P
Per Liden 已提交
446
			break;
447 448
		if (TIPC_SKB_CB(skb)->handle != NULL)
			kfree_skb(skb);
449 450 451 452
		else {
			if ((sock->state == SS_CONNECTING) ||
			    (sock->state == SS_CONNECTED)) {
				sock->state = SS_DISCONNECTING;
453
				tsk->connected = 0;
454
				tipc_node_remove_conn(net, dnode, tsk->portid);
455
			}
456
			tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
457
		}
P
Per Liden 已提交
458 459
	}

460
	tipc_sk_withdraw(tsk, 0, NULL);
461
	sk_stop_timer(sk, &sk->sk_timer);
462
	tipc_sk_remove(tsk);
463
	if (tsk->connected) {
464
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
465
				      TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
466
				      tsk_own_node(tsk), tsk_peer_port(tsk),
467
				      tsk->portid, TIPC_ERR_NO_PORT);
468
		if (skb)
469
			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
470
		tipc_node_remove_conn(net, dnode, tsk->portid);
471
	}
P
Per Liden 已提交
472

473
	/* Discard any remaining (connection-based) messages in receive queue */
474
	__skb_queue_purge(&sk->sk_receive_queue);
P
Per Liden 已提交
475

476 477 478
	/* Reject any messages that accumulated in backlog queue */
	sock->state = SS_DISCONNECTING;
	release_sock(sk);
479 480

	call_rcu(&tsk->rcu, tipc_sk_callback);
481
	sock->sk = NULL;
P
Per Liden 已提交
482

483
	return 0;
P
Per Liden 已提交
484 485 486
}

/**
487
 * tipc_bind - associate or disassocate TIPC name(s) with a socket
P
Per Liden 已提交
488 489 490
 * @sock: socket structure
 * @uaddr: socket address describing name(s) and desired operation
 * @uaddr_len: size of socket address data structure
491
 *
P
Per Liden 已提交
492 493 494
 * Name and name sequence binding is indicated using a positive scope value;
 * a negative scope value unbinds the specified name.  Specifying no name
 * (i.e. a socket address length of 0) unbinds all names from the socket.
495
 *
P
Per Liden 已提交
496
 * Returns 0 on success, errno otherwise
497 498 499
 *
 * NOTE: This routine doesn't need to take the socket lock since it doesn't
 *       access any non-constant socket information.
P
Per Liden 已提交
500
 */
501 502
static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
		     int uaddr_len)
P
Per Liden 已提交
503
{
504
	struct sock *sk = sock->sk;
P
Per Liden 已提交
505
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
506
	struct tipc_sock *tsk = tipc_sk(sk);
507
	int res = -EINVAL;
P
Per Liden 已提交
508

509 510
	lock_sock(sk);
	if (unlikely(!uaddr_len)) {
511
		res = tipc_sk_withdraw(tsk, 0, NULL);
512 513
		goto exit;
	}
514

515 516 517 518 519 520 521 522
	if (uaddr_len < sizeof(struct sockaddr_tipc)) {
		res = -EINVAL;
		goto exit;
	}
	if (addr->family != AF_TIPC) {
		res = -EAFNOSUPPORT;
		goto exit;
	}
P
Per Liden 已提交
523 524 525

	if (addr->addrtype == TIPC_ADDR_NAME)
		addr->addr.nameseq.upper = addr->addr.nameseq.lower;
526 527 528 529
	else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
		res = -EAFNOSUPPORT;
		goto exit;
	}
530

531
	if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
532
	    (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
533 534 535 536
	    (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
		res = -EACCES;
		goto exit;
	}
537

538
	res = (addr->scope > 0) ?
539 540
		tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
		tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
541 542 543
exit:
	release_sock(sk);
	return res;
P
Per Liden 已提交
544 545
}

546
/**
547
 * tipc_getname - get port ID of socket or peer socket
P
Per Liden 已提交
548 549 550
 * @sock: socket structure
 * @uaddr: area for returned socket address
 * @uaddr_len: area for returned length of socket address
551
 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
552
 *
P
Per Liden 已提交
553
 * Returns 0 on success, errno otherwise
554
 *
555 556
 * NOTE: This routine doesn't need to take the socket lock since it only
 *       accesses socket information that is unchanging (or which changes in
557
 *       a completely predictable manner).
P
Per Liden 已提交
558
 */
559 560
static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
			int *uaddr_len, int peer)
P
Per Liden 已提交
561 562
{
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
563
	struct tipc_sock *tsk = tipc_sk(sock->sk);
564
	struct tipc_net *tn = net_generic(sock_net(sock->sk), tipc_net_id);
P
Per Liden 已提交
565

566
	memset(addr, 0, sizeof(*addr));
567
	if (peer) {
568 569 570
		if ((sock->state != SS_CONNECTED) &&
			((peer != 2) || (sock->state != SS_DISCONNECTING)))
			return -ENOTCONN;
571 572
		addr->addr.id.ref = tsk_peer_port(tsk);
		addr->addr.id.node = tsk_peer_node(tsk);
573
	} else {
574
		addr->addr.id.ref = tsk->portid;
575
		addr->addr.id.node = tn->own_addr;
576
	}
P
Per Liden 已提交
577 578 579 580 581 582 583

	*uaddr_len = sizeof(*addr);
	addr->addrtype = TIPC_ADDR_ID;
	addr->family = AF_TIPC;
	addr->scope = 0;
	addr->addr.name.domain = 0;

584
	return 0;
P
Per Liden 已提交
585 586 587
}

/**
588
 * tipc_poll - read and possibly block on pollmask
P
Per Liden 已提交
589 590 591 592
 * @file: file structure associated with the socket
 * @sock: socket for which to calculate the poll bits
 * @wait: ???
 *
593 594 595 596 597 598 599 600 601
 * Returns pollmask value
 *
 * COMMENTARY:
 * It appears that the usual socket locking mechanisms are not useful here
 * since the pollmask info is potentially out-of-date the moment this routine
 * exits.  TCP and other protocols seem to rely on higher level poll routines
 * to handle any preventable race conditions, so TIPC will do the same ...
 *
 * TIPC sets the returned events as follows:
602 603 604 605
 *
 * socket state		flags set
 * ------------		---------
 * unconnected		no read flags
606
 *			POLLOUT if port is not congested
607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625
 *
 * connecting		POLLIN/POLLRDNORM if ACK/NACK in rx queue
 *			no write flags
 *
 * connected		POLLIN/POLLRDNORM if data in rx queue
 *			POLLOUT if port is not congested
 *
 * disconnecting	POLLIN/POLLRDNORM/POLLHUP
 *			no write flags
 *
 * listening		POLLIN if SYN in rx queue
 *			no write flags
 *
 * ready		POLLIN/POLLRDNORM if data in rx queue
 * [connectionless]	POLLOUT (since port cannot be congested)
 *
 * IMPORTANT: The fact that a read or write operation is indicated does NOT
 * imply that the operation will succeed, merely that it should be performed
 * and will not block.
P
Per Liden 已提交
626
 */
627 628
static unsigned int tipc_poll(struct file *file, struct socket *sock,
			      poll_table *wait)
P
Per Liden 已提交
629
{
630
	struct sock *sk = sock->sk;
631
	struct tipc_sock *tsk = tipc_sk(sk);
632
	u32 mask = 0;
633

634
	sock_poll_wait(file, sk_sleep(sk), wait);
635

636
	switch ((int)sock->state) {
637
	case SS_UNCONNECTED:
638
		if (!tsk->link_cong)
639 640
			mask |= POLLOUT;
		break;
641 642
	case SS_READY:
	case SS_CONNECTED:
643
		if (!tsk->link_cong && !tsk_conn_cong(tsk))
644 645 646 647 648 649 650 651 652 653 654
			mask |= POLLOUT;
		/* fall thru' */
	case SS_CONNECTING:
	case SS_LISTENING:
		if (!skb_queue_empty(&sk->sk_receive_queue))
			mask |= (POLLIN | POLLRDNORM);
		break;
	case SS_DISCONNECTING:
		mask = (POLLIN | POLLRDNORM | POLLHUP);
		break;
	}
655 656

	return mask;
P
Per Liden 已提交
657 658
}

659 660 661 662
/**
 * tipc_sendmcast - send multicast message
 * @sock: socket structure
 * @seq: destination address
663
 * @msg: message to send
664 665 666 667 668 669 670
 * @dsz: total length of message data
 * @timeo: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_sendmcast(struct  socket *sock, struct tipc_name_seq *seq,
671
			  struct msghdr *msg, size_t dsz, long timeo)
672 673
{
	struct sock *sk = sock->sk;
674
	struct tipc_sock *tsk = tipc_sk(sk);
675
	struct net *net = sock_net(sk);
676
	struct tipc_msg *mhdr = &tsk->phdr;
677
	struct sk_buff_head *pktchain = &sk->sk_write_queue;
A
Al Viro 已提交
678
	struct iov_iter save = msg->msg_iter;
679 680 681 682 683 684 685 686 687 688 689 690 691
	uint mtu;
	int rc;

	msg_set_type(mhdr, TIPC_MCAST_MSG);
	msg_set_lookup_scope(mhdr, TIPC_CLUSTER_SCOPE);
	msg_set_destport(mhdr, 0);
	msg_set_destnode(mhdr, 0);
	msg_set_nametype(mhdr, seq->type);
	msg_set_namelower(mhdr, seq->lower);
	msg_set_nameupper(mhdr, seq->upper);
	msg_set_hdr_sz(mhdr, MCAST_H_SIZE);

new_mtu:
692
	mtu = tipc_bcast_get_mtu(net);
693
	rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, pktchain);
694 695 696 697
	if (unlikely(rc < 0))
		return rc;

	do {
698
		rc = tipc_bcast_xmit(net, pktchain);
699 700 701 702 703 704 705 706
		if (likely(!rc))
			return dsz;

		if (rc == -ELINKCONG) {
			tsk->link_cong = 1;
			rc = tipc_wait_for_sndmsg(sock, &timeo);
			if (!rc)
				continue;
707
		}
708
		__skb_queue_purge(pktchain);
A
Al Viro 已提交
709 710
		if (rc == -EMSGSIZE) {
			msg->msg_iter = save;
711
			goto new_mtu;
A
Al Viro 已提交
712
		}
713 714
		break;
	} while (1);
715 716 717
	return rc;
}

718 719 720 721 722 723
/**
 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
 * @arrvq: queue with arriving messages, to be cloned after destination lookup
 * @inputq: queue with cloned messages, delivered to socket after dest lookup
 *
 * Multi-threaded: parallel calls with reference to same queues may occur
724
 */
725 726
void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
		       struct sk_buff_head *inputq)
727
{
728
	struct tipc_msg *msg;
729 730
	struct tipc_plist dports;
	u32 portid;
731
	u32 scope = TIPC_CLUSTER_SCOPE;
732 733 734
	struct sk_buff_head tmpq;
	uint hsz;
	struct sk_buff *skb, *_skb;
735

736
	__skb_queue_head_init(&tmpq);
737
	tipc_plist_init(&dports);
738

739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759
	skb = tipc_skb_peek(arrvq, &inputq->lock);
	for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
		msg = buf_msg(skb);
		hsz = skb_headroom(skb) + msg_hdr_sz(msg);

		if (in_own_node(net, msg_orignode(msg)))
			scope = TIPC_NODE_SCOPE;

		/* Create destination port list and message clones: */
		tipc_nametbl_mc_translate(net,
					  msg_nametype(msg), msg_namelower(msg),
					  msg_nameupper(msg), scope, &dports);
		portid = tipc_plist_pop(&dports);
		for (; portid; portid = tipc_plist_pop(&dports)) {
			_skb = __pskb_copy(skb, hsz, GFP_ATOMIC);
			if (_skb) {
				msg_set_destport(buf_msg(_skb), portid);
				__skb_queue_tail(&tmpq, _skb);
				continue;
			}
			pr_warn("Failed to clone mcast rcv buffer\n");
760
		}
761 762 763 764 765 766 767 768 769
		/* Append to inputq if not already done by other thread */
		spin_lock_bh(&inputq->lock);
		if (skb_peek(arrvq) == skb) {
			skb_queue_splice_tail_init(&tmpq, inputq);
			kfree_skb(__skb_dequeue(arrvq));
		}
		spin_unlock_bh(&inputq->lock);
		__skb_queue_purge(&tmpq);
		kfree_skb(skb);
770
	}
771
	tipc_sk_rcv(net, inputq);
772 773
}

774 775 776
/**
 * tipc_sk_proto_rcv - receive a connection mng protocol message
 * @tsk: receiving socket
777
 * @skb: pointer to message buffer.
778
 */
779
static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb)
780
{
781 782 783
	struct sock *sk = &tsk->sk;
	struct tipc_msg *hdr = buf_msg(skb);
	int mtyp = msg_type(hdr);
784
	int conn_cong;
785

786
	/* Ignore if connection cannot be validated: */
787
	if (!tsk_peer_msg(tsk, hdr))
788 789
		goto exit;

790
	tsk->probing_state = TIPC_CONN_OK;
791

792 793 794 795 796
	if (mtyp == CONN_PROBE) {
		msg_set_type(hdr, CONN_PROBE_REPLY);
		tipc_sk_respond(sk, skb, TIPC_OK);
		return;
	} else if (mtyp == CONN_ACK) {
797
		conn_cong = tsk_conn_cong(tsk);
798
		tsk->sent_unacked -= msg_msgcnt(hdr);
799
		if (conn_cong)
800 801 802
			sk->sk_write_space(sk);
	} else if (mtyp != CONN_PROBE_REPLY) {
		pr_warn("Received unknown CONN_PROTO msg\n");
803 804
	}
exit:
805
	kfree_skb(skb);
806 807
}

808 809 810
static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
{
	struct sock *sk = sock->sk;
811
	struct tipc_sock *tsk = tipc_sk(sk);
812 813 814 815 816 817 818 819 820 821 822 823 824 825 826
	DEFINE_WAIT(wait);
	int done;

	do {
		int err = sock_error(sk);
		if (err)
			return err;
		if (sock->state == SS_DISCONNECTING)
			return -EPIPE;
		if (!*timeo_p)
			return -EAGAIN;
		if (signal_pending(current))
			return sock_intr_errno(*timeo_p);

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
827
		done = sk_wait_event(sk, timeo_p, !tsk->link_cong);
828 829 830 831 832
		finish_wait(sk_sleep(sk), &wait);
	} while (!done);
	return 0;
}

P
Per Liden 已提交
833
/**
834
 * tipc_sendmsg - send message in connectionless manner
P
Per Liden 已提交
835 836
 * @sock: socket structure
 * @m: message to send
837
 * @dsz: amount of user data to be sent
838
 *
P
Per Liden 已提交
839
 * Message must have an destination specified explicitly.
840
 * Used for SOCK_RDM and SOCK_DGRAM messages,
P
Per Liden 已提交
841 842
 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
843
 *
P
Per Liden 已提交
844 845
 * Returns the number of bytes sent on success, or errno otherwise
 */
846
static int tipc_sendmsg(struct socket *sock,
847
			struct msghdr *m, size_t dsz)
848 849 850 851 852 853 854 855 856 857 858 859
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
	ret = __tipc_sendmsg(sock, m, dsz);
	release_sock(sk);

	return ret;
}

static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz)
P
Per Liden 已提交
860
{
861
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
862
	struct sock *sk = sock->sk;
863
	struct tipc_sock *tsk = tipc_sk(sk);
864
	struct net *net = sock_net(sk);
865
	struct tipc_msg *mhdr = &tsk->phdr;
866
	u32 dnode, dport;
867
	struct sk_buff_head *pktchain = &sk->sk_write_queue;
868
	struct sk_buff *skb;
869
	struct tipc_name_seq *seq;
A
Al Viro 已提交
870
	struct iov_iter save;
871
	u32 mtu;
872
	long timeo;
E
Erik Hugne 已提交
873
	int rc;
P
Per Liden 已提交
874

875
	if (dsz > TIPC_MAX_USER_MSG_SIZE)
876
		return -EMSGSIZE;
877 878 879 880 881 882 883 884 885
	if (unlikely(!dest)) {
		if (tsk->connected && sock->state == SS_READY)
			dest = &tsk->remote;
		else
			return -EDESTADDRREQ;
	} else if (unlikely(m->msg_namelen < sizeof(*dest)) ||
		   dest->family != AF_TIPC) {
		return -EINVAL;
	}
886
	if (unlikely(sock->state != SS_READY)) {
887 888 889 890 891 892
		if (sock->state == SS_LISTENING)
			return -EPIPE;
		if (sock->state != SS_UNCONNECTED)
			return -EISCONN;
		if (tsk->published)
			return -EOPNOTSUPP;
893
		if (dest->addrtype == TIPC_ADDR_NAME) {
894 895
			tsk->conn_type = dest->addr.name.name.type;
			tsk->conn_instance = dest->addr.name.name.instance;
896
		}
P
Per Liden 已提交
897
	}
898
	seq = &dest->addr.nameseq;
899
	timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
900 901

	if (dest->addrtype == TIPC_ADDR_MCAST) {
902
		return tipc_sendmcast(sock, seq, m, dsz, timeo);
903 904 905 906 907 908 909 910 911 912 913
	} else if (dest->addrtype == TIPC_ADDR_NAME) {
		u32 type = dest->addr.name.name.type;
		u32 inst = dest->addr.name.name.instance;
		u32 domain = dest->addr.name.domain;

		dnode = domain;
		msg_set_type(mhdr, TIPC_NAMED_MSG);
		msg_set_hdr_sz(mhdr, NAMED_H_SIZE);
		msg_set_nametype(mhdr, type);
		msg_set_nameinst(mhdr, inst);
		msg_set_lookup_scope(mhdr, tipc_addr_scope(domain));
914
		dport = tipc_nametbl_translate(net, type, inst, &dnode);
915 916
		msg_set_destnode(mhdr, dnode);
		msg_set_destport(mhdr, dport);
917 918
		if (unlikely(!dport && !dnode))
			return -EHOSTUNREACH;
919 920 921 922 923 924 925 926 927
	} else if (dest->addrtype == TIPC_ADDR_ID) {
		dnode = dest->addr.id.node;
		msg_set_type(mhdr, TIPC_DIRECT_MSG);
		msg_set_lookup_scope(mhdr, 0);
		msg_set_destnode(mhdr, dnode);
		msg_set_destport(mhdr, dest->addr.id.ref);
		msg_set_hdr_sz(mhdr, BASIC_H_SIZE);
	}

A
Al Viro 已提交
928
	save = m->msg_iter;
929
new_mtu:
930
	mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
931
	rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, pktchain);
932
	if (rc < 0)
933
		return rc;
934 935

	do {
936
		skb = skb_peek(pktchain);
937
		TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong;
938
		rc = tipc_node_xmit(net, pktchain, dnode, tsk->portid);
939
		if (likely(!rc)) {
940
			if (sock->state != SS_READY)
941
				sock->state = SS_CONNECTING;
942
			return dsz;
943
		}
944 945 946 947 948 949 950
		if (rc == -ELINKCONG) {
			tsk->link_cong = 1;
			rc = tipc_wait_for_sndmsg(sock, &timeo);
			if (!rc)
				continue;
		}
		__skb_queue_purge(pktchain);
A
Al Viro 已提交
951 952
		if (rc == -EMSGSIZE) {
			m->msg_iter = save;
953
			goto new_mtu;
A
Al Viro 已提交
954
		}
955 956
		break;
	} while (1);
957 958

	return rc;
P
Per Liden 已提交
959 960
}

961 962 963
static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p)
{
	struct sock *sk = sock->sk;
964
	struct tipc_sock *tsk = tipc_sk(sk);
965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982
	DEFINE_WAIT(wait);
	int done;

	do {
		int err = sock_error(sk);
		if (err)
			return err;
		if (sock->state == SS_DISCONNECTING)
			return -EPIPE;
		else if (sock->state != SS_CONNECTED)
			return -ENOTCONN;
		if (!*timeo_p)
			return -EAGAIN;
		if (signal_pending(current))
			return sock_intr_errno(*timeo_p);

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		done = sk_wait_event(sk, timeo_p,
983
				     (!tsk->link_cong &&
984 985
				      !tsk_conn_cong(tsk)) ||
				     !tsk->connected);
986 987 988 989 990
		finish_wait(sk_sleep(sk), &wait);
	} while (!done);
	return 0;
}

991
/**
992
 * tipc_send_stream - send stream-oriented data
P
Per Liden 已提交
993
 * @sock: socket structure
994 995
 * @m: data to send
 * @dsz: total length of data to be transmitted
996
 *
997
 * Used for SOCK_STREAM data.
998
 *
999 1000
 * Returns the number of bytes sent on success (or partial success),
 * or errno if no data sent
P
Per Liden 已提交
1001
 */
1002
static int tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz)
1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
	ret = __tipc_send_stream(sock, m, dsz);
	release_sock(sk);

	return ret;
}

static int __tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz)
P
Per Liden 已提交
1015
{
1016
	struct sock *sk = sock->sk;
1017
	struct net *net = sock_net(sk);
1018
	struct tipc_sock *tsk = tipc_sk(sk);
1019
	struct tipc_msg *mhdr = &tsk->phdr;
1020
	struct sk_buff_head *pktchain = &sk->sk_write_queue;
1021
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1022
	u32 portid = tsk->portid;
1023
	int rc = -EINVAL;
1024
	long timeo;
1025 1026
	u32 dnode;
	uint mtu, send, sent = 0;
A
Al Viro 已提交
1027
	struct iov_iter save;
P
Per Liden 已提交
1028 1029

	/* Handle implied connection establishment */
1030
	if (unlikely(dest)) {
1031
		rc = __tipc_sendmsg(sock, m, dsz);
1032
		if (dsz && (dsz == rc))
1033
			tsk->sent_unacked = 1;
1034 1035 1036
		return rc;
	}
	if (dsz > (uint)INT_MAX)
1037 1038
		return -EMSGSIZE;

1039 1040
	if (unlikely(sock->state != SS_CONNECTED)) {
		if (sock->state == SS_DISCONNECTING)
1041
			return -EPIPE;
1042
		else
1043
			return -ENOTCONN;
1044
	}
1045

1046
	timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
1047
	dnode = tsk_peer_node(tsk);
1048 1049

next:
A
Al Viro 已提交
1050
	save = m->msg_iter;
1051
	mtu = tsk->max_pkt;
1052
	send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
1053
	rc = tipc_msg_build(mhdr, m, sent, send, mtu, pktchain);
1054
	if (unlikely(rc < 0))
1055
		return rc;
1056
	do {
1057
		if (likely(!tsk_conn_cong(tsk))) {
1058
			rc = tipc_node_xmit(net, pktchain, dnode, portid);
1059
			if (likely(!rc)) {
1060
				tsk->sent_unacked++;
1061 1062
				sent += send;
				if (sent == dsz)
1063
					return dsz;
1064 1065 1066
				goto next;
			}
			if (rc == -EMSGSIZE) {
1067
				__skb_queue_purge(pktchain);
1068 1069
				tsk->max_pkt = tipc_node_get_mtu(net, dnode,
								 portid);
A
Al Viro 已提交
1070
				m->msg_iter = save;
1071 1072 1073 1074
				goto next;
			}
			if (rc != -ELINKCONG)
				break;
1075

1076
			tsk->link_cong = 1;
1077 1078 1079
		}
		rc = tipc_wait_for_sndpkt(sock, &timeo);
	} while (!rc);
1080

1081
	__skb_queue_purge(pktchain);
1082
	return sent ? sent : rc;
P
Per Liden 已提交
1083 1084
}

1085
/**
1086
 * tipc_send_packet - send a connection-oriented message
P
Per Liden 已提交
1087
 * @sock: socket structure
1088 1089
 * @m: message to send
 * @dsz: length of data to be transmitted
1090
 *
1091
 * Used for SOCK_SEQPACKET messages.
1092
 *
1093
 * Returns the number of bytes sent on success, or errno otherwise
P
Per Liden 已提交
1094
 */
1095
static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
P
Per Liden 已提交
1096
{
1097 1098
	if (dsz > TIPC_MAX_USER_MSG_SIZE)
		return -EMSGSIZE;
P
Per Liden 已提交
1099

1100
	return tipc_send_stream(sock, m, dsz);
P
Per Liden 已提交
1101 1102
}

1103
/* tipc_sk_finish_conn - complete the setup of a connection
P
Per Liden 已提交
1104
 */
1105
static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
1106
				u32 peer_node)
P
Per Liden 已提交
1107
{
1108 1109
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
1110
	struct tipc_msg *msg = &tsk->phdr;
P
Per Liden 已提交
1111

1112 1113 1114 1115 1116
	msg_set_destnode(msg, peer_node);
	msg_set_destport(msg, peer_port);
	msg_set_type(msg, TIPC_CONN_MSG);
	msg_set_lookup_scope(msg, 0);
	msg_set_hdr_sz(msg, SHORT_H_SIZE);
1117

1118
	tsk->probing_intv = CONN_PROBING_INTERVAL;
1119 1120
	tsk->probing_state = TIPC_CONN_OK;
	tsk->connected = 1;
1121
	sk_reset_timer(sk, &sk->sk_timer, jiffies + tsk->probing_intv);
1122 1123
	tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
	tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
P
Per Liden 已提交
1124 1125 1126 1127 1128 1129
}

/**
 * set_orig_addr - capture sender's address for received message
 * @m: descriptor for message info
 * @msg: received message header
1130
 *
P
Per Liden 已提交
1131 1132
 * Note: Address is not captured if not requested by receiver.
 */
S
Sam Ravnborg 已提交
1133
static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
P
Per Liden 已提交
1134
{
1135
	DECLARE_SOCKADDR(struct sockaddr_tipc *, addr, m->msg_name);
P
Per Liden 已提交
1136

1137
	if (addr) {
P
Per Liden 已提交
1138 1139
		addr->family = AF_TIPC;
		addr->addrtype = TIPC_ADDR_ID;
1140
		memset(&addr->addr, 0, sizeof(addr->addr));
P
Per Liden 已提交
1141 1142
		addr->addr.id.ref = msg_origport(msg);
		addr->addr.id.node = msg_orignode(msg);
1143 1144
		addr->addr.name.domain = 0;	/* could leave uninitialized */
		addr->scope = 0;		/* could leave uninitialized */
P
Per Liden 已提交
1145 1146 1147 1148 1149
		m->msg_namelen = sizeof(struct sockaddr_tipc);
	}
}

/**
1150
 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
P
Per Liden 已提交
1151 1152
 * @m: descriptor for message info
 * @msg: received message header
1153
 * @tsk: TIPC port associated with message
1154
 *
P
Per Liden 已提交
1155
 * Note: Ancillary data is not captured if not requested by receiver.
1156
 *
P
Per Liden 已提交
1157 1158
 * Returns 0 if successful, otherwise errno
 */
1159 1160
static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
				 struct tipc_sock *tsk)
P
Per Liden 已提交
1161 1162 1163 1164
{
	u32 anc_data[3];
	u32 err;
	u32 dest_type;
1165
	int has_name;
P
Per Liden 已提交
1166 1167 1168 1169 1170 1171 1172 1173 1174 1175
	int res;

	if (likely(m->msg_controllen == 0))
		return 0;

	/* Optionally capture errored message object(s) */
	err = msg ? msg_errcode(msg) : 0;
	if (unlikely(err)) {
		anc_data[0] = err;
		anc_data[1] = msg_data_sz(msg);
1176 1177
		res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
		if (res)
P
Per Liden 已提交
1178
			return res;
1179 1180 1181 1182 1183 1184
		if (anc_data[1]) {
			res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
				       msg_data(msg));
			if (res)
				return res;
		}
P
Per Liden 已提交
1185 1186 1187 1188 1189 1190
	}

	/* Optionally capture message destination object */
	dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
	switch (dest_type) {
	case TIPC_NAMED_MSG:
1191
		has_name = 1;
P
Per Liden 已提交
1192 1193 1194 1195 1196
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_namelower(msg);
		break;
	case TIPC_MCAST_MSG:
1197
		has_name = 1;
P
Per Liden 已提交
1198 1199 1200 1201 1202
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_nameupper(msg);
		break;
	case TIPC_CONN_MSG:
1203 1204 1205 1206
		has_name = (tsk->conn_type != 0);
		anc_data[0] = tsk->conn_type;
		anc_data[1] = tsk->conn_instance;
		anc_data[2] = tsk->conn_instance;
P
Per Liden 已提交
1207 1208
		break;
	default:
1209
		has_name = 0;
P
Per Liden 已提交
1210
	}
1211 1212 1213 1214 1215
	if (has_name) {
		res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
		if (res)
			return res;
	}
P
Per Liden 已提交
1216 1217 1218 1219

	return 0;
}

1220
static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack)
1221
{
1222
	struct net *net = sock_net(&tsk->sk);
1223
	struct sk_buff *skb = NULL;
1224
	struct tipc_msg *msg;
1225 1226
	u32 peer_port = tsk_peer_port(tsk);
	u32 dnode = tsk_peer_node(tsk);
1227

1228
	if (!tsk->connected)
1229
		return;
1230 1231 1232
	skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
			      dnode, tsk_own_node(tsk), peer_port,
			      tsk->portid, TIPC_OK);
1233
	if (!skb)
1234
		return;
1235
	msg = buf_msg(skb);
1236
	msg_set_msgcnt(msg, ack);
1237
	tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
1238 1239
}

1240
static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
Y
Ying Xue 已提交
1241 1242 1243
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT(wait);
1244
	long timeo = *timeop;
Y
Ying Xue 已提交
1245 1246 1247 1248
	int err;

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1249
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
Y
Ying Xue 已提交
1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263
			if (sock->state == SS_DISCONNECTING) {
				err = -ENOTCONN;
				break;
			}
			release_sock(sk);
			timeo = schedule_timeout(timeo);
			lock_sock(sk);
		}
		err = 0;
		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
1264 1265 1266
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
Y
Ying Xue 已提交
1267 1268
	}
	finish_wait(sk_sleep(sk), &wait);
1269
	*timeop = timeo;
Y
Ying Xue 已提交
1270 1271 1272
	return err;
}

1273
/**
1274
 * tipc_recvmsg - receive packet-oriented message
P
Per Liden 已提交
1275 1276 1277
 * @m: descriptor for message info
 * @buf_len: total size of user buffer area
 * @flags: receive flags
1278
 *
P
Per Liden 已提交
1279 1280 1281 1282 1283
 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
 * If the complete message doesn't fit in user area, truncate it.
 *
 * Returns size of returned message data, errno otherwise
 */
1284 1285
static int tipc_recvmsg(struct socket *sock, struct msghdr *m, size_t buf_len,
			int flags)
P
Per Liden 已提交
1286
{
1287
	struct sock *sk = sock->sk;
1288
	struct tipc_sock *tsk = tipc_sk(sk);
P
Per Liden 已提交
1289 1290
	struct sk_buff *buf;
	struct tipc_msg *msg;
Y
Ying Xue 已提交
1291
	long timeo;
P
Per Liden 已提交
1292 1293 1294 1295
	unsigned int sz;
	u32 err;
	int res;

1296
	/* Catch invalid receive requests */
P
Per Liden 已提交
1297 1298 1299
	if (unlikely(!buf_len))
		return -EINVAL;

1300
	lock_sock(sk);
P
Per Liden 已提交
1301

1302 1303
	if (unlikely(sock->state == SS_UNCONNECTED)) {
		res = -ENOTCONN;
P
Per Liden 已提交
1304 1305 1306
		goto exit;
	}

Y
Ying Xue 已提交
1307
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1308
restart:
P
Per Liden 已提交
1309

1310
	/* Look for a message in receive queue; wait if necessary */
1311
	res = tipc_wait_for_rcvmsg(sock, &timeo);
Y
Ying Xue 已提交
1312 1313
	if (res)
		goto exit;
P
Per Liden 已提交
1314

1315 1316
	/* Look at first message in receive queue */
	buf = skb_peek(&sk->sk_receive_queue);
P
Per Liden 已提交
1317 1318 1319 1320 1321 1322
	msg = buf_msg(buf);
	sz = msg_data_sz(msg);
	err = msg_errcode(msg);

	/* Discard an empty non-errored message & try again */
	if ((!sz) && (!err)) {
1323
		tsk_advance_rx_queue(sk);
P
Per Liden 已提交
1324 1325 1326 1327 1328 1329 1330
		goto restart;
	}

	/* Capture sender's address (optional) */
	set_orig_addr(m, msg);

	/* Capture ancillary data (optional) */
1331
	res = tipc_sk_anc_data_recv(m, msg, tsk);
1332
	if (res)
P
Per Liden 已提交
1333 1334 1335 1336 1337 1338 1339 1340
		goto exit;

	/* Capture message data (if valid) & compute return value (always) */
	if (!err) {
		if (unlikely(buf_len < sz)) {
			sz = buf_len;
			m->msg_flags |= MSG_TRUNC;
		}
1341
		res = skb_copy_datagram_msg(buf, msg_hdr_sz(msg), m, sz);
1342
		if (res)
P
Per Liden 已提交
1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354
			goto exit;
		res = sz;
	} else {
		if ((sock->state == SS_READY) ||
		    ((err == TIPC_CONN_SHUTDOWN) || m->msg_control))
			res = 0;
		else
			res = -ECONNRESET;
	}

	/* Consume received message (optional) */
	if (likely(!(flags & MSG_PEEK))) {
1355
		if ((sock->state != SS_READY) &&
1356
		    (++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) {
1357
			tipc_sk_send_ack(tsk, tsk->rcv_unacked);
1358 1359
			tsk->rcv_unacked = 0;
		}
1360
		tsk_advance_rx_queue(sk);
1361
	}
P
Per Liden 已提交
1362
exit:
1363
	release_sock(sk);
P
Per Liden 已提交
1364 1365 1366
	return res;
}

1367
/**
1368
 * tipc_recv_stream - receive stream-oriented data
P
Per Liden 已提交
1369 1370 1371
 * @m: descriptor for message info
 * @buf_len: total size of user buffer area
 * @flags: receive flags
1372 1373
 *
 * Used for SOCK_STREAM messages only.  If not enough data is available
P
Per Liden 已提交
1374 1375 1376 1377
 * will optionally wait for more; never truncates data.
 *
 * Returns size of returned message data, errno otherwise
 */
1378 1379
static int tipc_recv_stream(struct socket *sock, struct msghdr *m,
			    size_t buf_len, int flags)
P
Per Liden 已提交
1380
{
1381
	struct sock *sk = sock->sk;
1382
	struct tipc_sock *tsk = tipc_sk(sk);
P
Per Liden 已提交
1383 1384
	struct sk_buff *buf;
	struct tipc_msg *msg;
Y
Ying Xue 已提交
1385
	long timeo;
P
Per Liden 已提交
1386
	unsigned int sz;
1387
	int sz_to_copy, target, needed;
P
Per Liden 已提交
1388 1389
	int sz_copied = 0;
	u32 err;
1390
	int res = 0;
P
Per Liden 已提交
1391

1392
	/* Catch invalid receive attempts */
P
Per Liden 已提交
1393 1394 1395
	if (unlikely(!buf_len))
		return -EINVAL;

1396
	lock_sock(sk);
P
Per Liden 已提交
1397

Y
Ying Xue 已提交
1398
	if (unlikely(sock->state == SS_UNCONNECTED)) {
1399
		res = -ENOTCONN;
P
Per Liden 已提交
1400 1401 1402
		goto exit;
	}

1403
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
Y
Ying Xue 已提交
1404
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
P
Per Liden 已提交
1405

1406
restart:
1407
	/* Look for a message in receive queue; wait if necessary */
1408
	res = tipc_wait_for_rcvmsg(sock, &timeo);
Y
Ying Xue 已提交
1409 1410
	if (res)
		goto exit;
P
Per Liden 已提交
1411

1412 1413
	/* Look at first message in receive queue */
	buf = skb_peek(&sk->sk_receive_queue);
P
Per Liden 已提交
1414 1415 1416 1417 1418 1419
	msg = buf_msg(buf);
	sz = msg_data_sz(msg);
	err = msg_errcode(msg);

	/* Discard an empty non-errored message & try again */
	if ((!sz) && (!err)) {
1420
		tsk_advance_rx_queue(sk);
P
Per Liden 已提交
1421 1422 1423 1424 1425 1426
		goto restart;
	}

	/* Optionally capture sender's address & ancillary data of first msg */
	if (sz_copied == 0) {
		set_orig_addr(m, msg);
1427
		res = tipc_sk_anc_data_recv(m, msg, tsk);
1428
		if (res)
P
Per Liden 已提交
1429 1430 1431 1432 1433
			goto exit;
	}

	/* Capture message data (if valid) & compute return value (always) */
	if (!err) {
1434
		u32 offset = (u32)(unsigned long)(TIPC_SKB_CB(buf)->handle);
P
Per Liden 已提交
1435

1436
		sz -= offset;
P
Per Liden 已提交
1437 1438
		needed = (buf_len - sz_copied);
		sz_to_copy = (sz <= needed) ? sz : needed;
1439

1440 1441
		res = skb_copy_datagram_msg(buf, msg_hdr_sz(msg) + offset,
					    m, sz_to_copy);
1442
		if (res)
P
Per Liden 已提交
1443
			goto exit;
1444

P
Per Liden 已提交
1445 1446 1447 1448
		sz_copied += sz_to_copy;

		if (sz_to_copy < sz) {
			if (!(flags & MSG_PEEK))
1449 1450
				TIPC_SKB_CB(buf)->handle =
				(void *)(unsigned long)(offset + sz_to_copy);
P
Per Liden 已提交
1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464
			goto exit;
		}
	} else {
		if (sz_copied != 0)
			goto exit; /* can't add error msg to valid data */

		if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)
			res = 0;
		else
			res = -ECONNRESET;
	}

	/* Consume received message (optional) */
	if (likely(!(flags & MSG_PEEK))) {
1465
		if (unlikely(++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) {
1466
			tipc_sk_send_ack(tsk, tsk->rcv_unacked);
1467 1468
			tsk->rcv_unacked = 0;
		}
1469
		tsk_advance_rx_queue(sk);
1470
	}
P
Per Liden 已提交
1471 1472

	/* Loop around if more data is required */
1473 1474
	if ((sz_copied < buf_len) &&	/* didn't get all requested data */
	    (!skb_queue_empty(&sk->sk_receive_queue) ||
1475
	    (sz_copied < target)) &&	/* and more is ready or required */
1476 1477
	    (!(flags & MSG_PEEK)) &&	/* and aren't just peeking at data */
	    (!err))			/* and haven't reached a FIN */
P
Per Liden 已提交
1478 1479 1480
		goto restart;

exit:
1481
	release_sock(sk);
1482
	return sz_copied ? sz_copied : res;
P
Per Liden 已提交
1483 1484
}

1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505
/**
 * tipc_write_space - wake up thread if port congestion is released
 * @sk: socket
 */
static void tipc_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
						POLLWRNORM | POLLWRBAND);
	rcu_read_unlock();
}

/**
 * tipc_data_ready - wake up threads to indicate messages have been received
 * @sk: socket
 * @len: the length of messages
 */
1506
static void tipc_data_ready(struct sock *sk)
1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
						POLLRDNORM | POLLRDBAND);
	rcu_read_unlock();
}

1518 1519
/**
 * filter_connect - Handle all incoming messages for a connection-based socket
1520
 * @tsk: TIPC socket
1521
 * @skb: pointer to message buffer. Set to NULL if buffer is consumed
1522
 *
1523
 * Returns true if everything ok, false otherwise
1524
 */
1525
static bool filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
1526
{
1527
	struct sock *sk = &tsk->sk;
1528
	struct net *net = sock_net(sk);
1529
	struct socket *sock = sk->sk_socket;
1530
	struct tipc_msg *hdr = buf_msg(skb);
1531

1532 1533
	if (unlikely(msg_mcast(hdr)))
		return false;
1534 1535 1536

	switch ((int)sock->state) {
	case SS_CONNECTED:
1537

1538
		/* Accept only connection-based messages sent by peer */
1539 1540 1541 1542 1543 1544 1545 1546 1547
		if (unlikely(!tsk_peer_msg(tsk, hdr)))
			return false;

		if (unlikely(msg_errcode(hdr))) {
			sock->state = SS_DISCONNECTING;
			tsk->connected = 0;
			/* Let timer expire on it's own */
			tipc_node_remove_conn(net, tsk_peer_node(tsk),
					      tsk->portid);
1548
		}
1549 1550
		return true;

1551
	case SS_CONNECTING:
1552

1553 1554 1555
		/* Accept only ACK or NACK message */
		if (unlikely(!msg_connected(hdr)))
			return false;
1556

1557
		if (unlikely(msg_errcode(hdr))) {
1558
			sock->state = SS_DISCONNECTING;
1559
			sk->sk_err = ECONNREFUSED;
1560
			return true;
1561 1562
		}

1563
		if (unlikely(!msg_isdata(hdr))) {
1564
			sock->state = SS_DISCONNECTING;
1565
			sk->sk_err = EINVAL;
1566
			return true;
1567 1568
		}

1569 1570
		tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr));
		msg_set_importance(&tsk->phdr, msg_importance(hdr));
1571 1572
		sock->state = SS_CONNECTED;

1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584
		/* If 'ACK+' message, add to socket receive queue */
		if (msg_data_sz(hdr))
			return true;

		/* If empty 'ACK-' message, wake up sleeping connect() */
		if (waitqueue_active(sk_sleep(sk)))
			wake_up_interruptible(sk_sleep(sk));

		/* 'ACK-' message is neither accepted nor rejected: */
		msg_set_dest_droppable(hdr, 1);
		return false;

1585 1586
	case SS_LISTENING:
	case SS_UNCONNECTED:
1587

1588
		/* Accept only SYN message */
1589 1590
		if (!msg_connected(hdr) && !(msg_errcode(hdr)))
			return true;
1591 1592 1593 1594 1595 1596
		break;
	case SS_DISCONNECTING:
		break;
	default:
		pr_err("Unknown socket state %u\n", sock->state);
	}
1597
	return false;
1598 1599
}

1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610
/**
 * rcvbuf_limit - get proper overload limit of socket receive queue
 * @sk: socket
 * @buf: message
 *
 * For all connection oriented messages, irrespective of importance,
 * the default overload value (i.e. 67MB) is set as limit.
 *
 * For all connectionless messages, by default new queue limits are
 * as belows:
 *
1611 1612 1613 1614
 * TIPC_LOW_IMPORTANCE       (4 MB)
 * TIPC_MEDIUM_IMPORTANCE    (8 MB)
 * TIPC_HIGH_IMPORTANCE      (16 MB)
 * TIPC_CRITICAL_IMPORTANCE  (32 MB)
1615 1616 1617 1618 1619 1620 1621 1622
 *
 * Returns overload limit according to corresponding message importance
 */
static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	if (msg_connected(msg))
1623 1624 1625 1626
		return sysctl_tipc_rmem[2];

	return sk->sk_rcvbuf >> TIPC_CRITICAL_IMPORTANCE <<
		msg_importance(msg);
1627 1628
}

1629
/**
1630 1631
 * filter_rcv - validate incoming message
 * @sk: socket
1632
 * @skb: pointer to message.
1633
 *
1634 1635 1636
 * Enqueues message on receive queue if acceptable; optionally handles
 * disconnect indication for a connected socket.
 *
1637
 * Called with socket lock already taken
1638
 *
1639
 * Returns true if message was added to socket receive queue, otherwise false
P
Per Liden 已提交
1640
 */
1641
static bool filter_rcv(struct sock *sk, struct sk_buff *skb)
P
Per Liden 已提交
1642
{
1643
	struct socket *sock = sk->sk_socket;
1644
	struct tipc_sock *tsk = tipc_sk(sk);
1645 1646 1647 1648
	struct tipc_msg *hdr = buf_msg(skb);
	unsigned int limit = rcvbuf_limit(sk, skb);
	int err = TIPC_OK;
	int usr = msg_user(hdr);
P
Per Liden 已提交
1649

1650 1651 1652
	if (unlikely(msg_user(hdr) == CONN_MANAGER)) {
		tipc_sk_proto_rcv(tsk, skb);
		return false;
1653
	}
1654

1655 1656
	if (unlikely(usr == SOCK_WAKEUP)) {
		kfree_skb(skb);
1657 1658
		tsk->link_cong = 0;
		sk->sk_write_space(sk);
1659
		return false;
1660 1661
	}

1662 1663 1664 1665 1666
	/* Drop if illegal message type */
	if (unlikely(msg_type(hdr) > TIPC_DIRECT_MSG)) {
		kfree_skb(skb);
		return false;
	}
1667

1668 1669 1670 1671 1672 1673 1674 1675 1676
	/* Reject if wrong message type for current socket state */
	if (unlikely(sock->state == SS_READY)) {
		if (msg_connected(hdr)) {
			err = TIPC_ERR_NO_PORT;
			goto reject;
		}
	} else if (unlikely(!filter_connect(tsk, skb))) {
		err = TIPC_ERR_NO_PORT;
		goto reject;
	}

	/* Reject message if there isn't room to queue it */
	if (unlikely(sk_rmem_alloc_get(sk) + skb->truesize >= limit)) {
		err = TIPC_ERR_OVERLOAD;
		goto reject;
	}

	/* Enqueue message */
	TIPC_SKB_CB(skb)->handle = NULL;
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	skb_set_owner_r(skb, sk);

	sk->sk_data_ready(sk);
	return true;

reject:
	tipc_sk_respond(sk, skb, err);
	return false;
}

/**
 * tipc_backlog_rcv - handle incoming message from backlog queue
 * @sk: socket
 * @skb: message
 *
 * Caller must hold socket lock
 *
 * Returns 0
 */
static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	unsigned int truesize = skb->truesize;

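	/* Note: if the buffer is accepted here, its truesize is still counted
	 * against the backlog until backlog processing completes, so record
	 * it in dupl_rcvcnt; tipc_sk_enqueue() compensates for this
	 * double-counting when sizing the backlog limit.
	 */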
	if (likely(filter_rcv(sk, skb)))
		atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt);
	return 0;
}

/**
 * tipc_sk_enqueue - extract all buffers with destination 'dport' from
 *                   inputq and try adding them to socket or backlog queue
 * @inputq: list of incoming buffers with potentially different destinations
 * @sk: socket where the buffers should be enqueued
 * @dport: port number for the socket
 *
 * Caller must hold socket lock
 */
static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
			    u32 dport)
{
	unsigned int lim;
	atomic_t *dcnt;
	struct sk_buff *skb;
	unsigned long time_limit = jiffies + 2;

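	/* Process the queue for at most two jiffies before returning, so one
	 * socket cannot monopolize the caller.
	 */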
	while (skb_queue_len(inputq)) {
		if (unlikely(time_after_eq(jiffies, time_limit)))
			return;

		skb = tipc_skb_dequeue(inputq, dport);
		if (unlikely(!skb))
			return;

		/* Add message directly to receive queue if possible */
		if (!sock_owned_by_user(sk)) {
			filter_rcv(sk, skb);
			continue;
		}

		/* Try backlog, compensating for double-counted bytes */
		dcnt = &tipc_sk(sk)->dupl_rcvcnt;
		if (sk->sk_backlog.len)
			atomic_set(dcnt, 0);
		lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
		if (likely(!sk_add_backlog(sk, skb, lim)))
			continue;

		/* Overload => reject message back to sender */
		tipc_sk_respond(sk, skb, TIPC_ERR_OVERLOAD);
		break;
	}
}

/**
 * tipc_sk_rcv - handle a chain of incoming buffers
 * @inputq: buffer list containing the buffers
 * Consumes all buffers in list until inputq is empty
 * Note: may be called in multiple threads referring to the same queue
 */
void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
{
	u32 dnode, dport = 0;
	int err;
	struct tipc_sock *tsk;
	struct sock *sk;
	struct sk_buff *skb;

	while (skb_queue_len(inputq)) {
		dport = tipc_skb_peek_port(inputq, dport);
		tsk = tipc_sk_lookup(net, dport);

		if (likely(tsk)) {
			sk = &tsk->sk;
			if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
				tipc_sk_enqueue(inputq, sk, dport);
				spin_unlock_bh(&sk->sk_lock.slock);
			}
			sock_put(sk);
			continue;
		}

		/* No destination socket => dequeue skb if still there */
		skb = tipc_skb_dequeue(inputq, dport);
		if (!skb)
			return;

		/* Try secondary lookup if unresolved named message */
		err = TIPC_ERR_NO_PORT;
		if (tipc_msg_lookup_dest(net, skb, &err))
			goto xmit;

		/* Prepare for message rejection */
		if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
			continue;
xmit:
		dnode = msg_destnode(buf_msg(skb));
		tipc_node_xmit_skb(net, skb, dnode, dport);
	}
}

static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT(wait);
	int done;

	do {
		int err = sock_error(sk);
		if (err)
			return err;
		if (!*timeo_p)
			return -ETIMEDOUT;
		if (signal_pending(current))
			return sock_intr_errno(*timeo_p);

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		done = sk_wait_event(sk, timeo_p, sock->state != SS_CONNECTING);
		finish_wait(sk_sleep(sk), &wait);
	} while (!done);
	return 0;
}

/**
 * tipc_connect - establish a connection to another TIPC port
 * @sock: socket structure
 * @dest: socket address for destination port
 * @destlen: size of socket address data structure
 * @flags: file-related flags associated with socket
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_connect(struct socket *sock, struct sockaddr *dest,
			int destlen, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
	struct msghdr m = {NULL,};
	long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
	socket_state previous;
	int res = 0;

	lock_sock(sk);

	/* DGRAM/RDM connect(), just save the destaddr */
	if (sock->state == SS_READY) {
		if (dst->family == AF_UNSPEC) {
			memset(&tsk->remote, 0, sizeof(struct sockaddr_tipc));
			tsk->connected = 0;
		} else if (destlen != sizeof(struct sockaddr_tipc)) {
			res = -EINVAL;
		} else {
			memcpy(&tsk->remote, dest, destlen);
			tsk->connected = 1;
		}
		goto exit;
	}

	/*
	 * Reject connection attempt using multicast address
	 *
	 * Note: send_msg() validates the rest of the address fields,
	 *       so there's no need to do it here
	 */
	if (dst->addrtype == TIPC_ADDR_MCAST) {
		res = -EINVAL;
		goto exit;
	}

	previous = sock->state;
	switch (sock->state) {
	case SS_UNCONNECTED:
		/* Send a 'SYN-' to destination */
		m.msg_name = dest;
		m.msg_namelen = destlen;

		/* If connect is in non-blocking case, set MSG_DONTWAIT to
		 * indicate send_msg() is never blocked.
		 */
		if (!timeout)
			m.msg_flags = MSG_DONTWAIT;

1890
		res = __tipc_sendmsg(sock, &m, 0);
		if ((res < 0) && (res != -EWOULDBLOCK))
			goto exit;

		/* Just entered SS_CONNECTING state; the only
		 * difference is that return value in non-blocking
		 * case is EINPROGRESS, rather than EALREADY.
		 */
		res = -EINPROGRESS;
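		/* fall through */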
	case SS_CONNECTING:
		if (previous == SS_CONNECTING)
			res = -EALREADY;
		if (!timeout)
			goto exit;
		timeout = msecs_to_jiffies(timeout);
		/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
		res = tipc_wait_for_connect(sock, &timeout);
		break;
	case SS_CONNECTED:
		res = -EISCONN;
		break;
	default:
		res = -EINVAL;
		break;
	}
exit:
	release_sock(sk);
	return res;
}

/**
 * tipc_listen - allow socket to listen for incoming connections
 * @sock: socket structure
 * @len: (unused)
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_listen(struct socket *sock, int len)
{
	struct sock *sk = sock->sk;
	int res;

	lock_sock(sk);

	if (sock->state != SS_UNCONNECTED)
		res = -EINVAL;
	else {
		sock->state = SS_LISTENING;
		res = 0;
	}

	release_sock(sk);
	return res;
}

static int tipc_wait_for_accept(struct socket *sock, long timeo)
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT(wait);
	int err;

	/* True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
			release_sock(sk);
			timeo = schedule_timeout(timeo);
			lock_sock(sk);
		}
		err = 0;
		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;
		err = -EINVAL;
		if (sock->state != SS_LISTENING)
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}

/**
 * tipc_accept - wait for connection request
 * @sock: listening socket
 * @new_sock: new socket that is to be connected
 * @flags: file-related flags associated with socket
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
{
	struct sock *new_sk, *sk = sock->sk;
	struct sk_buff *buf;
	struct tipc_sock *new_tsock;
	struct tipc_msg *msg;
	long timeo;
	int res;

	lock_sock(sk);

	if (sock->state != SS_LISTENING) {
		res = -EINVAL;
		goto exit;
	}
	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	res = tipc_wait_for_accept(sock, timeo);
	if (res)
		goto exit;

	buf = skb_peek(&sk->sk_receive_queue);

	res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1);
	if (res)
		goto exit;
	security_sk_clone(sock->sk, new_sock->sk);

	new_sk = new_sock->sk;
	new_tsock = tipc_sk(new_sk);
	msg = buf_msg(buf);

	/* we lock on new_sk; but lockdep sees the lock on sk */
	lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);

	/*
	 * Reject any stray messages received by new socket
	 * before the socket lock was taken (very, very unlikely)
	 */
	tsk_rej_rx_queue(new_sk);

	/* Connect new socket to its peer */
	tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
	new_sock->state = SS_CONNECTED;

	tsk_set_importance(new_tsock, msg_importance(msg));
	if (msg_named(msg)) {
		new_tsock->conn_type = msg_nametype(msg);
		new_tsock->conn_instance = msg_nameinst(msg);
	}

	/*
	 * Respond to 'SYN-' by discarding it & returning 'ACK-'.
	 * Respond to 'SYN+' by queuing it on new socket.
	 */
	if (!msg_data_sz(msg)) {
		struct msghdr m = {NULL,};

		tsk_advance_rx_queue(sk);
		__tipc_send_stream(new_sock, &m, 0);
	} else {
		__skb_dequeue(&sk->sk_receive_queue);
		__skb_queue_head(&new_sk->sk_receive_queue, buf);
		skb_set_owner_r(buf, new_sk);
	}
	release_sock(new_sk);
exit:
	release_sock(sk);
	return res;
}

/**
 * tipc_shutdown - shutdown socket connection
 * @sock: socket structure
 * @how: direction to close (must be SHUT_RDWR)
 *
 * Terminates connection (if necessary), then purges socket's receive queue.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sk_buff *skb;
	u32 dnode = tsk_peer_node(tsk);
	u32 dport = tsk_peer_port(tsk);
	u32 onode = tipc_own_addr(net);
	u32 oport = tsk->portid;
	int res;

	if (how != SHUT_RDWR)
		return -EINVAL;

	lock_sock(sk);

	switch (sock->state) {
	case SS_CONNECTING:
	case SS_CONNECTED:

restart:
		dnode = tsk_peer_node(tsk);

		/* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
		skb = __skb_dequeue(&sk->sk_receive_queue);
		if (skb) {
			if (TIPC_SKB_CB(skb)->handle != NULL) {
				kfree_skb(skb);
				goto restart;
			}
			tipc_sk_respond(sk, skb, TIPC_CONN_SHUTDOWN);
		} else {
			skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
					      TIPC_CONN_MSG, SHORT_H_SIZE,
					      0, dnode, onode, dport, oport,
					      TIPC_CONN_SHUTDOWN);
			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
		}
		tsk->connected = 0;
		sock->state = SS_DISCONNECTING;
		tipc_node_remove_conn(net, dnode, tsk->portid);
		/* fall through */

	case SS_DISCONNECTING:

		/* Discard any unreceived messages */
		__skb_queue_purge(&sk->sk_receive_queue);

		/* Wake up anyone sleeping in poll */
		sk->sk_state_change(sk);
		res = 0;
		break;

	default:
		res = -ENOTCONN;
	}

	release_sock(sk);
	return res;
}

static void tipc_sk_timeout(unsigned long data)
{
	struct tipc_sock *tsk = (struct tipc_sock *)data;
	struct sock *sk = &tsk->sk;
	struct sk_buff *skb = NULL;
	u32 peer_port, peer_node;
	u32 own_node = tsk_own_node(tsk);

	bh_lock_sock(sk);
	if (!tsk->connected) {
		bh_unlock_sock(sk);
		goto exit;
	}
	peer_port = tsk_peer_port(tsk);
	peer_node = tsk_peer_node(tsk);

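	/* Still being in TIPC_CONN_PROBING state when the timer fires means
	 * the previous probe was never answered: abort the connection.
	 * Otherwise send a new probe and re-arm the timer.
	 */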
	if (tsk->probing_state == TIPC_CONN_PROBING) {
		if (!sock_owned_by_user(sk)) {
			sk->sk_socket->state = SS_DISCONNECTING;
			tsk->connected = 0;
			tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
					      tsk_peer_port(tsk));
			sk->sk_state_change(sk);
		} else {
			/* Try again later */
			sk_reset_timer(sk, &sk->sk_timer, (HZ / 20));
		}

	} else {
		skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE,
				      INT_H_SIZE, 0, peer_node, own_node,
				      peer_port, tsk->portid, TIPC_OK);
		tsk->probing_state = TIPC_CONN_PROBING;
		sk_reset_timer(sk, &sk->sk_timer, jiffies + tsk->probing_intv);
	}
	bh_unlock_sock(sk);
	if (skb)
		tipc_node_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid);
exit:
	sock_put(sk);
}

static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
			   struct tipc_name_seq const *seq)
{
	struct net *net = sock_net(&tsk->sk);
	struct publication *publ;
	u32 key;

	if (tsk->connected)
		return -EINVAL;
	key = tsk->portid + tsk->pub_count + 1;
	if (key == tsk->portid)
		return -EADDRINUSE;

	publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
				    scope, tsk->portid, key);
	if (unlikely(!publ))
		return -EINVAL;

	list_add(&publ->pport_list, &tsk->publications);
	tsk->pub_count++;
	tsk->published = 1;
	return 0;
}

static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
			    struct tipc_name_seq const *seq)
{
	struct net *net = sock_net(&tsk->sk);
	struct publication *publ;
	struct publication *safe;
	int rc = -EINVAL;

	list_for_each_entry_safe(publ, safe, &tsk->publications, pport_list) {
		if (seq) {
			if (publ->scope != scope)
				continue;
			if (publ->type != seq->type)
				continue;
			if (publ->lower != seq->lower)
				continue;
			if (publ->upper != seq->upper)
				break;
			tipc_nametbl_withdraw(net, publ->type, publ->lower,
					      publ->ref, publ->key);
			rc = 0;
			break;
		}
		tipc_nametbl_withdraw(net, publ->type, publ->lower,
				      publ->ref, publ->key);
		rc = 0;
	}
	if (list_empty(&tsk->publications))
		tsk->published = 0;
	return rc;
}

/* tipc_sk_reinit: set non-zero address in all existing sockets
 *                 when we go from standalone to network mode.
 */
void tipc_sk_reinit(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	const struct bucket_table *tbl;
	struct rhash_head *pos;
	struct tipc_sock *tsk;
	struct tipc_msg *msg;
	int i;

	rcu_read_lock();
	tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
	for (i = 0; i < tbl->size; i++) {
		rht_for_each_entry_rcu(tsk, pos, tbl, i, node) {
			spin_lock_bh(&tsk->sk.sk_lock.slock);
			msg = &tsk->phdr;
			msg_set_prevnode(msg, tn->own_addr);
			msg_set_orignode(msg, tn->own_addr);
			spin_unlock_bh(&tsk->sk.sk_lock.slock);
		}
	}
	rcu_read_unlock();
}

static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_sock *tsk;

	rcu_read_lock();
	tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
	if (tsk)
		sock_hold(&tsk->sk);
	rcu_read_unlock();

	return tsk;
}

static int tipc_sk_insert(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
	u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;

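	/* Start from a random port id and probe linearly, wrapping around at
	 * TIPC_MAX_PORT, until a free id is found or the range is exhausted.
	 */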
	while (remaining--) {
		portid++;
		if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
			portid = TIPC_MIN_PORT;
		tsk->portid = portid;
		sock_hold(&tsk->sk);
		if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
						   tsk_rht_params))
			return 0;
		sock_put(&tsk->sk);
	}

	return -1;
}

static void tipc_sk_remove(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);

	if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
}

static const struct rhashtable_params tsk_rht_params = {
	.nelem_hint = 192,
	.head_offset = offsetof(struct tipc_sock, node),
	.key_offset = offsetof(struct tipc_sock, portid),
	.key_len = sizeof(u32), /* portid */
	.max_size = 1048576,
	.min_size = 256,
	.automatic_shrinking = true,
};

int tipc_sk_rht_init(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
}

void tipc_sk_rht_destroy(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	/* Wait for socket readers to complete */
	synchronize_net();

	rhashtable_destroy(&tn->sk_rht);
}

/**
 * tipc_setsockopt - set socket option
 * @sock: socket structure
 * @lvl: option level
 * @opt: option identifier
 * @ov: pointer to new option value
 * @ol: length of option value
 *
 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
 * (to ease compatibility).
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
			   char __user *ov, unsigned int ol)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	u32 value;
	int res;

	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
		return 0;
	if (lvl != SOL_TIPC)
		return -ENOPROTOOPT;
	if (ol < sizeof(value))
		return -EINVAL;
	res = get_user(value, (u32 __user *)ov);
	if (res)
		return res;

	lock_sock(sk);

	switch (opt) {
	case TIPC_IMPORTANCE:
		res = tsk_set_importance(tsk, value);
		break;
	case TIPC_SRC_DROPPABLE:
		if (sock->type != SOCK_STREAM)
			tsk_set_unreliable(tsk, value);
		else
			res = -ENOPROTOOPT;
		break;
	case TIPC_DEST_DROPPABLE:
		tsk_set_unreturnable(tsk, value);
		break;
	case TIPC_CONN_TIMEOUT:
		tipc_sk(sk)->conn_timeout = value;
		/* no need to set "res", since already 0 at this point */
		break;
	default:
		res = -EINVAL;
	}

	release_sock(sk);

	return res;
}

/**
 * tipc_getsockopt - get socket option
 * @sock: socket structure
 * @lvl: option level
 * @opt: option identifier
 * @ov: receptacle for option value
 * @ol: receptacle for length of option value
 *
 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
 * (to ease compatibility).
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
			   char __user *ov, int __user *ol)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	int len;
	u32 value;
	int res;

	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
		return put_user(0, ol);
	if (lvl != SOL_TIPC)
		return -ENOPROTOOPT;
	res = get_user(len, ol);
	if (res)
		return res;

	lock_sock(sk);

	switch (opt) {
	case TIPC_IMPORTANCE:
		value = tsk_importance(tsk);
		break;
	case TIPC_SRC_DROPPABLE:
		value = tsk_unreliable(tsk);
		break;
	case TIPC_DEST_DROPPABLE:
		value = tsk_unreturnable(tsk);
		break;
	case TIPC_CONN_TIMEOUT:
		value = tsk->conn_timeout;
		/* no need to set "res", since already 0 at this point */
		break;
	case TIPC_NODE_RECVQ_DEPTH:
		value = 0; /* was tipc_queue_size, now obsolete */
		break;
	case TIPC_SOCK_RECVQ_DEPTH:
		value = skb_queue_len(&sk->sk_receive_queue);
		break;
	default:
		res = -EINVAL;
	}

	release_sock(sk);

	if (res)
		return res;	/* "get" failed */

	if (len < sizeof(value))
		return -EINVAL;

	if (copy_to_user(ov, &value, sizeof(value)))
		return -EFAULT;

	return put_user(sizeof(value), ol);
}

static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct tipc_sioc_ln_req lnr;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case SIOCGETLINKNAME:
		if (copy_from_user(&lnr, argp, sizeof(lnr)))
			return -EFAULT;
		if (!tipc_node_get_linkname(sock_net(sk),
					    lnr.bearer_id & 0xffff, lnr.peer,
					    lnr.linkname, TIPC_MAX_LINK_NAME)) {
			if (copy_to_user(argp, &lnr, sizeof(lnr)))
				return -EFAULT;
			return 0;
		}
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}

/* Protocol switches for the various types of TIPC sockets */
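/* msg_ops serves connectionless (SOCK_RDM/SOCK_DGRAM) sockets, packet_ops
 * serves SOCK_SEQPACKET and stream_ops serves SOCK_STREAM.
 */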

static const struct proto_ops msg_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
	.ioctl		= tipc_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_sendmsg,
	.recvmsg	= tipc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

static const struct proto_ops packet_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= tipc_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
	.ioctl		= tipc_ioctl,
	.listen		= tipc_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_send_packet,
	.recvmsg	= tipc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

static const struct proto_ops stream_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= tipc_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
	.ioctl		= tipc_ioctl,
	.listen		= tipc_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_send_stream,
	.recvmsg	= tipc_recv_stream,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

static const struct net_proto_family tipc_family_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.create		= tipc_sk_create
};

static struct proto tipc_proto = {
	.name		= "TIPC",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct tipc_sock),
	.sysctl_rmem	= sysctl_tipc_rmem
};

/**
 * tipc_socket_init - initialize TIPC socket interface
 *
 * Returns 0 on success, errno otherwise
 */
int tipc_socket_init(void)
{
	int res;

	res = proto_register(&tipc_proto, 1);
	if (res) {
		pr_err("Failed to register TIPC protocol type\n");
		goto out;
	}

	res = sock_register(&tipc_family_ops);
	if (res) {
		pr_err("Failed to register TIPC socket type\n");
		proto_unregister(&tipc_proto);
		goto out;
	}
 out:
	return res;
}

/**
 * tipc_socket_stop - stop TIPC socket interface
 */
void tipc_socket_stop(void)
{
	sock_unregister(tipc_family_ops.family);
	proto_unregister(&tipc_proto);
}

/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
{
	u32 peer_node;
	u32 peer_port;
	struct nlattr *nest;

	peer_node = tsk_peer_node(tsk);
	peer_port = tsk_peer_port(tsk);

	nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);

	if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
		goto msg_full;
	if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
		goto msg_full;

	if (tsk->conn_type != 0) {
		if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
			goto msg_full;
		if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
			goto msg_full;
		if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
			goto msg_full;
	}
	nla_nest_end(skb, nest);

	return 0;

msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
			    struct tipc_sock *tsk)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
	if (!hdr)
		goto msg_cancel;

	attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
	if (!attrs)
		goto genlmsg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tn->own_addr))
		goto attr_msg_cancel;

	if (tsk->connected) {
		err = __tipc_nl_add_sk_con(skb, tsk);
		if (err)
			goto attr_msg_cancel;
	} else if (!list_empty(&tsk->publications)) {
		if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
			goto attr_msg_cancel;
	}
	nla_nest_end(skb, attrs);
	genlmsg_end(skb, hdr);

	return 0;

attr_msg_cancel:
	nla_nest_cancel(skb, attrs);
genlmsg_cancel:
	genlmsg_cancel(skb, hdr);
msg_cancel:
	return -EMSGSIZE;
}

int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	struct tipc_sock *tsk;
	const struct bucket_table *tbl;
	struct rhash_head *pos;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	u32 tbl_id = cb->args[0];
	u32 prev_portid = cb->args[1];

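	/* cb->args[] carry the hash table bucket and the port id of the last
	 * dumped socket, so an interrupted dump can resume where it stopped.
	 */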
	rcu_read_lock();
	tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
	for (; tbl_id < tbl->size; tbl_id++) {
		rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) {
			spin_lock_bh(&tsk->sk.sk_lock.slock);
			if (prev_portid && prev_portid != tsk->portid) {
				spin_unlock_bh(&tsk->sk.sk_lock.slock);
				continue;
			}

			err = __tipc_nl_add_sk(skb, cb, tsk);
			if (err) {
				prev_portid = tsk->portid;
				spin_unlock_bh(&tsk->sk.sk_lock.slock);
				goto out;
			}
			prev_portid = 0;
			spin_unlock_bh(&tsk->sk.sk_lock.slock);
		}
	}
out:
	rcu_read_unlock();
	cb->args[0] = tbl_id;
	cb->args[1] = prev_portid;

	return skb->len;
}

/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct publication *publ)
{
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
	if (!hdr)
		goto msg_cancel;

	attrs = nla_nest_start(skb, TIPC_NLA_PUBL);
	if (!attrs)
		goto genlmsg_cancel;

	if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
		goto attr_msg_cancel;

	nla_nest_end(skb, attrs);
	genlmsg_end(skb, hdr);

	return 0;

attr_msg_cancel:
	nla_nest_cancel(skb, attrs);
genlmsg_cancel:
	genlmsg_cancel(skb, hdr);
msg_cancel:
	return -EMSGSIZE;
}

/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
				  struct netlink_callback *cb,
				  struct tipc_sock *tsk, u32 *last_publ)
{
	int err;
	struct publication *p;

	if (*last_publ) {
		list_for_each_entry(p, &tsk->publications, pport_list) {
			if (p->key == *last_publ)
				break;
		}
		if (p->key != *last_publ) {
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistence check to fail in the netlink callback
			 * handler. Resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			*last_publ = 0;
			return -EPIPE;
		}
	} else {
		p = list_first_entry(&tsk->publications, struct publication,
				     pport_list);
	}

	list_for_each_entry_from(p, &tsk->publications, pport_list) {
		err = __tipc_nl_add_sk_publ(skb, cb, p);
		if (err) {
			*last_publ = p->key;
			return err;
		}
	}
	*last_publ = 0;

	return 0;
}

int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	u32 tsk_portid = cb->args[0];
	u32 last_publ = cb->args[1];
	u32 done = cb->args[2];
	struct net *net = sock_net(skb->sk);
	struct tipc_sock *tsk;

	if (!tsk_portid) {
		struct nlattr **attrs;
		struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];

		err = tipc_nlmsg_parse(cb->nlh, &attrs);
		if (err)
			return err;

		err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
				       attrs[TIPC_NLA_SOCK],
				       tipc_nl_sock_policy);
		if (err)
			return err;

		if (!sock[TIPC_NLA_SOCK_REF])
			return -EINVAL;

		tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
	}

	if (done)
		return 0;

	tsk = tipc_sk_lookup(net, tsk_portid);
	if (!tsk)
		return -EINVAL;

	lock_sock(&tsk->sk);
	err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
	if (!err)
		done = 1;
	release_sock(&tsk->sk);
	sock_put(&tsk->sk);

	cb->args[0] = tsk_portid;
	cb->args[1] = last_publ;
	cb->args[2] = done;

	return skb->len;
}