socket.c 72.2 KB
Newer Older
P
Per Liden 已提交
1
/*
2
 * net/tipc/socket.c: TIPC socket API
3
 *
4
 * Copyright (c) 2001-2007, 2012-2015, Ericsson AB
5
 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
P
Per Liden 已提交
6 7
 * All rights reserved.
 *
P
Per Liden 已提交
8
 * Redistribution and use in source and binary forms, with or without
P
Per Liden 已提交
9 10
 * modification, are permitted provided that the following conditions are met:
 *
P
Per Liden 已提交
11 12 13 14 15 16 17 18
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
P
Per Liden 已提交
19
 *
P
Per Liden 已提交
20 21 22 23 24 25 26 27 28 29 30 31 32 33
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
P
Per Liden 已提交
34 35 36
 * POSSIBILITY OF SUCH DAMAGE.
 */

37 38
#include <linux/rhashtable.h>
#include <linux/jhash.h>
P
Per Liden 已提交
39
#include "core.h"
40
#include "name_table.h"
E
Erik Hugne 已提交
41
#include "node.h"
42
#include "link.h"
43
#include "name_distr.h"
44
#include "socket.h"
45

46 47
#define SS_LISTENING		-1	/* socket is listening */
#define SS_READY		-2	/* socket is connectionless */
P
Per Liden 已提交
48

49
#define CONN_TIMEOUT_DEFAULT	8000	/* default connect timeout = 8s */
50
#define CONN_PROBING_INTERVAL	msecs_to_jiffies(3600000)  /* [ms] => 1 h */
51 52 53 54 55
#define TIPC_FWD_MSG		1
#define TIPC_CONN_OK		0
#define TIPC_CONN_PROBING	1
#define TIPC_MAX_PORT		0xffffffff
#define TIPC_MIN_PORT		1
56 57 58 59 60 61 62 63 64

/**
 * struct tipc_sock - TIPC socket structure
 * @sk: socket - interacts with 'port' and with user via the socket API
 * @connected: non-zero if port is currently connected to a peer port
 * @conn_type: TIPC type used when connection was established
 * @conn_instance: TIPC instance used when connection was established
 * @published: non-zero if port has one or more associated names
 * @max_pkt: maximum packet size "hint" used when building messages sent by port
65
 * @portid: unique port identity in TIPC socket hash table
66 67 68 69 70
 * @phdr: preformatted message header used when sending messages
 * @port_list: adjacent ports in TIPC's global list of ports
 * @publications: list of publications for port
 * @pub_count: total # of publications port has made during its lifetime
 * @probing_state:
71
 * @probing_intv:
72 73 74 75 76
 * @conn_timeout: the time we can wait for an unresponded setup request
 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
 * @link_cong: non-zero if owner must sleep because of link congestion
 * @sent_unacked: # messages sent by socket, and not yet acked by peer
 * @rcv_unacked: # messages read by user, but not yet acked back to peer
77
 * @remote: 'connected' peer for dgram/rdm
78 79
 * @node: hash table node
 * @rcu: rcu struct for tipc_sock
80 81 82 83 84 85 86 87
 */
struct tipc_sock {
	struct sock sk;
	int connected;
	u32 conn_type;
	u32 conn_instance;
	int published;
	u32 max_pkt;
88
	u32 portid;
89 90 91 92 93
	struct tipc_msg phdr;
	struct list_head sock_list;
	struct list_head publications;
	u32 pub_count;
	u32 probing_state;
94
	unsigned long probing_intv;
95 96 97 98 99
	uint conn_timeout;
	atomic_t dupl_rcvcnt;
	bool link_cong;
	uint sent_unacked;
	uint rcv_unacked;
100
	struct sockaddr_tipc remote;
101 102
	struct rhash_head node;
	struct rcu_head rcu;
103
};
P
Per Liden 已提交
104

105
static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
106
static void tipc_data_ready(struct sock *sk);
107
static void tipc_write_space(struct sock *sk);
108 109
static int tipc_release(struct socket *sock);
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
110
static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p);
111
static void tipc_sk_timeout(unsigned long data);
112
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
J
Jon Paul Maloy 已提交
113
			   struct tipc_name_seq const *seq);
114
static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
J
Jon Paul Maloy 已提交
115
			    struct tipc_name_seq const *seq);
116
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
117 118
static int tipc_sk_insert(struct tipc_sock *tsk);
static void tipc_sk_remove(struct tipc_sock *tsk);
119 120 121
static int __tipc_send_stream(struct socket *sock, struct msghdr *m,
			      size_t dsz);
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
P
Per Liden 已提交
122

123 124 125
static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
static const struct proto_ops msg_ops;
P
Per Liden 已提交
126 127
static struct proto tipc_proto;

128 129 130 131 132 133 134 135
static const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = {
	[TIPC_NLA_SOCK_UNSPEC]		= { .type = NLA_UNSPEC },
	[TIPC_NLA_SOCK_ADDR]		= { .type = NLA_U32 },
	[TIPC_NLA_SOCK_REF]		= { .type = NLA_U32 },
	[TIPC_NLA_SOCK_CON]		= { .type = NLA_NESTED },
	[TIPC_NLA_SOCK_HAS_PUBL]	= { .type = NLA_FLAG }
};

136
/*
137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180
 * Revised TIPC socket locking policy:
 *
 * Most socket operations take the standard socket lock when they start
 * and hold it until they finish (or until they need to sleep).  Acquiring
 * this lock grants the owner exclusive access to the fields of the socket
 * data structures, with the exception of the backlog queue.  A few socket
 * operations can be done without taking the socket lock because they only
 * read socket information that never changes during the life of the socket.
 *
 * Socket operations may acquire the lock for the associated TIPC port if they
 * need to perform an operation on the port.  If any routine needs to acquire
 * both the socket lock and the port lock it must take the socket lock first
 * to avoid the risk of deadlock.
 *
 * The dispatcher handling incoming messages cannot grab the socket lock in
 * the standard fashion, since invoked it runs at the BH level and cannot block.
 * Instead, it checks to see if the socket lock is currently owned by someone,
 * and either handles the message itself or adds it to the socket's backlog
 * queue; in the latter case the queued message is processed once the process
 * owning the socket lock releases it.
 *
 * NOTE: Releasing the socket lock while an operation is sleeping overcomes
 * the problem of a blocked socket operation preventing any other operations
 * from occurring.  However, applications must be careful if they have
 * multiple threads trying to send (or receive) on the same socket, as these
 * operations might interfere with each other.  For example, doing a connect
 * and a receive at the same time might allow the receive to consume the
 * ACK message meant for the connect.  While additional work could be done
 * to try and overcome this, it doesn't seem to be worthwhile at the present.
 *
 * NOTE: Releasing the socket lock while an operation is sleeping also ensures
 * that another operation that must be performed in a non-blocking manner is
 * not delayed for very long because the lock has already been taken.
 *
 * NOTE: This code assumes that certain fields of a port/socket pair are
 * constant over its lifetime; such fields can be examined without taking
 * the socket lock and/or port lock, and do not need to be re-read even
 * after resuming processing after waiting.  These fields include:
 *   - socket type
 *   - pointer to socket sk structure (aka tipc_sock structure)
 *   - pointer to port structure
 *   - port reference
 */

181 182 183 184 185
static u32 tsk_own_node(struct tipc_sock *tsk)
{
	return msg_prevnode(&tsk->phdr);
}

186
static u32 tsk_peer_node(struct tipc_sock *tsk)
187
{
188
	return msg_destnode(&tsk->phdr);
189 190
}

191
static u32 tsk_peer_port(struct tipc_sock *tsk)
192
{
193
	return msg_destport(&tsk->phdr);
194 195
}

196
static  bool tsk_unreliable(struct tipc_sock *tsk)
197
{
198
	return msg_src_droppable(&tsk->phdr) != 0;
199 200
}

201
static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
202
{
203
	msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
204 205
}

206
static bool tsk_unreturnable(struct tipc_sock *tsk)
207
{
208
	return msg_dest_droppable(&tsk->phdr) != 0;
209 210
}

211
static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
212
{
213
	msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
214 215
}

216
static int tsk_importance(struct tipc_sock *tsk)
217
{
218
	return msg_importance(&tsk->phdr);
219 220
}

221
static int tsk_set_importance(struct tipc_sock *tsk, int imp)
222 223 224
{
	if (imp > TIPC_CRITICAL_IMPORTANCE)
		return -EINVAL;
225
	msg_set_importance(&tsk->phdr, (u32)imp);
226 227
	return 0;
}
228

229 230 231 232 233 234 235 236 237 238
static struct tipc_sock *tipc_sk(const struct sock *sk)
{
	return container_of(sk, struct tipc_sock, sk);
}

static int tsk_conn_cong(struct tipc_sock *tsk)
{
	return tsk->sent_unacked >= TIPC_FLOWCTRL_WIN;
}

239
/**
240
 * tsk_advance_rx_queue - discard first buffer in socket receive queue
241 242
 *
 * Caller must hold socket lock
P
Per Liden 已提交
243
 */
244
static void tsk_advance_rx_queue(struct sock *sk)
P
Per Liden 已提交
245
{
246
	kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
P
Per Liden 已提交
247 248 249
}

/**
250
 * tsk_rej_rx_queue - reject all buffers in socket receive queue
251 252
 *
 * Caller must hold socket lock
P
Per Liden 已提交
253
 */
254
static void tsk_rej_rx_queue(struct sock *sk)
P
Per Liden 已提交
255
{
256
	struct sk_buff *skb;
257
	u32 dnode;
258
	u32 own_node = tsk_own_node(tipc_sk(sk));
259

260
	while ((skb = __skb_dequeue(&sk->sk_receive_queue))) {
261 262
		if (tipc_msg_reverse(own_node, skb, &dnode, TIPC_ERR_NO_PORT))
			tipc_link_xmit_skb(sock_net(sk), skb, dnode, 0);
263
	}
P
Per Liden 已提交
264 265
}

266
/* tsk_peer_msg - verify if message was sent by connected port's peer
J
Jon Paul Maloy 已提交
267 268 269 270
 *
 * Handles cases where the node's network address has changed from
 * the default of <0.0.0> to its configured setting.
 */
271
static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
J
Jon Paul Maloy 已提交
272
{
273
	struct tipc_net *tn = net_generic(sock_net(&tsk->sk), tipc_net_id);
274
	u32 peer_port = tsk_peer_port(tsk);
J
Jon Paul Maloy 已提交
275 276 277
	u32 orig_node;
	u32 peer_node;

278
	if (unlikely(!tsk->connected))
J
Jon Paul Maloy 已提交
279 280 281 282 283 284
		return false;

	if (unlikely(msg_origport(msg) != peer_port))
		return false;

	orig_node = msg_orignode(msg);
285
	peer_node = tsk_peer_node(tsk);
J
Jon Paul Maloy 已提交
286 287 288 289

	if (likely(orig_node == peer_node))
		return true;

290
	if (!orig_node && (peer_node == tn->own_addr))
J
Jon Paul Maloy 已提交
291 292
		return true;

293
	if (!peer_node && (orig_node == tn->own_addr))
J
Jon Paul Maloy 已提交
294 295 296 297 298
		return true;

	return false;
}

P
Per Liden 已提交
299
/**
300
 * tipc_sk_create - create a TIPC socket
301
 * @net: network namespace (must be default network)
P
Per Liden 已提交
302 303
 * @sock: pre-allocated socket structure
 * @protocol: protocol indicator (must be 0)
304
 * @kern: caused by kernel or by userspace?
305
 *
306 307
 * This routine creates additional data structures used by the TIPC socket,
 * initializes them, and links them together.
P
Per Liden 已提交
308 309 310
 *
 * Returns 0 on success, errno otherwise
 */
311 312
static int tipc_sk_create(struct net *net, struct socket *sock,
			  int protocol, int kern)
P
Per Liden 已提交
313
{
314
	struct tipc_net *tn;
315 316
	const struct proto_ops *ops;
	socket_state state;
P
Per Liden 已提交
317
	struct sock *sk;
318
	struct tipc_sock *tsk;
319
	struct tipc_msg *msg;
320 321

	/* Validate arguments */
P
Per Liden 已提交
322 323 324 325 326
	if (unlikely(protocol != 0))
		return -EPROTONOSUPPORT;

	switch (sock->type) {
	case SOCK_STREAM:
327 328
		ops = &stream_ops;
		state = SS_UNCONNECTED;
P
Per Liden 已提交
329 330
		break;
	case SOCK_SEQPACKET:
331 332
		ops = &packet_ops;
		state = SS_UNCONNECTED;
P
Per Liden 已提交
333 334 335
		break;
	case SOCK_DGRAM:
	case SOCK_RDM:
336 337
		ops = &msg_ops;
		state = SS_READY;
P
Per Liden 已提交
338
		break;
339 340
	default:
		return -EPROTOTYPE;
P
Per Liden 已提交
341 342
	}

343
	/* Allocate socket's protocol area */
Y
Ying Xue 已提交
344
	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto);
345
	if (sk == NULL)
P
Per Liden 已提交
346 347
		return -ENOMEM;

348
	tsk = tipc_sk(sk);
349 350 351
	tsk->max_pkt = MAX_PKT_DEFAULT;
	INIT_LIST_HEAD(&tsk->publications);
	msg = &tsk->phdr;
352 353
	tn = net_generic(sock_net(sk), tipc_net_id);
	tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
354
		      NAMED_H_SIZE, 0);
P
Per Liden 已提交
355

356 357 358 359
	/* Finish initializing socket data structures */
	sock->ops = ops;
	sock->state = state;
	sock_init_data(sock, sk);
360 361 362 363 364
	if (tipc_sk_insert(tsk)) {
		pr_warn("Socket create failed; port numbrer exhausted\n");
		return -EINVAL;
	}
	msg_set_origport(msg, tsk->portid);
365
	setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk);
366
	sk->sk_backlog_rcv = tipc_backlog_rcv;
367
	sk->sk_rcvbuf = sysctl_tipc_rmem[1];
368 369
	sk->sk_data_ready = tipc_data_ready;
	sk->sk_write_space = tipc_write_space;
370
	tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
371
	tsk->sent_unacked = 0;
372
	atomic_set(&tsk->dupl_rcvcnt, 0);
373

374
	if (sock->state == SS_READY) {
375
		tsk_set_unreturnable(tsk, true);
376
		if (sock->type == SOCK_DGRAM)
377
			tsk_set_unreliable(tsk, true);
378
	}
P
Per Liden 已提交
379 380 381
	return 0;
}

382 383 384 385 386 387 388
static void tipc_sk_callback(struct rcu_head *head)
{
	struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);

	sock_put(&tsk->sk);
}

P
Per Liden 已提交
389
/**
390
 * tipc_release - destroy a TIPC socket
P
Per Liden 已提交
391 392 393 394 395 396 397
 * @sock: socket to destroy
 *
 * This routine cleans up any messages that are still queued on the socket.
 * For DGRAM and RDM socket types, all queued messages are rejected.
 * For SEQPACKET and STREAM socket types, the first message is rejected
 * and any others are discarded.  (If the first message on a STREAM socket
 * is partially-read, it is discarded and the next one is rejected instead.)
398
 *
P
Per Liden 已提交
399 400 401 402 403 404
 * NOTE: Rejected messages are not necessarily returned to the sender!  They
 * are returned or discarded according to the "destination droppable" setting
 * specified for the message by the sender.
 *
 * Returns 0 on success, errno otherwise
 */
405
static int tipc_release(struct socket *sock)
P
Per Liden 已提交
406 407
{
	struct sock *sk = sock->sk;
408
	struct net *net;
409
	struct tipc_sock *tsk;
410
	struct sk_buff *skb;
411
	u32 dnode, probing_state;
P
Per Liden 已提交
412

413 414 415 416 417
	/*
	 * Exit if socket isn't fully initialized (occurs when a failed accept()
	 * releases a pre-allocated child socket that was never used)
	 */
	if (sk == NULL)
P
Per Liden 已提交
418
		return 0;
419

420
	net = sock_net(sk);
421
	tsk = tipc_sk(sk);
422 423 424 425 426 427
	lock_sock(sk);

	/*
	 * Reject all unreceived messages, except on an active connection
	 * (which disconnects locally & sends a 'FIN+' to peer)
	 */
428
	dnode = tsk_peer_node(tsk);
P
Per Liden 已提交
429
	while (sock->state != SS_DISCONNECTING) {
430 431
		skb = __skb_dequeue(&sk->sk_receive_queue);
		if (skb == NULL)
P
Per Liden 已提交
432
			break;
433 434
		if (TIPC_SKB_CB(skb)->handle != NULL)
			kfree_skb(skb);
435 436 437 438
		else {
			if ((sock->state == SS_CONNECTING) ||
			    (sock->state == SS_CONNECTED)) {
				sock->state = SS_DISCONNECTING;
439
				tsk->connected = 0;
440
				tipc_node_remove_conn(net, dnode, tsk->portid);
441
			}
442
			if (tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode,
443
					     TIPC_ERR_NO_PORT))
444
				tipc_link_xmit_skb(net, skb, dnode, 0);
445
		}
P
Per Liden 已提交
446 447
	}

448
	tipc_sk_withdraw(tsk, 0, NULL);
449
	probing_state = tsk->probing_state;
450 451
	if (del_timer_sync(&sk->sk_timer) &&
	    probing_state != TIPC_CONN_PROBING)
452
		sock_put(sk);
453
	tipc_sk_remove(tsk);
454
	if (tsk->connected) {
455
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
456
				      TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
457
				      tsk_own_node(tsk), tsk_peer_port(tsk),
458
				      tsk->portid, TIPC_ERR_NO_PORT);
459
		if (skb)
460 461
			tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
		tipc_node_remove_conn(net, dnode, tsk->portid);
462
	}
P
Per Liden 已提交
463

464
	/* Discard any remaining (connection-based) messages in receive queue */
465
	__skb_queue_purge(&sk->sk_receive_queue);
P
Per Liden 已提交
466

467 468 469
	/* Reject any messages that accumulated in backlog queue */
	sock->state = SS_DISCONNECTING;
	release_sock(sk);
470 471

	call_rcu(&tsk->rcu, tipc_sk_callback);
472
	sock->sk = NULL;
P
Per Liden 已提交
473

474
	return 0;
P
Per Liden 已提交
475 476 477
}

/**
478
 * tipc_bind - associate or disassocate TIPC name(s) with a socket
P
Per Liden 已提交
479 480 481
 * @sock: socket structure
 * @uaddr: socket address describing name(s) and desired operation
 * @uaddr_len: size of socket address data structure
482
 *
P
Per Liden 已提交
483 484 485
 * Name and name sequence binding is indicated using a positive scope value;
 * a negative scope value unbinds the specified name.  Specifying no name
 * (i.e. a socket address length of 0) unbinds all names from the socket.
486
 *
P
Per Liden 已提交
487
 * Returns 0 on success, errno otherwise
488 489 490
 *
 * NOTE: This routine doesn't need to take the socket lock since it doesn't
 *       access any non-constant socket information.
P
Per Liden 已提交
491
 */
492 493
static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
		     int uaddr_len)
P
Per Liden 已提交
494
{
495
	struct sock *sk = sock->sk;
P
Per Liden 已提交
496
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
497
	struct tipc_sock *tsk = tipc_sk(sk);
498
	int res = -EINVAL;
P
Per Liden 已提交
499

500 501
	lock_sock(sk);
	if (unlikely(!uaddr_len)) {
502
		res = tipc_sk_withdraw(tsk, 0, NULL);
503 504
		goto exit;
	}
505

506 507 508 509 510 511 512 513
	if (uaddr_len < sizeof(struct sockaddr_tipc)) {
		res = -EINVAL;
		goto exit;
	}
	if (addr->family != AF_TIPC) {
		res = -EAFNOSUPPORT;
		goto exit;
	}
P
Per Liden 已提交
514 515 516

	if (addr->addrtype == TIPC_ADDR_NAME)
		addr->addr.nameseq.upper = addr->addr.nameseq.lower;
517 518 519 520
	else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
		res = -EAFNOSUPPORT;
		goto exit;
	}
521

522
	if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
523
	    (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
524 525 526 527
	    (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
		res = -EACCES;
		goto exit;
	}
528

529
	res = (addr->scope > 0) ?
530 531
		tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
		tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
532 533 534
exit:
	release_sock(sk);
	return res;
P
Per Liden 已提交
535 536
}

537
/**
538
 * tipc_getname - get port ID of socket or peer socket
P
Per Liden 已提交
539 540 541
 * @sock: socket structure
 * @uaddr: area for returned socket address
 * @uaddr_len: area for returned length of socket address
542
 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
543
 *
P
Per Liden 已提交
544
 * Returns 0 on success, errno otherwise
545
 *
546 547
 * NOTE: This routine doesn't need to take the socket lock since it only
 *       accesses socket information that is unchanging (or which changes in
548
 *       a completely predictable manner).
P
Per Liden 已提交
549
 */
550 551
static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
			int *uaddr_len, int peer)
P
Per Liden 已提交
552 553
{
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
554
	struct tipc_sock *tsk = tipc_sk(sock->sk);
555
	struct tipc_net *tn = net_generic(sock_net(sock->sk), tipc_net_id);
P
Per Liden 已提交
556

557
	memset(addr, 0, sizeof(*addr));
558
	if (peer) {
559 560 561
		if ((sock->state != SS_CONNECTED) &&
			((peer != 2) || (sock->state != SS_DISCONNECTING)))
			return -ENOTCONN;
562 563
		addr->addr.id.ref = tsk_peer_port(tsk);
		addr->addr.id.node = tsk_peer_node(tsk);
564
	} else {
565
		addr->addr.id.ref = tsk->portid;
566
		addr->addr.id.node = tn->own_addr;
567
	}
P
Per Liden 已提交
568 569 570 571 572 573 574

	*uaddr_len = sizeof(*addr);
	addr->addrtype = TIPC_ADDR_ID;
	addr->family = AF_TIPC;
	addr->scope = 0;
	addr->addr.name.domain = 0;

575
	return 0;
P
Per Liden 已提交
576 577 578
}

/**
579
 * tipc_poll - read and possibly block on pollmask
P
Per Liden 已提交
580 581 582 583
 * @file: file structure associated with the socket
 * @sock: socket for which to calculate the poll bits
 * @wait: ???
 *
584 585 586 587 588 589 590 591 592
 * Returns pollmask value
 *
 * COMMENTARY:
 * It appears that the usual socket locking mechanisms are not useful here
 * since the pollmask info is potentially out-of-date the moment this routine
 * exits.  TCP and other protocols seem to rely on higher level poll routines
 * to handle any preventable race conditions, so TIPC will do the same ...
 *
 * TIPC sets the returned events as follows:
593 594 595 596
 *
 * socket state		flags set
 * ------------		---------
 * unconnected		no read flags
597
 *			POLLOUT if port is not congested
598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616
 *
 * connecting		POLLIN/POLLRDNORM if ACK/NACK in rx queue
 *			no write flags
 *
 * connected		POLLIN/POLLRDNORM if data in rx queue
 *			POLLOUT if port is not congested
 *
 * disconnecting	POLLIN/POLLRDNORM/POLLHUP
 *			no write flags
 *
 * listening		POLLIN if SYN in rx queue
 *			no write flags
 *
 * ready		POLLIN/POLLRDNORM if data in rx queue
 * [connectionless]	POLLOUT (since port cannot be congested)
 *
 * IMPORTANT: The fact that a read or write operation is indicated does NOT
 * imply that the operation will succeed, merely that it should be performed
 * and will not block.
P
Per Liden 已提交
617
 */
618 619
static unsigned int tipc_poll(struct file *file, struct socket *sock,
			      poll_table *wait)
P
Per Liden 已提交
620
{
621
	struct sock *sk = sock->sk;
622
	struct tipc_sock *tsk = tipc_sk(sk);
623
	u32 mask = 0;
624

625
	sock_poll_wait(file, sk_sleep(sk), wait);
626

627
	switch ((int)sock->state) {
628
	case SS_UNCONNECTED:
629
		if (!tsk->link_cong)
630 631
			mask |= POLLOUT;
		break;
632 633
	case SS_READY:
	case SS_CONNECTED:
634
		if (!tsk->link_cong && !tsk_conn_cong(tsk))
635 636 637 638 639 640 641 642 643 644 645
			mask |= POLLOUT;
		/* fall thru' */
	case SS_CONNECTING:
	case SS_LISTENING:
		if (!skb_queue_empty(&sk->sk_receive_queue))
			mask |= (POLLIN | POLLRDNORM);
		break;
	case SS_DISCONNECTING:
		mask = (POLLIN | POLLRDNORM | POLLHUP);
		break;
	}
646 647

	return mask;
P
Per Liden 已提交
648 649
}

650 651 652 653
/**
 * tipc_sendmcast - send multicast message
 * @sock: socket structure
 * @seq: destination address
654
 * @msg: message to send
655 656 657 658 659 660 661
 * @dsz: total length of message data
 * @timeo: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_sendmcast(struct  socket *sock, struct tipc_name_seq *seq,
662
			  struct msghdr *msg, size_t dsz, long timeo)
663 664
{
	struct sock *sk = sock->sk;
665
	struct tipc_sock *tsk = tipc_sk(sk);
666
	struct net *net = sock_net(sk);
667
	struct tipc_msg *mhdr = &tsk->phdr;
668
	struct sk_buff_head *pktchain = &sk->sk_write_queue;
A
Al Viro 已提交
669
	struct iov_iter save = msg->msg_iter;
670 671 672 673 674 675 676 677 678 679 680 681 682 683
	uint mtu;
	int rc;

	msg_set_type(mhdr, TIPC_MCAST_MSG);
	msg_set_lookup_scope(mhdr, TIPC_CLUSTER_SCOPE);
	msg_set_destport(mhdr, 0);
	msg_set_destnode(mhdr, 0);
	msg_set_nametype(mhdr, seq->type);
	msg_set_namelower(mhdr, seq->lower);
	msg_set_nameupper(mhdr, seq->upper);
	msg_set_hdr_sz(mhdr, MCAST_H_SIZE);

new_mtu:
	mtu = tipc_bclink_get_mtu();
684
	rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, pktchain);
685 686 687 688
	if (unlikely(rc < 0))
		return rc;

	do {
689
		rc = tipc_bclink_xmit(net, pktchain);
690 691 692 693
		if (likely(rc >= 0)) {
			rc = dsz;
			break;
		}
A
Al Viro 已提交
694 695
		if (rc == -EMSGSIZE) {
			msg->msg_iter = save;
696
			goto new_mtu;
A
Al Viro 已提交
697
		}
698 699
		if (rc != -ELINKCONG)
			break;
700
		tipc_sk(sk)->link_cong = 1;
701 702
		rc = tipc_wait_for_sndmsg(sock, &timeo);
		if (rc)
703
			__skb_queue_purge(pktchain);
704 705 706 707
	} while (!rc);
	return rc;
}

708 709 710 711 712 713
/**
 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
 * @arrvq: queue with arriving messages, to be cloned after destination lookup
 * @inputq: queue with cloned messages, delivered to socket after dest lookup
 *
 * Multi-threaded: parallel calls with reference to same queues may occur
714
 */
715 716
void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
		       struct sk_buff_head *inputq)
717
{
718
	struct tipc_msg *msg;
719 720
	struct tipc_plist dports;
	u32 portid;
721
	u32 scope = TIPC_CLUSTER_SCOPE;
722 723 724
	struct sk_buff_head tmpq;
	uint hsz;
	struct sk_buff *skb, *_skb;
725

726
	__skb_queue_head_init(&tmpq);
727
	tipc_plist_init(&dports);
728

729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749
	skb = tipc_skb_peek(arrvq, &inputq->lock);
	for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
		msg = buf_msg(skb);
		hsz = skb_headroom(skb) + msg_hdr_sz(msg);

		if (in_own_node(net, msg_orignode(msg)))
			scope = TIPC_NODE_SCOPE;

		/* Create destination port list and message clones: */
		tipc_nametbl_mc_translate(net,
					  msg_nametype(msg), msg_namelower(msg),
					  msg_nameupper(msg), scope, &dports);
		portid = tipc_plist_pop(&dports);
		for (; portid; portid = tipc_plist_pop(&dports)) {
			_skb = __pskb_copy(skb, hsz, GFP_ATOMIC);
			if (_skb) {
				msg_set_destport(buf_msg(_skb), portid);
				__skb_queue_tail(&tmpq, _skb);
				continue;
			}
			pr_warn("Failed to clone mcast rcv buffer\n");
750
		}
751 752 753 754 755 756 757 758 759
		/* Append to inputq if not already done by other thread */
		spin_lock_bh(&inputq->lock);
		if (skb_peek(arrvq) == skb) {
			skb_queue_splice_tail_init(&tmpq, inputq);
			kfree_skb(__skb_dequeue(arrvq));
		}
		spin_unlock_bh(&inputq->lock);
		__skb_queue_purge(&tmpq);
		kfree_skb(skb);
760
	}
761
	tipc_sk_rcv(net, inputq);
762 763
}

764 765 766
/**
 * tipc_sk_proto_rcv - receive a connection mng protocol message
 * @tsk: receiving socket
767
 * @skb: pointer to message buffer. Set to NULL if buffer is consumed.
768
 */
769
static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff **skb)
770
{
771
	struct tipc_msg *msg = buf_msg(*skb);
772
	int conn_cong;
773 774
	u32 dnode;
	u32 own_node = tsk_own_node(tsk);
775
	/* Ignore if connection cannot be validated: */
776
	if (!tsk_peer_msg(tsk, msg))
777 778
		goto exit;

779
	tsk->probing_state = TIPC_CONN_OK;
780 781

	if (msg_type(msg) == CONN_ACK) {
782
		conn_cong = tsk_conn_cong(tsk);
783 784
		tsk->sent_unacked -= msg_msgcnt(msg);
		if (conn_cong)
785
			tsk->sk.sk_write_space(&tsk->sk);
786
	} else if (msg_type(msg) == CONN_PROBE) {
787 788 789 790
		if (tipc_msg_reverse(own_node, *skb, &dnode, TIPC_OK)) {
			msg_set_type(msg, CONN_PROBE_REPLY);
			return;
		}
791 792 793
	}
	/* Do nothing if msg_type() == CONN_PROBE_REPLY */
exit:
794 795
	kfree_skb(*skb);
	*skb = NULL;
796 797
}

798 799 800
static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
{
	struct sock *sk = sock->sk;
801
	struct tipc_sock *tsk = tipc_sk(sk);
802 803 804 805 806 807 808 809 810 811 812 813 814 815 816
	DEFINE_WAIT(wait);
	int done;

	do {
		int err = sock_error(sk);
		if (err)
			return err;
		if (sock->state == SS_DISCONNECTING)
			return -EPIPE;
		if (!*timeo_p)
			return -EAGAIN;
		if (signal_pending(current))
			return sock_intr_errno(*timeo_p);

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
817
		done = sk_wait_event(sk, timeo_p, !tsk->link_cong);
818 819 820 821 822
		finish_wait(sk_sleep(sk), &wait);
	} while (!done);
	return 0;
}

P
Per Liden 已提交
823
/**
824
 * tipc_sendmsg - send message in connectionless manner
P
Per Liden 已提交
825 826
 * @sock: socket structure
 * @m: message to send
827
 * @dsz: amount of user data to be sent
828
 *
P
Per Liden 已提交
829
 * Message must have an destination specified explicitly.
830
 * Used for SOCK_RDM and SOCK_DGRAM messages,
P
Per Liden 已提交
831 832
 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
833
 *
P
Per Liden 已提交
834 835
 * Returns the number of bytes sent on success, or errno otherwise
 */
836
static int tipc_sendmsg(struct socket *sock,
837
			struct msghdr *m, size_t dsz)
838 839 840 841 842 843 844 845 846 847 848 849
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
	ret = __tipc_sendmsg(sock, m, dsz);
	release_sock(sk);

	return ret;
}

static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz)
P
Per Liden 已提交
850
{
851
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
852
	struct sock *sk = sock->sk;
853
	struct tipc_sock *tsk = tipc_sk(sk);
854
	struct net *net = sock_net(sk);
855
	struct tipc_msg *mhdr = &tsk->phdr;
856
	u32 dnode, dport;
857
	struct sk_buff_head *pktchain = &sk->sk_write_queue;
858
	struct sk_buff *skb;
859
	struct tipc_name_seq *seq;
A
Al Viro 已提交
860
	struct iov_iter save;
861
	u32 mtu;
862
	long timeo;
E
Erik Hugne 已提交
863
	int rc;
P
Per Liden 已提交
864

865
	if (dsz > TIPC_MAX_USER_MSG_SIZE)
866
		return -EMSGSIZE;
867 868 869 870 871 872 873 874 875
	if (unlikely(!dest)) {
		if (tsk->connected && sock->state == SS_READY)
			dest = &tsk->remote;
		else
			return -EDESTADDRREQ;
	} else if (unlikely(m->msg_namelen < sizeof(*dest)) ||
		   dest->family != AF_TIPC) {
		return -EINVAL;
	}
876
	if (unlikely(sock->state != SS_READY)) {
877 878 879 880 881 882
		if (sock->state == SS_LISTENING)
			return -EPIPE;
		if (sock->state != SS_UNCONNECTED)
			return -EISCONN;
		if (tsk->published)
			return -EOPNOTSUPP;
883
		if (dest->addrtype == TIPC_ADDR_NAME) {
884 885
			tsk->conn_type = dest->addr.name.name.type;
			tsk->conn_instance = dest->addr.name.name.instance;
886
		}
P
Per Liden 已提交
887
	}
888
	seq = &dest->addr.nameseq;
889
	timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
890 891

	if (dest->addrtype == TIPC_ADDR_MCAST) {
892
		return tipc_sendmcast(sock, seq, m, dsz, timeo);
893 894 895 896 897 898 899 900 901 902 903
	} else if (dest->addrtype == TIPC_ADDR_NAME) {
		u32 type = dest->addr.name.name.type;
		u32 inst = dest->addr.name.name.instance;
		u32 domain = dest->addr.name.domain;

		dnode = domain;
		msg_set_type(mhdr, TIPC_NAMED_MSG);
		msg_set_hdr_sz(mhdr, NAMED_H_SIZE);
		msg_set_nametype(mhdr, type);
		msg_set_nameinst(mhdr, inst);
		msg_set_lookup_scope(mhdr, tipc_addr_scope(domain));
904
		dport = tipc_nametbl_translate(net, type, inst, &dnode);
905 906
		msg_set_destnode(mhdr, dnode);
		msg_set_destport(mhdr, dport);
907 908
		if (unlikely(!dport && !dnode))
			return -EHOSTUNREACH;
909 910 911 912 913 914 915 916 917
	} else if (dest->addrtype == TIPC_ADDR_ID) {
		dnode = dest->addr.id.node;
		msg_set_type(mhdr, TIPC_DIRECT_MSG);
		msg_set_lookup_scope(mhdr, 0);
		msg_set_destnode(mhdr, dnode);
		msg_set_destport(mhdr, dest->addr.id.ref);
		msg_set_hdr_sz(mhdr, BASIC_H_SIZE);
	}

A
Al Viro 已提交
918
	save = m->msg_iter;
919
new_mtu:
920
	mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
921
	rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, pktchain);
922
	if (rc < 0)
923
		return rc;
924 925

	do {
926
		skb = skb_peek(pktchain);
927
		TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong;
928
		rc = tipc_link_xmit(net, pktchain, dnode, tsk->portid);
929 930
		if (likely(rc >= 0)) {
			if (sock->state != SS_READY)
931
				sock->state = SS_CONNECTING;
932
			rc = dsz;
933
			break;
934
		}
A
Al Viro 已提交
935 936
		if (rc == -EMSGSIZE) {
			m->msg_iter = save;
937
			goto new_mtu;
A
Al Viro 已提交
938
		}
939
		if (rc != -ELINKCONG)
940
			break;
941
		tsk->link_cong = 1;
942
		rc = tipc_wait_for_sndmsg(sock, &timeo);
943
		if (rc)
944
			__skb_queue_purge(pktchain);
945 946 947
	} while (!rc);

	return rc;
P
Per Liden 已提交
948 949
}

950 951 952
static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p)
{
	struct sock *sk = sock->sk;
953
	struct tipc_sock *tsk = tipc_sk(sk);
954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971
	DEFINE_WAIT(wait);
	int done;

	do {
		int err = sock_error(sk);
		if (err)
			return err;
		if (sock->state == SS_DISCONNECTING)
			return -EPIPE;
		else if (sock->state != SS_CONNECTED)
			return -ENOTCONN;
		if (!*timeo_p)
			return -EAGAIN;
		if (signal_pending(current))
			return sock_intr_errno(*timeo_p);

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		done = sk_wait_event(sk, timeo_p,
972
				     (!tsk->link_cong &&
973 974
				      !tsk_conn_cong(tsk)) ||
				     !tsk->connected);
975 976 977 978 979
		finish_wait(sk_sleep(sk), &wait);
	} while (!done);
	return 0;
}

980
/**
981
 * tipc_send_stream - send stream-oriented data
P
Per Liden 已提交
982
 * @sock: socket structure
983 984
 * @m: data to send
 * @dsz: total length of data to be transmitted
985
 *
986
 * Used for SOCK_STREAM data.
987
 *
988 989
 * Returns the number of bytes sent on success (or partial success),
 * or errno if no data sent
P
Per Liden 已提交
990
 */
991
static int tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz)
992 993 994 995 996 997 998 999 1000 1001 1002 1003
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
	ret = __tipc_send_stream(sock, m, dsz);
	release_sock(sk);

	return ret;
}

static int __tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz)
P
Per Liden 已提交
1004
{
1005
	struct sock *sk = sock->sk;
1006
	struct net *net = sock_net(sk);
1007
	struct tipc_sock *tsk = tipc_sk(sk);
1008
	struct tipc_msg *mhdr = &tsk->phdr;
1009
	struct sk_buff_head *pktchain = &sk->sk_write_queue;
1010
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1011
	u32 portid = tsk->portid;
1012
	int rc = -EINVAL;
1013
	long timeo;
1014 1015
	u32 dnode;
	uint mtu, send, sent = 0;
A
Al Viro 已提交
1016
	struct iov_iter save;
P
Per Liden 已提交
1017 1018

	/* Handle implied connection establishment */
1019
	if (unlikely(dest)) {
1020
		rc = __tipc_sendmsg(sock, m, dsz);
1021
		if (dsz && (dsz == rc))
1022
			tsk->sent_unacked = 1;
1023 1024 1025
		return rc;
	}
	if (dsz > (uint)INT_MAX)
1026 1027
		return -EMSGSIZE;

1028 1029
	if (unlikely(sock->state != SS_CONNECTED)) {
		if (sock->state == SS_DISCONNECTING)
1030
			return -EPIPE;
1031
		else
1032
			return -ENOTCONN;
1033
	}
1034

1035
	timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
1036
	dnode = tsk_peer_node(tsk);
1037 1038

next:
A
Al Viro 已提交
1039
	save = m->msg_iter;
1040
	mtu = tsk->max_pkt;
1041
	send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
1042
	rc = tipc_msg_build(mhdr, m, sent, send, mtu, pktchain);
1043
	if (unlikely(rc < 0))
1044
		return rc;
1045
	do {
1046
		if (likely(!tsk_conn_cong(tsk))) {
1047
			rc = tipc_link_xmit(net, pktchain, dnode, portid);
1048
			if (likely(!rc)) {
1049
				tsk->sent_unacked++;
1050 1051 1052 1053 1054 1055
				sent += send;
				if (sent == dsz)
					break;
				goto next;
			}
			if (rc == -EMSGSIZE) {
1056 1057
				tsk->max_pkt = tipc_node_get_mtu(net, dnode,
								 portid);
A
Al Viro 已提交
1058
				m->msg_iter = save;
1059 1060 1061 1062
				goto next;
			}
			if (rc != -ELINKCONG)
				break;
1063
			tsk->link_cong = 1;
1064 1065
		}
		rc = tipc_wait_for_sndpkt(sock, &timeo);
1066
		if (rc)
1067
			__skb_queue_purge(pktchain);
1068
	} while (!rc);
1069

1070
	return sent ? sent : rc;
P
Per Liden 已提交
1071 1072
}

1073
/**
1074
 * tipc_send_packet - send a connection-oriented message
P
Per Liden 已提交
1075
 * @sock: socket structure
1076 1077
 * @m: message to send
 * @dsz: length of data to be transmitted
1078
 *
1079
 * Used for SOCK_SEQPACKET messages.
1080
 *
1081
 * Returns the number of bytes sent on success, or errno otherwise
P
Per Liden 已提交
1082
 */
1083
static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
P
Per Liden 已提交
1084
{
1085 1086
	if (dsz > TIPC_MAX_USER_MSG_SIZE)
		return -EMSGSIZE;
P
Per Liden 已提交
1087

1088
	return tipc_send_stream(sock, m, dsz);
P
Per Liden 已提交
1089 1090
}

1091
/* tipc_sk_finish_conn - complete the setup of a connection
P
Per Liden 已提交
1092
 */
1093
static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
1094
				u32 peer_node)
P
Per Liden 已提交
1095
{
1096 1097
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
1098
	struct tipc_msg *msg = &tsk->phdr;
P
Per Liden 已提交
1099

1100 1101 1102 1103 1104
	msg_set_destnode(msg, peer_node);
	msg_set_destport(msg, peer_port);
	msg_set_type(msg, TIPC_CONN_MSG);
	msg_set_lookup_scope(msg, 0);
	msg_set_hdr_sz(msg, SHORT_H_SIZE);
1105

1106
	tsk->probing_intv = CONN_PROBING_INTERVAL;
1107 1108
	tsk->probing_state = TIPC_CONN_OK;
	tsk->connected = 1;
1109
	sk_reset_timer(sk, &sk->sk_timer, jiffies + tsk->probing_intv);
1110 1111
	tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
	tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
P
Per Liden 已提交
1112 1113 1114 1115 1116 1117
}

/**
 * set_orig_addr - capture sender's address for received message
 * @m: descriptor for message info
 * @msg: received message header
1118
 *
P
Per Liden 已提交
1119 1120
 * Note: Address is not captured if not requested by receiver.
 */
S
Sam Ravnborg 已提交
1121
static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
P
Per Liden 已提交
1122
{
1123
	DECLARE_SOCKADDR(struct sockaddr_tipc *, addr, m->msg_name);
P
Per Liden 已提交
1124

1125
	if (addr) {
P
Per Liden 已提交
1126 1127
		addr->family = AF_TIPC;
		addr->addrtype = TIPC_ADDR_ID;
1128
		memset(&addr->addr, 0, sizeof(addr->addr));
P
Per Liden 已提交
1129 1130
		addr->addr.id.ref = msg_origport(msg);
		addr->addr.id.node = msg_orignode(msg);
1131 1132
		addr->addr.name.domain = 0;	/* could leave uninitialized */
		addr->scope = 0;		/* could leave uninitialized */
P
Per Liden 已提交
1133 1134 1135 1136 1137
		m->msg_namelen = sizeof(struct sockaddr_tipc);
	}
}

/**
1138
 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
P
Per Liden 已提交
1139 1140
 * @m: descriptor for message info
 * @msg: received message header
1141
 * @tsk: TIPC port associated with message
1142
 *
P
Per Liden 已提交
1143
 * Note: Ancillary data is not captured if not requested by receiver.
1144
 *
P
Per Liden 已提交
1145 1146
 * Returns 0 if successful, otherwise errno
 */
1147 1148
static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
				 struct tipc_sock *tsk)
P
Per Liden 已提交
1149 1150 1151 1152
{
	u32 anc_data[3];
	u32 err;
	u32 dest_type;
1153
	int has_name;
P
Per Liden 已提交
1154 1155 1156 1157 1158 1159 1160 1161 1162 1163
	int res;

	if (likely(m->msg_controllen == 0))
		return 0;

	/* Optionally capture errored message object(s) */
	err = msg ? msg_errcode(msg) : 0;
	if (unlikely(err)) {
		anc_data[0] = err;
		anc_data[1] = msg_data_sz(msg);
1164 1165
		res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
		if (res)
P
Per Liden 已提交
1166
			return res;
1167 1168 1169 1170 1171 1172
		if (anc_data[1]) {
			res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
				       msg_data(msg));
			if (res)
				return res;
		}
P
Per Liden 已提交
1173 1174 1175 1176 1177 1178
	}

	/* Optionally capture message destination object */
	dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
	switch (dest_type) {
	case TIPC_NAMED_MSG:
1179
		has_name = 1;
P
Per Liden 已提交
1180 1181 1182 1183 1184
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_namelower(msg);
		break;
	case TIPC_MCAST_MSG:
1185
		has_name = 1;
P
Per Liden 已提交
1186 1187 1188 1189 1190
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_nameupper(msg);
		break;
	case TIPC_CONN_MSG:
1191 1192 1193 1194
		has_name = (tsk->conn_type != 0);
		anc_data[0] = tsk->conn_type;
		anc_data[1] = tsk->conn_instance;
		anc_data[2] = tsk->conn_instance;
P
Per Liden 已提交
1195 1196
		break;
	default:
1197
		has_name = 0;
P
Per Liden 已提交
1198
	}
1199 1200 1201 1202 1203
	if (has_name) {
		res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
		if (res)
			return res;
	}
P
Per Liden 已提交
1204 1205 1206 1207

	return 0;
}

1208
static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack)
1209
{
1210
	struct net *net = sock_net(&tsk->sk);
1211
	struct sk_buff *skb = NULL;
1212
	struct tipc_msg *msg;
1213 1214
	u32 peer_port = tsk_peer_port(tsk);
	u32 dnode = tsk_peer_node(tsk);
1215

1216
	if (!tsk->connected)
1217
		return;
1218 1219 1220
	skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
			      dnode, tsk_own_node(tsk), peer_port,
			      tsk->portid, TIPC_OK);
1221
	if (!skb)
1222
		return;
1223
	msg = buf_msg(skb);
1224
	msg_set_msgcnt(msg, ack);
1225
	tipc_link_xmit_skb(net, skb, dnode, msg_link_selector(msg));
1226 1227
}

1228
static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
Y
Ying Xue 已提交
1229 1230 1231
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT(wait);
1232
	long timeo = *timeop;
Y
Ying Xue 已提交
1233 1234 1235 1236
	int err;

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1237
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
Y
Ying Xue 已提交
1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251
			if (sock->state == SS_DISCONNECTING) {
				err = -ENOTCONN;
				break;
			}
			release_sock(sk);
			timeo = schedule_timeout(timeo);
			lock_sock(sk);
		}
		err = 0;
		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
1252 1253 1254
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
Y
Ying Xue 已提交
1255 1256
	}
	finish_wait(sk_sleep(sk), &wait);
1257
	*timeop = timeo;
Y
Ying Xue 已提交
1258 1259 1260
	return err;
}

1261
/**
1262
 * tipc_recvmsg - receive packet-oriented message
P
Per Liden 已提交
1263 1264 1265
 * @m: descriptor for message info
 * @buf_len: total size of user buffer area
 * @flags: receive flags
1266
 *
P
Per Liden 已提交
1267 1268 1269 1270 1271
 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
 * If the complete message doesn't fit in user area, truncate it.
 *
 * Returns size of returned message data, errno otherwise
 */
1272 1273
static int tipc_recvmsg(struct socket *sock, struct msghdr *m, size_t buf_len,
			int flags)
P
Per Liden 已提交
1274
{
1275
	struct sock *sk = sock->sk;
1276
	struct tipc_sock *tsk = tipc_sk(sk);
P
Per Liden 已提交
1277 1278
	struct sk_buff *buf;
	struct tipc_msg *msg;
Y
Ying Xue 已提交
1279
	long timeo;
P
Per Liden 已提交
1280 1281 1282 1283
	unsigned int sz;
	u32 err;
	int res;

1284
	/* Catch invalid receive requests */
P
Per Liden 已提交
1285 1286 1287
	if (unlikely(!buf_len))
		return -EINVAL;

1288
	lock_sock(sk);
P
Per Liden 已提交
1289

1290 1291
	if (unlikely(sock->state == SS_UNCONNECTED)) {
		res = -ENOTCONN;
P
Per Liden 已提交
1292 1293 1294
		goto exit;
	}

Y
Ying Xue 已提交
1295
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1296
restart:
P
Per Liden 已提交
1297

1298
	/* Look for a message in receive queue; wait if necessary */
1299
	res = tipc_wait_for_rcvmsg(sock, &timeo);
Y
Ying Xue 已提交
1300 1301
	if (res)
		goto exit;
P
Per Liden 已提交
1302

1303 1304
	/* Look at first message in receive queue */
	buf = skb_peek(&sk->sk_receive_queue);
P
Per Liden 已提交
1305 1306 1307 1308 1309 1310
	msg = buf_msg(buf);
	sz = msg_data_sz(msg);
	err = msg_errcode(msg);

	/* Discard an empty non-errored message & try again */
	if ((!sz) && (!err)) {
1311
		tsk_advance_rx_queue(sk);
P
Per Liden 已提交
1312 1313 1314 1315 1316 1317 1318
		goto restart;
	}

	/* Capture sender's address (optional) */
	set_orig_addr(m, msg);

	/* Capture ancillary data (optional) */
1319
	res = tipc_sk_anc_data_recv(m, msg, tsk);
1320
	if (res)
P
Per Liden 已提交
1321 1322 1323 1324 1325 1326 1327 1328
		goto exit;

	/* Capture message data (if valid) & compute return value (always) */
	if (!err) {
		if (unlikely(buf_len < sz)) {
			sz = buf_len;
			m->msg_flags |= MSG_TRUNC;
		}
1329
		res = skb_copy_datagram_msg(buf, msg_hdr_sz(msg), m, sz);
1330
		if (res)
P
Per Liden 已提交
1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342
			goto exit;
		res = sz;
	} else {
		if ((sock->state == SS_READY) ||
		    ((err == TIPC_CONN_SHUTDOWN) || m->msg_control))
			res = 0;
		else
			res = -ECONNRESET;
	}

	/* Consume received message (optional) */
	if (likely(!(flags & MSG_PEEK))) {
1343
		if ((sock->state != SS_READY) &&
1344
		    (++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) {
1345
			tipc_sk_send_ack(tsk, tsk->rcv_unacked);
1346 1347
			tsk->rcv_unacked = 0;
		}
1348
		tsk_advance_rx_queue(sk);
1349
	}
P
Per Liden 已提交
1350
exit:
1351
	release_sock(sk);
P
Per Liden 已提交
1352 1353 1354
	return res;
}

1355
/**
1356
 * tipc_recv_stream - receive stream-oriented data
P
Per Liden 已提交
1357 1358 1359
 * @m: descriptor for message info
 * @buf_len: total size of user buffer area
 * @flags: receive flags
1360 1361
 *
 * Used for SOCK_STREAM messages only.  If not enough data is available
P
Per Liden 已提交
1362 1363 1364 1365
 * will optionally wait for more; never truncates data.
 *
 * Returns size of returned message data, errno otherwise
 */
1366 1367
static int tipc_recv_stream(struct socket *sock, struct msghdr *m,
			    size_t buf_len, int flags)
P
Per Liden 已提交
1368
{
1369
	struct sock *sk = sock->sk;
1370
	struct tipc_sock *tsk = tipc_sk(sk);
P
Per Liden 已提交
1371 1372
	struct sk_buff *buf;
	struct tipc_msg *msg;
Y
Ying Xue 已提交
1373
	long timeo;
P
Per Liden 已提交
1374
	unsigned int sz;
1375
	int sz_to_copy, target, needed;
P
Per Liden 已提交
1376 1377
	int sz_copied = 0;
	u32 err;
1378
	int res = 0;
P
Per Liden 已提交
1379

1380
	/* Catch invalid receive attempts */
P
Per Liden 已提交
1381 1382 1383
	if (unlikely(!buf_len))
		return -EINVAL;

1384
	lock_sock(sk);
P
Per Liden 已提交
1385

Y
Ying Xue 已提交
1386
	if (unlikely(sock->state == SS_UNCONNECTED)) {
1387
		res = -ENOTCONN;
P
Per Liden 已提交
1388 1389 1390
		goto exit;
	}

1391
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
Y
Ying Xue 已提交
1392
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
P
Per Liden 已提交
1393

1394
restart:
1395
	/* Look for a message in receive queue; wait if necessary */
1396
	res = tipc_wait_for_rcvmsg(sock, &timeo);
Y
Ying Xue 已提交
1397 1398
	if (res)
		goto exit;
P
Per Liden 已提交
1399

1400 1401
	/* Look at first message in receive queue */
	buf = skb_peek(&sk->sk_receive_queue);
P
Per Liden 已提交
1402 1403 1404 1405 1406 1407
	msg = buf_msg(buf);
	sz = msg_data_sz(msg);
	err = msg_errcode(msg);

	/* Discard an empty non-errored message & try again */
	if ((!sz) && (!err)) {
1408
		tsk_advance_rx_queue(sk);
P
Per Liden 已提交
1409 1410 1411 1412 1413 1414
		goto restart;
	}

	/* Optionally capture sender's address & ancillary data of first msg */
	if (sz_copied == 0) {
		set_orig_addr(m, msg);
1415
		res = tipc_sk_anc_data_recv(m, msg, tsk);
1416
		if (res)
P
Per Liden 已提交
1417 1418 1419 1420 1421
			goto exit;
	}

	/* Capture message data (if valid) & compute return value (always) */
	if (!err) {
1422
		u32 offset = (u32)(unsigned long)(TIPC_SKB_CB(buf)->handle);
P
Per Liden 已提交
1423

1424
		sz -= offset;
P
Per Liden 已提交
1425 1426
		needed = (buf_len - sz_copied);
		sz_to_copy = (sz <= needed) ? sz : needed;
1427

1428 1429
		res = skb_copy_datagram_msg(buf, msg_hdr_sz(msg) + offset,
					    m, sz_to_copy);
1430
		if (res)
P
Per Liden 已提交
1431
			goto exit;
1432

P
Per Liden 已提交
1433 1434 1435 1436
		sz_copied += sz_to_copy;

		if (sz_to_copy < sz) {
			if (!(flags & MSG_PEEK))
1437 1438
				TIPC_SKB_CB(buf)->handle =
				(void *)(unsigned long)(offset + sz_to_copy);
P
Per Liden 已提交
1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452
			goto exit;
		}
	} else {
		if (sz_copied != 0)
			goto exit; /* can't add error msg to valid data */

		if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)
			res = 0;
		else
			res = -ECONNRESET;
	}

	/* Consume received message (optional) */
	if (likely(!(flags & MSG_PEEK))) {
1453
		if (unlikely(++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) {
1454
			tipc_sk_send_ack(tsk, tsk->rcv_unacked);
1455 1456
			tsk->rcv_unacked = 0;
		}
1457
		tsk_advance_rx_queue(sk);
1458
	}
P
Per Liden 已提交
1459 1460

	/* Loop around if more data is required */
1461 1462
	if ((sz_copied < buf_len) &&	/* didn't get all requested data */
	    (!skb_queue_empty(&sk->sk_receive_queue) ||
1463
	    (sz_copied < target)) &&	/* and more is ready or required */
1464 1465
	    (!(flags & MSG_PEEK)) &&	/* and aren't just peeking at data */
	    (!err))			/* and haven't reached a FIN */
P
Per Liden 已提交
1466 1467 1468
		goto restart;

exit:
1469
	release_sock(sk);
1470
	return sz_copied ? sz_copied : res;
P
Per Liden 已提交
1471 1472
}

1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493
/**
 * tipc_write_space - wake up thread if port congestion is released
 * @sk: socket
 */
static void tipc_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
						POLLWRNORM | POLLWRBAND);
	rcu_read_unlock();
}

/**
 * tipc_data_ready - wake up threads to indicate messages have been received
 * @sk: socket
 * @len: the length of messages
 */
1494
static void tipc_data_ready(struct sock *sk)
1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
						POLLRDNORM | POLLRDBAND);
	rcu_read_unlock();
}

1506 1507
/**
 * filter_connect - Handle all incoming messages for a connection-based socket
1508
 * @tsk: TIPC socket
1509
 * @skb: pointer to message buffer. Set to NULL if buffer is consumed
1510
 *
S
stephen hemminger 已提交
1511
 * Returns 0 (TIPC_OK) if everything ok, -TIPC_ERR_NO_PORT otherwise
1512
 */
1513
static int filter_connect(struct tipc_sock *tsk, struct sk_buff **skb)
1514
{
1515
	struct sock *sk = &tsk->sk;
1516
	struct net *net = sock_net(sk);
1517
	struct socket *sock = sk->sk_socket;
1518
	struct tipc_msg *msg = buf_msg(*skb);
1519
	int retval = -TIPC_ERR_NO_PORT;
1520 1521 1522 1523 1524 1525 1526

	if (msg_mcast(msg))
		return retval;

	switch ((int)sock->state) {
	case SS_CONNECTED:
		/* Accept only connection-based messages sent by peer */
1527
		if (tsk_peer_msg(tsk, msg)) {
1528 1529
			if (unlikely(msg_errcode(msg))) {
				sock->state = SS_DISCONNECTING;
1530
				tsk->connected = 0;
1531
				/* let timer expire on it's own */
1532
				tipc_node_remove_conn(net, tsk_peer_node(tsk),
1533
						      tsk->portid);
1534 1535 1536 1537 1538 1539
			}
			retval = TIPC_OK;
		}
		break;
	case SS_CONNECTING:
		/* Accept only ACK or NACK message */
1540 1541 1542 1543

		if (unlikely(!msg_connected(msg)))
			break;

1544 1545
		if (unlikely(msg_errcode(msg))) {
			sock->state = SS_DISCONNECTING;
1546
			sk->sk_err = ECONNREFUSED;
1547 1548 1549 1550
			retval = TIPC_OK;
			break;
		}

1551
		if (unlikely(msg_importance(msg) > TIPC_CRITICAL_IMPORTANCE)) {
1552
			sock->state = SS_DISCONNECTING;
1553
			sk->sk_err = EINVAL;
1554
			retval = TIPC_OK;
1555 1556 1557
			break;
		}

1558 1559
		tipc_sk_finish_conn(tsk, msg_origport(msg), msg_orignode(msg));
		msg_set_importance(&tsk->phdr, msg_importance(msg));
1560 1561
		sock->state = SS_CONNECTED;

1562 1563 1564 1565 1566 1567
		/* If an incoming message is an 'ACK-', it should be
		 * discarded here because it doesn't contain useful
		 * data. In addition, we should try to wake up
		 * connect() routine if sleeping.
		 */
		if (msg_data_sz(msg) == 0) {
1568 1569
			kfree_skb(*skb);
			*skb = NULL;
1570 1571 1572 1573
			if (waitqueue_active(sk_sleep(sk)))
				wake_up_interruptible(sk_sleep(sk));
		}
		retval = TIPC_OK;
1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588
		break;
	case SS_LISTENING:
	case SS_UNCONNECTED:
		/* Accept only SYN message */
		if (!msg_connected(msg) && !(msg_errcode(msg)))
			retval = TIPC_OK;
		break;
	case SS_DISCONNECTING:
		break;
	default:
		pr_err("Unknown socket state %u\n", sock->state);
	}
	return retval;
}

1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599
/**
 * rcvbuf_limit - get proper overload limit of socket receive queue
 * @sk: socket
 * @buf: message
 *
 * For all connection oriented messages, irrespective of importance,
 * the default overload value (i.e. 67MB) is set as limit.
 *
 * For all connectionless messages, by default new queue limits are
 * as belows:
 *
1600 1601 1602 1603
 * TIPC_LOW_IMPORTANCE       (4 MB)
 * TIPC_MEDIUM_IMPORTANCE    (8 MB)
 * TIPC_HIGH_IMPORTANCE      (16 MB)
 * TIPC_CRITICAL_IMPORTANCE  (32 MB)
1604 1605 1606 1607 1608 1609 1610 1611
 *
 * Returns overload limit according to corresponding message importance
 */
static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	if (msg_connected(msg))
1612 1613 1614 1615
		return sysctl_tipc_rmem[2];

	return sk->sk_rcvbuf >> TIPC_CRITICAL_IMPORTANCE <<
		msg_importance(msg);
1616 1617
}

1618
/**
 * filter_rcv - validate incoming message
 * @sk: socket
 * @skb: pointer to message. Set to NULL if buffer is consumed.
 *
 * Enqueues message on receive queue if acceptable; optionally handles
 * disconnect indication for a connected socket.
 *
 * Called with socket lock already taken
 *
 * Returns 0 (TIPC_OK) if message was ok, -TIPC error code if rejected
 */
static int filter_rcv(struct sock *sk, struct sk_buff **skb)
{
	struct socket *sock = sk->sk_socket;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *msg = buf_msg(*skb);
	unsigned int limit = rcvbuf_limit(sk, *skb);
	int rc = TIPC_OK;

	if (unlikely(msg_user(msg) == CONN_MANAGER)) {
		tipc_sk_proto_rcv(tsk, skb);
		return TIPC_OK;
	}

	if (unlikely(msg_user(msg) == SOCK_WAKEUP)) {
		kfree_skb(*skb);
		tsk->link_cong = 0;
		sk->sk_write_space(sk);
		*skb = NULL;
		return TIPC_OK;
	}

	/* Reject message if it is wrong sort of message for socket */
	if (msg_type(msg) > TIPC_DIRECT_MSG)
		return -TIPC_ERR_NO_PORT;

	if (sock->state == SS_READY) {
		if (msg_connected(msg))
			return -TIPC_ERR_NO_PORT;
	} else {
		rc = filter_connect(tsk, skb);
		if (rc != TIPC_OK || !*skb)
			return rc;
	}

	/* Reject message if there isn't room to queue it */
	if (sk_rmem_alloc_get(sk) + (*skb)->truesize >= limit)
		return -TIPC_ERR_OVERLOAD;

	/* Enqueue message */
	TIPC_SKB_CB(*skb)->handle = NULL;
	__skb_queue_tail(&sk->sk_receive_queue, *skb);
	skb_set_owner_r(*skb, sk);

	sk->sk_data_ready(sk);
	*skb = NULL;
	return TIPC_OK;
}

/**
 * tipc_backlog_rcv - handle incoming message from backlog queue
 * @sk: socket
 * @skb: message
 *
 * Caller must hold socket lock
 *
 * Returns 0
 */
static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int err;
	atomic_t *dcnt;
	u32 dnode;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct net *net = sock_net(sk);
	uint truesize = skb->truesize;

	err = filter_rcv(sk, &skb);
	if (likely(!skb)) {
		dcnt = &tsk->dupl_rcvcnt;
		if (atomic_read(dcnt) < TIPC_CONN_OVERLOAD_LIMIT)
			atomic_add(truesize, dcnt);
		return 0;
	}
	if (!err || tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode, -err))
		tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
	return 0;
}

/**
 * tipc_sk_enqueue - extract all buffers with destination 'dport' from
 *                   inputq and try adding them to socket or backlog queue
 * @inputq: list of incoming buffers with potentially different destinations
 * @sk: socket where the buffers should be enqueued
 * @dport: port number for the socket
 * @_skb: returned buffer to be forwarded or rejected, if applicable
 *
 * Caller must hold socket lock
 *
 * Returns TIPC_OK if all buffers enqueued, otherwise -TIPC_ERR_OVERLOAD
 * or -TIPC_ERR_NO_PORT
 */
static int tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
			   u32 dport, struct sk_buff **_skb)
{
	unsigned int lim;
	atomic_t *dcnt;
	int err;
	struct sk_buff *skb;
	unsigned long time_limit = jiffies + 2;

	while (skb_queue_len(inputq)) {
		if (unlikely(time_after_eq(jiffies, time_limit)))
			return TIPC_OK;
		skb = tipc_skb_dequeue(inputq, dport);
		if (unlikely(!skb))
			return TIPC_OK;
		if (!sock_owned_by_user(sk)) {
			err = filter_rcv(sk, &skb);
			if (likely(!skb))
				continue;
			*_skb = skb;
			return err;
		}
		dcnt = &tipc_sk(sk)->dupl_rcvcnt;
		if (sk->sk_backlog.len)
			atomic_set(dcnt, 0);
		lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
		if (likely(!sk_add_backlog(sk, skb, lim)))
			continue;
		*_skb = skb;
		return -TIPC_ERR_OVERLOAD;
	}
	return TIPC_OK;
}

/**
 * tipc_sk_rcv - handle a chain of incoming buffers
 * @inputq: buffer list containing the buffers
 * Consumes all buffers in list until inputq is empty
 * Note: may be called in multiple threads referring to the same queue
 * Returns 0 if last buffer was accepted, otherwise -EHOSTUNREACH
 * Only node local calls check the return value, sending single-buffer queues
 */
int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
{
	u32 dnode, dport = 0;
	int err = -TIPC_ERR_NO_PORT;
	struct sk_buff *skb;
	struct tipc_sock *tsk;
	struct tipc_net *tn;
	struct sock *sk;

	while (skb_queue_len(inputq)) {
		skb = NULL;
		dport = tipc_skb_peek_port(inputq, dport);
		tsk = tipc_sk_lookup(net, dport);
		if (likely(tsk)) {
			sk = &tsk->sk;
			if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
				err = tipc_sk_enqueue(inputq, sk, dport, &skb);
				spin_unlock_bh(&sk->sk_lock.slock);
				dport = 0;
			}
			sock_put(sk);
		} else {
			skb = tipc_skb_dequeue(inputq, dport);
		}
		if (likely(!skb))
			continue;
		if (tipc_msg_lookup_dest(net, skb, &dnode, &err))
			goto xmit;
		if (!err) {
			dnode = msg_destnode(buf_msg(skb));
			goto xmit;
		}
		tn = net_generic(net, tipc_net_id);
		if (!tipc_msg_reverse(tn->own_addr, skb, &dnode, -err))
			continue;
xmit:
		tipc_link_xmit_skb(net, skb, dnode, dport);
	}
	return err ? -EHOSTUNREACH : 0;
}

static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT(wait);
	int done;

	do {
		int err = sock_error(sk);
		if (err)
			return err;
		if (!*timeo_p)
			return -ETIMEDOUT;
		if (signal_pending(current))
			return sock_intr_errno(*timeo_p);

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		done = sk_wait_event(sk, timeo_p, sock->state != SS_CONNECTING);
		finish_wait(sk_sleep(sk), &wait);
	} while (!done);
	return 0;
}

/**
 * tipc_connect - establish a connection to another TIPC port
 * @sock: socket structure
 * @dest: socket address for destination port
 * @destlen: size of socket address data structure
 * @flags: file-related flags associated with socket
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_connect(struct socket *sock, struct sockaddr *dest,
			int destlen, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
	struct msghdr m = {NULL,};
	long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
	socket_state previous;
	int res = 0;

	lock_sock(sk);

	/* DGRAM/RDM connect(), just save the destaddr */
	if (sock->state == SS_READY) {
		if (dst->family == AF_UNSPEC) {
			memset(&tsk->remote, 0, sizeof(struct sockaddr_tipc));
			tsk->connected = 0;
		} else {
			memcpy(&tsk->remote, dest, destlen);
			tsk->connected = 1;
		}
		goto exit;
	}

	/*
	 * Reject connection attempt using multicast address
	 *
	 * Note: send_msg() validates the rest of the address fields,
	 *       so there's no need to do it here
	 */
	if (dst->addrtype == TIPC_ADDR_MCAST) {
		res = -EINVAL;
		goto exit;
	}

	previous = sock->state;
	switch (sock->state) {
	case SS_UNCONNECTED:
		/* Send a 'SYN-' to destination */
		m.msg_name = dest;
		m.msg_namelen = destlen;

		/* If connect is in non-blocking case, set MSG_DONTWAIT to
		 * indicate send_msg() is never blocked.
		 */
		if (!timeout)
			m.msg_flags = MSG_DONTWAIT;

		res = __tipc_sendmsg(sock, &m, 0);
		if ((res < 0) && (res != -EWOULDBLOCK))
			goto exit;

		/* Just entered SS_CONNECTING state; the only
		 * difference is that return value in non-blocking
		 * case is EINPROGRESS, rather than EALREADY.
		 */
		res = -EINPROGRESS;
	case SS_CONNECTING:
		if (previous == SS_CONNECTING)
			res = -EALREADY;
		if (!timeout)
			goto exit;
		timeout = msecs_to_jiffies(timeout);
		/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
		res = tipc_wait_for_connect(sock, &timeout);
		break;
	case SS_CONNECTED:
		res = -EISCONN;
		break;
	default:
		res = -EINVAL;
		break;
	}
exit:
	release_sock(sk);
	return res;
}
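
/*
 * Illustrative user-space sketch (compiled out, not part of this file's
 * build): how a blocking connect() to a named TIPC service might look from
 * an application. The service type/instance values (18888, 17) are
 * arbitrary examples, not anything defined here.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/tipc.h>

static int tipc_connect_example(void)
{
	struct sockaddr_tipc server;
	int sd = socket(AF_TIPC, SOCK_STREAM, 0);

	if (sd < 0)
		return -1;

	memset(&server, 0, sizeof(server));
	server.family = AF_TIPC;
	server.addrtype = TIPC_ADDR_NAME;	/* connect by service name */
	server.addr.name.name.type = 18888;	/* example service type */
	server.addr.name.name.instance = 17;	/* example instance */
	server.addr.name.domain = 0;		/* look up anywhere */

	/* Ends up in tipc_connect() above; blocks until ACK, RST or timeout */
	if (connect(sd, (struct sockaddr *)&server, sizeof(server)) < 0) {
		perror("connect");
		return -1;
	}
	return sd;
}
#endif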

/**
 * tipc_listen - allow socket to listen for incoming connections
 * @sock: socket structure
 * @len: (unused)
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_listen(struct socket *sock, int len)
{
	struct sock *sk = sock->sk;
	int res;

	lock_sock(sk);

	if (sock->state != SS_UNCONNECTED)
		res = -EINVAL;
	else {
		sock->state = SS_LISTENING;
		res = 0;
	}

	release_sock(sk);
	return res;
}

static int tipc_wait_for_accept(struct socket *sock, long timeo)
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT(wait);
	int err;

	/* True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
			release_sock(sk);
			timeo = schedule_timeout(timeo);
			lock_sock(sk);
		}
		err = 0;
		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;
		err = -EINVAL;
		if (sock->state != SS_LISTENING)
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}

/**
 * tipc_accept - wait for connection request
 * @sock: listening socket
 * @new_sock: new socket that is to be connected
 * @flags: file-related flags associated with socket
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
{
	struct sock *new_sk, *sk = sock->sk;
	struct sk_buff *buf;
	struct tipc_sock *new_tsock;
	struct tipc_msg *msg;
	long timeo;
	int res;

	lock_sock(sk);

	if (sock->state != SS_LISTENING) {
		res = -EINVAL;
		goto exit;
	}
	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	res = tipc_wait_for_accept(sock, timeo);
	if (res)
		goto exit;

	buf = skb_peek(&sk->sk_receive_queue);

	res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1);
	if (res)
		goto exit;

	new_sk = new_sock->sk;
	new_tsock = tipc_sk(new_sk);
	msg = buf_msg(buf);

	/* we lock on new_sk; but lockdep sees the lock on sk */
	lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);

	/*
	 * Reject any stray messages received by new socket
	 * before the socket lock was taken (very, very unlikely)
	 */
	tsk_rej_rx_queue(new_sk);

	/* Connect new socket to its peer */
	tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
	new_sock->state = SS_CONNECTED;

	tsk_set_importance(new_tsock, msg_importance(msg));
	if (msg_named(msg)) {
		new_tsock->conn_type = msg_nametype(msg);
		new_tsock->conn_instance = msg_nameinst(msg);
	}

	/*
	 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
	 * Respond to 'SYN+' by queuing it on new socket.
	 */
	if (!msg_data_sz(msg)) {
		struct msghdr m = {NULL,};

		tsk_advance_rx_queue(sk);
		__tipc_send_stream(new_sock, &m, 0);
	} else {
		__skb_dequeue(&sk->sk_receive_queue);
		__skb_queue_head(&new_sk->sk_receive_queue, buf);
		skb_set_owner_r(buf, new_sk);
	}
	release_sock(new_sk);
exit:
	release_sock(sk);
	return res;
}
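
/*
 * Illustrative user-space sketch (compiled out): a minimal listener that
 * binds a name sequence, accepts one connection and then shuts it down with
 * SHUT_RDWR, the only mode tipc_shutdown() accepts. The service type 18888
 * is an arbitrary example value, and error checking is omitted for brevity.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/tipc.h>

static void tipc_server_example(void)
{
	struct sockaddr_tipc addr;
	int listener = socket(AF_TIPC, SOCK_STREAM, 0);
	int peer;

	memset(&addr, 0, sizeof(addr));
	addr.family = AF_TIPC;
	addr.addrtype = TIPC_ADDR_NAMESEQ;	/* publish a name range */
	addr.scope = TIPC_ZONE_SCOPE;
	addr.addr.nameseq.type = 18888;		/* example service type */
	addr.addr.nameseq.lower = 17;
	addr.addr.nameseq.upper = 17;

	bind(listener, (struct sockaddr *)&addr, sizeof(addr));
	listen(listener, 0);			/* -> tipc_listen(); len unused */

	peer = accept(listener, NULL, NULL);	/* -> tipc_accept() above */
	/* ... exchange data on 'peer' ... */
	shutdown(peer, SHUT_RDWR);		/* -> tipc_shutdown() */
	close(peer);
	close(listener);
}
#endif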

/**
 * tipc_shutdown - shutdown socket connection
 * @sock: socket structure
 * @how: direction to close (must be SHUT_RDWR)
 *
 * Terminates connection (if necessary), then purges socket's receive queue.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sk_buff *skb;
	u32 dnode;
	int res;

	if (how != SHUT_RDWR)
		return -EINVAL;

	lock_sock(sk);

	switch (sock->state) {
	case SS_CONNECTING:
	case SS_CONNECTED:

restart:
		/* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
		skb = __skb_dequeue(&sk->sk_receive_queue);
		if (skb) {
			if (TIPC_SKB_CB(skb)->handle != NULL) {
				kfree_skb(skb);
				goto restart;
			}
			if (tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode,
					     TIPC_CONN_SHUTDOWN))
				tipc_link_xmit_skb(net, skb, dnode,
						   tsk->portid);
		} else {
			dnode = tsk_peer_node(tsk);

			skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
					      TIPC_CONN_MSG, SHORT_H_SIZE,
					      0, dnode, tsk_own_node(tsk),
					      tsk_peer_port(tsk),
					      tsk->portid, TIPC_CONN_SHUTDOWN);
			tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
		}
		tsk->connected = 0;
		sock->state = SS_DISCONNECTING;
		tipc_node_remove_conn(net, dnode, tsk->portid);
		/* fall through */

	case SS_DISCONNECTING:

		/* Discard any unreceived messages */
		__skb_queue_purge(&sk->sk_receive_queue);

		/* Wake up anyone sleeping in poll */
		sk->sk_state_change(sk);
		res = 0;
		break;

	default:
		res = -ENOTCONN;
	}

	release_sock(sk);
	return res;
}

static void tipc_sk_timeout(unsigned long data)
{
	struct tipc_sock *tsk = (struct tipc_sock *)data;
	struct sock *sk = &tsk->sk;
	struct sk_buff *skb = NULL;
	u32 peer_port, peer_node;
	u32 own_node = tsk_own_node(tsk);

	bh_lock_sock(sk);
	if (!tsk->connected) {
		bh_unlock_sock(sk);
		goto exit;
	}
	peer_port = tsk_peer_port(tsk);
	peer_node = tsk_peer_node(tsk);

	if (tsk->probing_state == TIPC_CONN_PROBING) {
		/* Previous probe not answered -> self abort */
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
				      TIPC_CONN_MSG, SHORT_H_SIZE, 0,
				      own_node, peer_node, tsk->portid,
				      peer_port, TIPC_ERR_NO_PORT);
	} else {
		skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE,
				      INT_H_SIZE, 0, peer_node, own_node,
				      peer_port, tsk->portid, TIPC_OK);
		tsk->probing_state = TIPC_CONN_PROBING;
		sk_reset_timer(sk, &sk->sk_timer, jiffies + tsk->probing_intv);
	}
	bh_unlock_sock(sk);
	if (skb)
		tipc_link_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid);
exit:
	sock_put(sk);
}

static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
			   struct tipc_name_seq const *seq)
{
	struct net *net = sock_net(&tsk->sk);
	struct publication *publ;
	u32 key;

	if (tsk->connected)
		return -EINVAL;
	key = tsk->portid + tsk->pub_count + 1;
	if (key == tsk->portid)
		return -EADDRINUSE;

	publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
				    scope, tsk->portid, key);
	if (unlikely(!publ))
		return -EINVAL;

	list_add(&publ->pport_list, &tsk->publications);
	tsk->pub_count++;
	tsk->published = 1;
	return 0;
}

static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
			    struct tipc_name_seq const *seq)
{
	struct net *net = sock_net(&tsk->sk);
	struct publication *publ;
	struct publication *safe;
	int rc = -EINVAL;

	list_for_each_entry_safe(publ, safe, &tsk->publications, pport_list) {
		if (seq) {
			if (publ->scope != scope)
				continue;
			if (publ->type != seq->type)
				continue;
			if (publ->lower != seq->lower)
				continue;
			if (publ->upper != seq->upper)
				break;
			tipc_nametbl_withdraw(net, publ->type, publ->lower,
					      publ->ref, publ->key);
			rc = 0;
			break;
		}
		tipc_nametbl_withdraw(net, publ->type, publ->lower,
				      publ->ref, publ->key);
		rc = 0;
	}
	if (list_empty(&tsk->publications))
		tsk->published = 0;
	return rc;
}

/* tipc_sk_reinit: set non-zero address in all existing sockets
 *                 when we go from standalone to network mode.
 */
void tipc_sk_reinit(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	const struct bucket_table *tbl;
	struct rhash_head *pos;
	struct tipc_sock *tsk;
	struct tipc_msg *msg;
	int i;

	rcu_read_lock();
	tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
	for (i = 0; i < tbl->size; i++) {
		rht_for_each_entry_rcu(tsk, pos, tbl, i, node) {
			spin_lock_bh(&tsk->sk.sk_lock.slock);
			msg = &tsk->phdr;
			msg_set_prevnode(msg, tn->own_addr);
			msg_set_orignode(msg, tn->own_addr);
			spin_unlock_bh(&tsk->sk.sk_lock.slock);
		}
	}
	rcu_read_unlock();
}

static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_sock *tsk;

	rcu_read_lock();
	tsk = rhashtable_lookup(&tn->sk_rht, &portid);
	if (tsk)
		sock_hold(&tsk->sk);
	rcu_read_unlock();

	return tsk;
}

static int tipc_sk_insert(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
	u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;

	while (remaining--) {
		portid++;
		if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
			portid = TIPC_MIN_PORT;
		tsk->portid = portid;
		sock_hold(&tsk->sk);
		if (rhashtable_lookup_insert(&tn->sk_rht, &tsk->node))
			return 0;
		sock_put(&tsk->sk);
	}

	return -1;
}

static void tipc_sk_remove(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);

	if (rhashtable_remove(&tn->sk_rht, &tsk->node)) {
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
}

int tipc_sk_rht_init(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct rhashtable_params rht_params = {
		.nelem_hint = 192,
		.head_offset = offsetof(struct tipc_sock, node),
		.key_offset = offsetof(struct tipc_sock, portid),
		.key_len = sizeof(u32), /* portid */
		.hashfn = jhash,
		.max_size = 1048576,
		.min_size = 256,
	};

	return rhashtable_init(&tn->sk_rht, &rht_params);
}

void tipc_sk_rht_destroy(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	/* Wait for socket readers to complete */
	synchronize_net();

	rhashtable_destroy(&tn->sk_rht);
}

/**
 * tipc_setsockopt - set socket option
 * @sock: socket structure
 * @lvl: option level
 * @opt: option identifier
 * @ov: pointer to new option value
 * @ol: length of option value
 *
 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
 * (to ease compatibility).
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
			   char __user *ov, unsigned int ol)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	u32 value;
	int res;

	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
		return 0;
	if (lvl != SOL_TIPC)
		return -ENOPROTOOPT;
	if (ol < sizeof(value))
		return -EINVAL;
	res = get_user(value, (u32 __user *)ov);
	if (res)
		return res;

	lock_sock(sk);

	switch (opt) {
	case TIPC_IMPORTANCE:
		res = tsk_set_importance(tsk, value);
		break;
	case TIPC_SRC_DROPPABLE:
		if (sock->type != SOCK_STREAM)
			tsk_set_unreliable(tsk, value);
		else
			res = -ENOPROTOOPT;
		break;
	case TIPC_DEST_DROPPABLE:
		tsk_set_unreturnable(tsk, value);
		break;
	case TIPC_CONN_TIMEOUT:
		tipc_sk(sk)->conn_timeout = value;
		/* no need to set "res", since already 0 at this point */
		break;
	default:
		res = -EINVAL;
	}

	release_sock(sk);

	return res;
}

/**
 * tipc_getsockopt - get socket option
 * @sock: socket structure
 * @lvl: option level
 * @opt: option identifier
 * @ov: receptacle for option value
 * @ol: receptacle for length of option value
 *
 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
 * (to ease compatibility).
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
			   char __user *ov, int __user *ol)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	int len;
	u32 value;
	int res;

	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
		return put_user(0, ol);
	if (lvl != SOL_TIPC)
		return -ENOPROTOOPT;
	res = get_user(len, ol);
	if (res)
		return res;

	lock_sock(sk);

	switch (opt) {
	case TIPC_IMPORTANCE:
		value = tsk_importance(tsk);
		break;
	case TIPC_SRC_DROPPABLE:
		value = tsk_unreliable(tsk);
		break;
	case TIPC_DEST_DROPPABLE:
		value = tsk_unreturnable(tsk);
		break;
	case TIPC_CONN_TIMEOUT:
		value = tsk->conn_timeout;
		/* no need to set "res", since already 0 at this point */
		break;
	case TIPC_NODE_RECVQ_DEPTH:
		value = 0; /* was tipc_queue_size, now obsolete */
		break;
	case TIPC_SOCK_RECVQ_DEPTH:
		value = skb_queue_len(&sk->sk_receive_queue);
		break;
	default:
		res = -EINVAL;
	}

	release_sock(sk);

	if (res)
		return res;	/* "get" failed */

	if (len < sizeof(value))
		return -EINVAL;

	if (copy_to_user(ov, &value, sizeof(value)))
		return -EFAULT;

	return put_user(sizeof(value), ol);
}
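
/*
 * Illustrative user-space sketch (compiled out): exercising the SOL_TIPC
 * options handled above. The values used are examples only; SOL_TIPC is
 * defined to 271 in linux/socket.h, repeated here in case the libc headers
 * do not expose it.
 */
#if 0
#include <stdio.h>
#include <sys/socket.h>
#include <linux/tipc.h>

#ifndef SOL_TIPC
#define SOL_TIPC 271			/* from linux/socket.h */
#endif

static void tipc_sockopt_example(int sd)
{
	__u32 importance = TIPC_HIGH_IMPORTANCE;
	__u32 timeout_ms = 10000;	/* overrides the 8 s connect default */
	__u32 depth;
	socklen_t len = sizeof(depth);

	setsockopt(sd, SOL_TIPC, TIPC_IMPORTANCE, &importance,
		   sizeof(importance));
	setsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT, &timeout_ms,
		   sizeof(timeout_ms));

	/* Reads back the current receive-queue depth via tipc_getsockopt() */
	if (!getsockopt(sd, SOL_TIPC, TIPC_SOCK_RECVQ_DEPTH, &depth, &len))
		printf("%u buffers queued\n", depth);
}
#endif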

static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct tipc_sioc_ln_req lnr;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case SIOCGETLINKNAME:
		if (copy_from_user(&lnr, argp, sizeof(lnr)))
			return -EFAULT;
		if (!tipc_node_get_linkname(sock_net(sk),
					    lnr.bearer_id & 0xffff, lnr.peer,
					    lnr.linkname, TIPC_MAX_LINK_NAME)) {
			if (copy_to_user(argp, &lnr, sizeof(lnr)))
				return -EFAULT;
			return 0;
		}
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
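
/*
 * Illustrative user-space sketch (compiled out): querying a link name with
 * the SIOCGETLINKNAME ioctl served above. The peer address and bearer id
 * passed in are placeholders supplied by the caller.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/tipc.h>

static void tipc_linkname_example(int sd, __u32 peer_addr, __u32 bearer_id)
{
	struct tipc_sioc_ln_req req;

	memset(&req, 0, sizeof(req));
	req.peer = peer_addr;		/* e.g. the <Z.C.N> of a neighbor */
	req.bearer_id = bearer_id;

	if (!ioctl(sd, SIOCGETLINKNAME, &req))
		printf("link: %s\n", req.linkname);
}
#endif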

/* Protocol switches for the various types of TIPC sockets */

static const struct proto_ops msg_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
	.ioctl		= tipc_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_sendmsg,
	.recvmsg	= tipc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

static const struct proto_ops packet_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= tipc_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
	.ioctl		= tipc_ioctl,
	.listen		= tipc_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_send_packet,
	.recvmsg	= tipc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

static const struct proto_ops stream_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= tipc_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
	.ioctl		= tipc_ioctl,
	.listen		= tipc_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_send_stream,
	.recvmsg	= tipc_recv_stream,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

static const struct net_proto_family tipc_family_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.create		= tipc_sk_create
};

static struct proto tipc_proto = {
	.name		= "TIPC",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct tipc_sock),
	.sysctl_rmem	= sysctl_tipc_rmem
};

/**
 * tipc_socket_init - initialize TIPC socket interface
 *
 * Returns 0 on success, errno otherwise
 */
int tipc_socket_init(void)
{
	int res;

	res = proto_register(&tipc_proto, 1);
	if (res) {
		pr_err("Failed to register TIPC protocol type\n");
		goto out;
	}

	res = sock_register(&tipc_family_ops);
	if (res) {
		pr_err("Failed to register TIPC socket type\n");
		proto_unregister(&tipc_proto);
		goto out;
	}
 out:
	return res;
}

/**
 * tipc_socket_stop - stop TIPC socket interface
 */
void tipc_socket_stop(void)
{
	sock_unregister(tipc_family_ops.family);
	proto_unregister(&tipc_proto);
}

/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
{
	u32 peer_node;
	u32 peer_port;
	struct nlattr *nest;

	peer_node = tsk_peer_node(tsk);
	peer_port = tsk_peer_port(tsk);

	nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);

	if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
		goto msg_full;
	if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
		goto msg_full;

	if (tsk->conn_type != 0) {
		if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
			goto msg_full;
		if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
			goto msg_full;
		if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
			goto msg_full;
	}
	nla_nest_end(skb, nest);

	return 0;

msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
			    struct tipc_sock *tsk)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
	if (!hdr)
		goto msg_cancel;

	attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
	if (!attrs)
		goto genlmsg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tn->own_addr))
		goto attr_msg_cancel;

	if (tsk->connected) {
		err = __tipc_nl_add_sk_con(skb, tsk);
		if (err)
			goto attr_msg_cancel;
	} else if (!list_empty(&tsk->publications)) {
		if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
			goto attr_msg_cancel;
	}
	nla_nest_end(skb, attrs);
	genlmsg_end(skb, hdr);

	return 0;

attr_msg_cancel:
	nla_nest_cancel(skb, attrs);
genlmsg_cancel:
	genlmsg_cancel(skb, hdr);
msg_cancel:
	return -EMSGSIZE;
}

int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	struct tipc_sock *tsk;
	const struct bucket_table *tbl;
	struct rhash_head *pos;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	u32 tbl_id = cb->args[0];
	u32 prev_portid = cb->args[1];

	rcu_read_lock();
	tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
	for (; tbl_id < tbl->size; tbl_id++) {
		rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) {
			spin_lock_bh(&tsk->sk.sk_lock.slock);
			if (prev_portid && prev_portid != tsk->portid) {
				spin_unlock_bh(&tsk->sk.sk_lock.slock);
				continue;
			}

			err = __tipc_nl_add_sk(skb, cb, tsk);
			if (err) {
				prev_portid = tsk->portid;
				spin_unlock_bh(&tsk->sk.sk_lock.slock);
				goto out;
			}
			prev_portid = 0;
			spin_unlock_bh(&tsk->sk.sk_lock.slock);
		}
	}
out:
	rcu_read_unlock();
	cb->args[0] = tbl_id;
	cb->args[1] = prev_portid;

	return skb->len;
}

/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct publication *publ)
{
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
	if (!hdr)
		goto msg_cancel;

	attrs = nla_nest_start(skb, TIPC_NLA_PUBL);
	if (!attrs)
		goto genlmsg_cancel;

	if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
		goto attr_msg_cancel;

	nla_nest_end(skb, attrs);
	genlmsg_end(skb, hdr);

	return 0;

attr_msg_cancel:
	nla_nest_cancel(skb, attrs);
genlmsg_cancel:
	genlmsg_cancel(skb, hdr);
msg_cancel:
	return -EMSGSIZE;
}

/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
				  struct netlink_callback *cb,
				  struct tipc_sock *tsk, u32 *last_publ)
{
	int err;
	struct publication *p;

	if (*last_publ) {
		list_for_each_entry(p, &tsk->publications, pport_list) {
			if (p->key == *last_publ)
				break;
		}
		if (p->key != *last_publ) {
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistence check to fail in the netlink callback
			 * handler. Resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			*last_publ = 0;
			return -EPIPE;
		}
	} else {
		p = list_first_entry(&tsk->publications, struct publication,
				     pport_list);
	}

	list_for_each_entry_from(p, &tsk->publications, pport_list) {
		err = __tipc_nl_add_sk_publ(skb, cb, p);
		if (err) {
			*last_publ = p->key;
			return err;
		}
	}
	*last_publ = 0;

	return 0;
}

int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	u32 tsk_portid = cb->args[0];
	u32 last_publ = cb->args[1];
	u32 done = cb->args[2];
	struct net *net = sock_net(skb->sk);
	struct tipc_sock *tsk;

	if (!tsk_portid) {
		struct nlattr **attrs;
		struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];

		err = tipc_nlmsg_parse(cb->nlh, &attrs);
		if (err)
			return err;

		err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
				       attrs[TIPC_NLA_SOCK],
				       tipc_nl_sock_policy);
		if (err)
			return err;

		if (!sock[TIPC_NLA_SOCK_REF])
			return -EINVAL;

		tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
	}

	if (done)
		return 0;

	tsk = tipc_sk_lookup(net, tsk_portid);
	if (!tsk)
		return -EINVAL;

	lock_sock(&tsk->sk);
	err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
	if (!err)
		done = 1;
	release_sock(&tsk->sk);
	sock_put(&tsk->sk);

	cb->args[0] = tsk_portid;
	cb->args[1] = last_publ;
	cb->args[2] = done;

	return skb->len;
}