/*
 * net/tipc/node.c: TIPC node management routines
 *
 * Copyright (c) 2000-2006, 2012-2015, Ericsson AB
 * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "node.h"
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
#include "discover.h"

/* Node FSM states and events:
 */
enum {
	SELF_DOWN_PEER_DOWN    = 0xdd,
	SELF_UP_PEER_UP        = 0xaa,
	SELF_DOWN_PEER_LEAVING = 0xd1,
	SELF_UP_PEER_COMING    = 0xac,
	SELF_COMING_PEER_UP    = 0xca,
	SELF_LEAVING_PEER_DOWN = 0x1d,
	NODE_FAILINGOVER       = 0xf0,
	NODE_SYNCHING          = 0xcc
};

enum {
	SELF_ESTABL_CONTACT_EVT = 0xece,
	SELF_LOST_CONTACT_EVT   = 0x1ce,
	PEER_ESTABL_CONTACT_EVT = 0x9ece,
	PEER_LOST_CONTACT_EVT   = 0x91ce,
	NODE_FAILOVER_BEGIN_EVT = 0xfbe,
	NODE_FAILOVER_END_EVT   = 0xfee,
	NODE_SYNCH_BEGIN_EVT    = 0xcbe,
	NODE_SYNCH_END_EVT      = 0xcee
};
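/* The hex values above appear to be chosen as mnemonics (0xdd = self down/
 * peer down, 0xaa = self up/peer up, 'c' = coming, '1' = leaving, 0xf0 =
 * failing over, 0xcc = synching), which makes states and events easy to
 * spot in traces.
 */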

static void tipc_node_link_down(struct tipc_node *n, int bearer_id);
static void node_lost_contact(struct tipc_node *n_ptr);
static void node_established_contact(struct tipc_node *n_ptr);
static void tipc_node_delete(struct tipc_node *node);
static void tipc_node_timeout(unsigned long data);
static void tipc_node_fsm_evt(struct tipc_node *n, int evt);

struct tipc_sock_conn {
	u32 port;
	u32 peer_port;
	u32 peer_node;
	struct list_head list;
};

static const struct nla_policy tipc_nl_node_policy[TIPC_NLA_NODE_MAX + 1] = {
	[TIPC_NLA_NODE_UNSPEC]		= { .type = NLA_UNSPEC },
	[TIPC_NLA_NODE_ADDR]		= { .type = NLA_U32 },
	[TIPC_NLA_NODE_UP]		= { .type = NLA_FLAG }
};

/*
 * A trivial power-of-two bitmask technique is used for speed, since this
 * operation is done for every incoming TIPC packet. The number of hash table
 * entries has been chosen so that no hash chain exceeds 8 nodes and will
 * usually be much smaller (typically only a single node).
 */
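/* Worked example, assuming NODE_HTABLE_SIZE is a power of two such as 1024:
 * tipc_hashfn(0x01001005) = 0x01001005 & 0x3ff = 5, i.e. hash bucket 5.
 */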
static unsigned int tipc_hashfn(u32 addr)
{
	return addr & (NODE_HTABLE_SIZE - 1);
}

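/* Node references are kref-counted: every successful tipc_node_find() takes
 * a reference via tipc_node_get(), and the node is deleted only when the
 * last holder drops its reference through tipc_node_put().
 */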
static void tipc_node_kref_release(struct kref *kref)
{
	struct tipc_node *node = container_of(kref, struct tipc_node, kref);

	tipc_node_delete(node);
}

void tipc_node_put(struct tipc_node *node)
{
	kref_put(&node->kref, tipc_node_kref_release);
}

static void tipc_node_get(struct tipc_node *node)
{
	kref_get(&node->kref);
}

/*
 * tipc_node_find - locate specified node object, if it exists
 */
struct tipc_node *tipc_node_find(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node;

	if (unlikely(!in_own_cluster_exact(net, addr)))
		return NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(node, &tn->node_htable[tipc_hashfn(addr)],
				 hash) {
		if (node->addr == addr) {
			tipc_node_get(node);
			rcu_read_unlock();
			return node;
		}
	}
	rcu_read_unlock();
	return NULL;
}

struct tipc_node *tipc_node_create(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *n_ptr, *temp_node;

	spin_lock_bh(&tn->node_list_lock);
	n_ptr = tipc_node_find(net, addr);
	if (n_ptr)
		goto exit;
	n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC);
	if (!n_ptr) {
		pr_warn("Node creation failed, no memory\n");
		goto exit;
	}
	n_ptr->addr = addr;
	n_ptr->net = net;
	kref_init(&n_ptr->kref);
	spin_lock_init(&n_ptr->lock);
	INIT_HLIST_NODE(&n_ptr->hash);
	INIT_LIST_HEAD(&n_ptr->list);
	INIT_LIST_HEAD(&n_ptr->publ_list);
	INIT_LIST_HEAD(&n_ptr->conn_sks);
	skb_queue_head_init(&n_ptr->bclink.namedq);
	__skb_queue_head_init(&n_ptr->bclink.deferdq);
	hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		if (n_ptr->addr < temp_node->addr)
			break;
	}
	list_add_tail_rcu(&n_ptr->list, &temp_node->list);
	n_ptr->state = SELF_DOWN_PEER_LEAVING;
	n_ptr->signature = INVALID_NODE_SIG;
	n_ptr->active_links[0] = INVALID_BEARER_ID;
	n_ptr->active_links[1] = INVALID_BEARER_ID;
	tipc_node_get(n_ptr);
	setup_timer(&n_ptr->timer, tipc_node_timeout, (unsigned long)n_ptr);
	n_ptr->keepalive_intv = U32_MAX;
exit:
	spin_unlock_bh(&tn->node_list_lock);
	return n_ptr;
}

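/* Example for tipc_node_calculate_timer() below: with a link tolerance of
 * 1500 ms (and assuming this link sets the node's lowest interval),
 * tol / 4 = 375 ms is below the 500 ms cap, so the keepalive interval
 * becomes 375 ms and the abort limit becomes 1500 / 375 = 4 missed probes.
 */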
static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
{
	unsigned long tol = l->tolerance;
	unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
	unsigned long keepalive_intv = msecs_to_jiffies(intv);

	/* Link with lowest tolerance determines timer interval */
	if (keepalive_intv < n->keepalive_intv)
		n->keepalive_intv = keepalive_intv;

	/* Ensure link's abort limit corresponds to current interval */
	l->abort_limit = l->tolerance / jiffies_to_msecs(n->keepalive_intv);
}

static void tipc_node_delete(struct tipc_node *node)
{
	list_del_rcu(&node->list);
	hlist_del_rcu(&node->hash);
	kfree_rcu(node, rcu);
}

void tipc_node_stop(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node, *t_node;

	spin_lock_bh(&tn->node_list_lock);
	list_for_each_entry_safe(node, t_node, &tn->node_list, list) {
		if (del_timer(&node->timer))
			tipc_node_put(node);
		tipc_node_put(node);
	}
	spin_unlock_bh(&tn->node_list_lock);
}

int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn;
	int err = 0;

	if (in_own_node(net, dnode))
		return 0;

	node = tipc_node_find(net, dnode);
	if (!node) {
		pr_warn("Connecting sock to node 0x%x failed\n", dnode);
		return -EHOSTUNREACH;
	}
	conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
	if (!conn) {
		err = -EHOSTUNREACH;
		goto exit;
	}
	conn->peer_node = dnode;
	conn->port = port;
	conn->peer_port = peer_port;

	tipc_node_lock(node);
	list_add_tail(&conn->list, &node->conn_sks);
	tipc_node_unlock(node);
exit:
	tipc_node_put(node);
	return err;
}

void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn, *safe;

	if (in_own_node(net, dnode))
		return;

	node = tipc_node_find(net, dnode);
	if (!node)
		return;

	tipc_node_lock(node);
	list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
		if (port != conn->port)
			continue;
		list_del(&conn->list);
		kfree(conn);
	}
	tipc_node_unlock(node);
	tipc_node_put(node);
}

/* tipc_node_timeout - handle expiration of node timer
 */
static void tipc_node_timeout(unsigned long data)
{
	struct tipc_node *n = (struct tipc_node *)data;
	struct sk_buff_head xmitq;
	struct tipc_link *l;
	struct tipc_media_addr *maddr;
	int bearer_id;
	int rc = 0;

	__skb_queue_head_init(&xmitq);

	for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
		tipc_node_lock(n);
		l = n->links[bearer_id].link;
		if (l) {
			/* Link tolerance may change asynchronously: */
			tipc_node_calculate_timer(n, l);
			rc = tipc_link_timeout(l, &xmitq);
			if (rc & TIPC_LINK_DOWN_EVT)
				tipc_node_link_down(n, bearer_id);
		}
		tipc_node_unlock(n);
		maddr = &n->links[bearer_id].maddr;
		tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
	}
	if (!mod_timer(&n->timer, jiffies + n->keepalive_intv))
		tipc_node_get(n);
	tipc_node_put(n);
}

/**
 * tipc_node_link_up - handle addition of link
 *
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
			      struct sk_buff_head *xmitq)
{
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	struct tipc_link *ol = node_active_link(n, 0);
	struct tipc_link *nl = n->links[bearer_id].link;

	if (n->working_links > 1) {
		pr_warn("Attempt to establish 3rd link to %x\n", n->addr);
		return;
	}
	n->working_links++;
	n->action_flags |= TIPC_NOTIFY_LINK_UP;
	n->link_id = nl->peer_bearer_id << 16 | bearer_id;

	/* Leave room for tunnel header when returning 'mtu' to users: */
	n->links[bearer_id].mtu = nl->mtu - INT_H_SIZE;

	tipc_bearer_add_dest(n->net, bearer_id, n->addr);

	pr_debug("Established link <%s> on network plane %c\n",
		 nl->name, nl->net_plane);

	/* First link? => give it both slots */
	if (!ol) {
		*slot0 = bearer_id;
		*slot1 = bearer_id;
		nl->exec_mode = TIPC_LINK_OPEN;
		tipc_link_build_bcast_sync_msg(nl, xmitq);
		node_established_contact(n);
		return;
	}

	/* Second link => redistribute slots */
	if (nl->priority > ol->priority) {
		pr_debug("Old link <%s> becomes standby\n", ol->name);
		*slot0 = bearer_id;
		*slot1 = bearer_id;
	} else if (nl->priority == ol->priority) {
		*slot0 = bearer_id;
	} else {
		pr_debug("New link <%s> is standby\n", nl->name);
	}

	/* Prepare synchronization with first link */
	tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
}

/**
 * tipc_node_link_down - handle loss of link
 */
static void tipc_node_link_down(struct tipc_node *n, int bearer_id)
{
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	struct tipc_media_addr *maddr = &n->links[bearer_id].maddr;
	int i, highest = 0;
	struct tipc_link *l, *_l, *tnl;
	struct sk_buff_head xmitq;

	l = n->links[bearer_id].link;
	if (!l || !tipc_link_is_up(l))
		return;

	__skb_queue_head_init(&xmitq);

	n->working_links--;
	n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
	n->link_id = l->peer_bearer_id << 16 | bearer_id;

	tipc_bearer_remove_dest(n->net, l->bearer_id, n->addr);

	pr_debug("Lost link <%s> on network plane %c\n",
		 l->name, l->net_plane);

	/* Select new active link if any available */
	*slot0 = INVALID_BEARER_ID;
	*slot1 = INVALID_BEARER_ID;
	for (i = 0; i < MAX_BEARERS; i++) {
		_l = n->links[i].link;
		if (!_l || !tipc_link_is_up(_l))
			continue;
		if (_l == l)
			continue;
		if (_l->priority < highest)
			continue;
		if (_l->priority > highest) {
			highest = _l->priority;
			*slot0 = i;
			*slot1 = i;
			continue;
		}
		*slot1 = i;
	}

	if (!tipc_node_is_up(n)) {
		tipc_link_reset(l);
		node_lost_contact(n);
		return;
	}

	/* There is still a working link => initiate failover */
	tnl = node_active_link(n, 0);
	tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
	n->sync_point = tnl->rcv_nxt + (U16_MAX / 2 - 1);
	tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, &xmitq);
	tipc_link_reset(l);
	tipc_bearer_xmit(n->net, tnl->bearer_id, &xmitq, maddr);
}

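/* A node is up as long as at least one link is usable, i.e. slot 0 of
 * active_links[] holds a valid bearer id.
 */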
bool tipc_node_is_up(struct tipc_node *n)
{
	return n->active_links[0] != INVALID_BEARER_ID;
}

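/* tipc_node_check_dest(): report whether the link on bearer 'b' is up and
 * whether the stored media address still matches the one just advertised.
 * tipc_node_update_dest(): create the link on first contact if needed,
 * (re)program the media address, and take the link down so it can be
 * re-established towards the new address.
 */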
void tipc_node_check_dest(struct tipc_node *n, struct tipc_bearer *b,
			  bool *link_up, bool *addr_match,
			  struct tipc_media_addr *maddr)
{
	struct tipc_link *l = n->links[b->identity].link;
	struct tipc_media_addr *curr = &n->links[b->identity].maddr;

	*link_up = l && tipc_link_is_up(l);
	*addr_match = l && !memcmp(curr, maddr, sizeof(*maddr));
}

bool tipc_node_update_dest(struct tipc_node *n,  struct tipc_bearer *b,
			   struct tipc_media_addr *maddr)
{
	struct tipc_link *l = n->links[b->identity].link;
	struct tipc_media_addr *curr = &n->links[b->identity].maddr;
	struct sk_buff_head *inputq = &n->links[b->identity].inputq;

	if (!l) {
		l = tipc_link_create(n, b, maddr, inputq, &n->bclink.namedq);
		if (!l)
			return false;
		tipc_node_calculate_timer(n, l);
		if (n->link_cnt == 1) {
			if (!mod_timer(&n->timer, jiffies + n->keepalive_intv))
				tipc_node_get(n);
		}
	}
	memcpy(&l->media_addr, maddr, sizeof(*maddr));
	memcpy(curr, maddr, sizeof(*maddr));
	tipc_node_link_down(n, b->identity);
	return true;
}

void tipc_node_delete_links(struct net *net, int bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l;
	struct tipc_node *n;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		tipc_node_lock(n);
		l = n->links[bearer_id].link;
		if (l) {
			tipc_node_link_down(n, bearer_id);
			n->links[bearer_id].link = NULL;
			n->link_cnt--;
		}
		tipc_node_unlock(n);
		kfree(l);
	}
	rcu_read_unlock();
}

static void tipc_node_reset_links(struct tipc_node *n)
{
	char addr_string[16];
	u32 i;

	tipc_node_lock(n);

	pr_warn("Resetting all links to %s\n",
		tipc_addr_string_fill(addr_string, n->addr));

	for (i = 0; i < MAX_BEARERS; i++) {
		if (!n->links[i].link)
			continue;
		tipc_node_link_down(n, i);
	}
	tipc_node_unlock(n);
}

void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
	n_ptr->links[l_ptr->bearer_id].link = l_ptr;
	n_ptr->link_cnt++;
}

void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
	int i;

	for (i = 0; i < MAX_BEARERS; i++) {
		if (l_ptr != n_ptr->links[i].link)
			continue;
		n_ptr->links[i].link = NULL;
		n_ptr->link_cnt--;
	}
}

/* tipc_node_fsm_evt - node finite state machine
 * Determines when contact is allowed with peer node
 */
static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
{
	int state = n->state;

	switch (state) {
	case SELF_DOWN_PEER_DOWN:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_COMING;
			break;
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_COMING_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_UP_PEER_UP:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_BEGIN_EVT:
			state = NODE_SYNCHING;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case NODE_SYNCH_END_EVT:
		case NODE_FAILOVER_END_EVT:
			break;
		default:
			goto illegal_evt;
		}
		break;
	case SELF_DOWN_PEER_LEAVING:
		switch (evt) {
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case SELF_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_UP_PEER_COMING:
		switch (evt) {
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_COMING_PEER_UP:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_LEAVING_PEER_DOWN:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case NODE_FAILINGOVER:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_FAILOVER_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case NODE_SYNCHING:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown node fsm state %x\n", state);
		break;
	}
	n->state = state;
	return;

illegal_evt:
	pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
}

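/* Decide whether unicast reception from this peer is permitted in the
 * current FSM state: always in SELF_UP_PEER_UP, never in
 * SELF_LEAVING_PEER_DOWN, and not in SELF_DOWN_PEER_LEAVING as long as the
 * peer still claims this node is up.
 */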
bool tipc_node_filter_pkt(struct tipc_node *n, struct tipc_msg *hdr)
{
	int state = n->state;

	if (likely(state == SELF_UP_PEER_UP))
		return true;

	if (state == SELF_LEAVING_PEER_DOWN)
		return false;

	if (state == SELF_DOWN_PEER_LEAVING) {
		if (msg_peer_node_is_up(hdr))
			return false;
	}

	return true;
}

static void node_established_contact(struct tipc_node *n_ptr)
{
	tipc_node_fsm_evt(n_ptr, SELF_ESTABL_CONTACT_EVT);
	n_ptr->action_flags |= TIPC_NOTIFY_NODE_UP;
	n_ptr->bclink.oos_state = 0;
	n_ptr->bclink.acked = tipc_bclink_get_last_sent(n_ptr->net);
	tipc_bclink_add_node(n_ptr->net, n_ptr->addr);
}

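/* node_lost_contact(): flush per-node broadcast link state, abort any link
 * failover in progress, block re-contact until cleanup is done, and queue
 * connection abort messages towards all sockets that were connected to the
 * lost peer.
 */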
static void node_lost_contact(struct tipc_node *n_ptr)
{
	char addr_string[16];
	struct tipc_sock_conn *conn, *safe;
	struct list_head *conns = &n_ptr->conn_sks;
	struct sk_buff *skb;
	struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
	uint i;

	pr_debug("Lost contact with %s\n",
		 tipc_addr_string_fill(addr_string, n_ptr->addr));

	/* Flush broadcast link info associated with lost node */
	if (n_ptr->bclink.recv_permitted) {
		__skb_queue_purge(&n_ptr->bclink.deferdq);

		if (n_ptr->bclink.reasm_buf) {
			kfree_skb(n_ptr->bclink.reasm_buf);
			n_ptr->bclink.reasm_buf = NULL;
		}

		tipc_bclink_remove_node(n_ptr->net, n_ptr->addr);
		tipc_bclink_acknowledge(n_ptr, INVALID_LINK_SEQ);

		n_ptr->bclink.recv_permitted = false;
	}

	/* Abort any ongoing link failover */
	for (i = 0; i < MAX_BEARERS; i++) {
		struct tipc_link *l_ptr = n_ptr->links[i].link;
		if (!l_ptr)
			continue;
		l_ptr->exec_mode = TIPC_LINK_OPEN;
		kfree_skb(l_ptr->failover_reasm_skb);
		l_ptr->failover_reasm_skb = NULL;
		tipc_link_reset_fragments(l_ptr);
	}
	/* Prevent re-contact with node until cleanup is done */
	tipc_node_fsm_evt(n_ptr, SELF_LOST_CONTACT_EVT);

	/* Notify publications from this node */
	n_ptr->action_flags |= TIPC_NOTIFY_NODE_DOWN;

	/* Notify sockets connected to node */
	list_for_each_entry_safe(conn, safe, conns, list) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
				      SHORT_H_SIZE, 0, tn->own_addr,
				      conn->peer_node, conn->port,
				      conn->peer_port, TIPC_ERR_NO_NODE);
		if (likely(skb)) {
			skb_queue_tail(n_ptr->inputq, skb);
			n_ptr->action_flags |= TIPC_MSG_EVT;
		}
		list_del(&conn->list);
		kfree(conn);
	}
}

/**
 * tipc_node_get_linkname - get the name of a link
 *
 * @net: the applicable net namespace
 * @bearer_id: id of the bearer
 * @addr: peer node address
 * @linkname: link name output buffer
 * @len: size of the output buffer
 *
 * Returns 0 on success
 */
int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
			   char *linkname, size_t len)
{
	struct tipc_link *link;
	int err = -EINVAL;
	struct tipc_node *node = tipc_node_find(net, addr);

	if (!node)
		return err;

	if (bearer_id >= MAX_BEARERS)
		goto exit;

	tipc_node_lock(node);
	link = node->links[bearer_id].link;
	if (link) {
		strncpy(linkname, link->name, len);
		err = 0;
	}
exit:
	tipc_node_unlock(node);
	tipc_node_put(node);
	return err;
}

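/* tipc_node_unlock(): release the node spinlock, then carry out whatever
 * actions were flagged while it was held (socket delivery, name table
 * publication/withdrawal, node and link up/down notifications), so that
 * none of these callbacks run under the lock.
 */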
void tipc_node_unlock(struct tipc_node *node)
{
	struct net *net = node->net;
	u32 addr = 0;
	u32 flags = node->action_flags;
	u32 link_id = 0;
	struct list_head *publ_list;
	struct sk_buff_head *inputq = node->inputq;
	struct sk_buff_head *namedq;

	if (likely(!flags || (flags == TIPC_MSG_EVT))) {
		node->action_flags = 0;
		spin_unlock_bh(&node->lock);
		if (flags == TIPC_MSG_EVT)
			tipc_sk_rcv(net, inputq);
		return;
	}

	addr = node->addr;
	link_id = node->link_id;
	namedq = node->namedq;
	publ_list = &node->publ_list;

	node->action_flags &= ~(TIPC_MSG_EVT |
				TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
				TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP |
				TIPC_WAKEUP_BCAST_USERS | TIPC_BCAST_MSG_EVT |
				TIPC_NAMED_MSG_EVT | TIPC_BCAST_RESET);

	spin_unlock_bh(&node->lock);

	if (flags & TIPC_NOTIFY_NODE_DOWN)
		tipc_publ_notify(net, publ_list, addr);

	if (flags & TIPC_WAKEUP_BCAST_USERS)
		tipc_bclink_wakeup_users(net);

	if (flags & TIPC_NOTIFY_NODE_UP)
		tipc_named_node_up(net, addr);

	if (flags & TIPC_NOTIFY_LINK_UP)
		tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
				     TIPC_NODE_SCOPE, link_id, addr);

	if (flags & TIPC_NOTIFY_LINK_DOWN)
		tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
				      link_id, addr);

	if (flags & TIPC_MSG_EVT)
		tipc_sk_rcv(net, inputq);

	if (flags & TIPC_NAMED_MSG_EVT)
		tipc_named_rcv(net, namedq);

	if (flags & TIPC_BCAST_MSG_EVT)
		tipc_bclink_input(net);

	if (flags & TIPC_BCAST_RESET)
		tipc_node_reset_links(node);
}

/* Caller should hold node lock for the passed node */
static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
{
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_NODE_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_NODE);
	if (!attrs)
		goto msg_full;

	if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr))
		goto attr_msg_full;
	if (tipc_node_is_up(node))
		if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP))
			goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

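/* tipc_node_select_link(): the low bit of 'sel' picks one of the two
 * active-link slots, so different selectors are shared across two links of
 * equal priority while a given selector always maps to the same link.
 */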
static struct tipc_link *tipc_node_select_link(struct tipc_node *n, int sel,
					       int *bearer_id,
					       struct tipc_media_addr **maddr)
{
	int id = n->active_links[sel & 1];

	if (unlikely(id < 0))
		return NULL;

	*bearer_id = id;
	*maddr = &n->links[id].maddr;
	return n->links[id].link;
}

/**
 * tipc_node_xmit() is the general link level function for message sending
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * @dnode: address of destination node
 * @selector: a number used for deterministic link selection
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
 */
int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
		   u32 dnode, int selector)
{
	struct tipc_link *l = NULL;
	struct tipc_node *n;
	struct sk_buff_head xmitq;
	struct tipc_media_addr *maddr;
	int bearer_id;
	int rc = -EHOSTUNREACH;

	__skb_queue_head_init(&xmitq);
	n = tipc_node_find(net, dnode);
	if (likely(n)) {
		tipc_node_lock(n);
		l = tipc_node_select_link(n, selector, &bearer_id, &maddr);
		if (likely(l))
			rc = tipc_link_xmit(l, list, &xmitq);
		if (unlikely(rc == -ENOBUFS))
			tipc_node_link_down(n, bearer_id);
		tipc_node_unlock(n);
		tipc_node_put(n);
	}
	if (likely(!rc)) {
		tipc_bearer_xmit(net, bearer_id, &xmitq, maddr);
		return 0;
	}
	if (likely(in_own_node(net, dnode))) {
		tipc_sk_rcv(net, list);
		return 0;
	}
	return rc;
}

/* tipc_node_xmit_skb(): send single buffer to destination
 * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
 * messages, which will not be rejected
 * The only exception is datagram messages rerouted after secondary
 * lookup, which are rare and safe to dispose of anyway.
 * TODO: Return real return value, and let callers use
 * tipc_wait_for_sendpkt() where applicable
 */
int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
		       u32 selector)
{
	struct sk_buff_head head;
	int rc;

	skb_queue_head_init(&head);
	__skb_queue_tail(&head, skb);
	rc = tipc_node_xmit(net, &head, dnode, selector);
	if (rc == -ELINKCONG)
		kfree_skb(skb);
	return 0;
}

/**
 * tipc_node_check_state - check and if necessary update node state
 * @skb: TIPC packet
 * @bearer_id: identity of bearer delivering the packet
 * Returns true if state is ok, otherwise consumes buffer and returns false
 */
static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
				  int bearer_id)
{
	struct tipc_msg *hdr = buf_msg(skb);
	int usr = msg_user(hdr);
	int mtyp = msg_type(hdr);
	u16 oseqno = msg_seqno(hdr);
	u16 iseqno = msg_seqno(msg_get_wrapped(hdr));
	u16 exp_pkts = msg_msgcnt(hdr);
	u16 rcv_nxt, syncpt, dlv_nxt;
	int state = n->state;
	struct tipc_link *l, *pl = NULL;
	struct sk_buff_head;
	int i;

	l = n->links[bearer_id].link;
	if (!l)
		return false;
	rcv_nxt = l->rcv_nxt;


	if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
		return true;

	/* Find parallel link, if any */
	for (i = 0; i < MAX_BEARERS; i++) {
		if ((i != bearer_id) && n->links[i].link) {
			pl = n->links[i].link;
			break;
		}
	}

	/* Update node accessibility if applicable */
	if (state == SELF_UP_PEER_COMING) {
		if (!tipc_link_is_up(l))
			return true;
		if (!msg_peer_link_is_up(hdr))
			return true;
		tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
	}

	if (state == SELF_DOWN_PEER_LEAVING) {
		if (msg_peer_node_is_up(hdr))
			return false;
		tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
	}

	/* Ignore duplicate packets */
	if (less(oseqno, rcv_nxt))
		return true;

	/* Initiate or update failover mode if applicable */
	if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
		syncpt = oseqno + exp_pkts - 1;
		if (pl && tipc_link_is_up(pl)) {
			tipc_node_link_down(n, pl->bearer_id);
			pl->exec_mode = TIPC_LINK_BLOCKED;
		}
		/* If pkts arrive out of order, use lowest calculated syncpt */
		if (less(syncpt, n->sync_point))
			n->sync_point = syncpt;
	}

	/* Open parallel link when tunnel link reaches synch point */
	if ((n->state == NODE_FAILINGOVER) && (more(rcv_nxt, n->sync_point))) {
		tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
		if (pl)
			pl->exec_mode = TIPC_LINK_OPEN;
		return true;
	}

	/* Initiate or update synch mode if applicable */
	if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG)) {
		syncpt = iseqno + exp_pkts - 1;
		if (n->state == SELF_UP_PEER_UP) {
			n->sync_point = syncpt;
			tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
		}
		l->exec_mode = TIPC_LINK_TUNNEL;
		if (less(syncpt, n->sync_point))
			n->sync_point = syncpt;
	}

	/* Open tunnel link when parallel link reaches synch point */
	if ((n->state == NODE_SYNCHING) && (l->exec_mode == TIPC_LINK_TUNNEL)) {
		if (pl)
			dlv_nxt = mod(pl->rcv_nxt - skb_queue_len(pl->inputq));
		if (!pl || more(dlv_nxt, n->sync_point)) {
			tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
			l->exec_mode = TIPC_LINK_OPEN;
			return true;
		}
		if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
			return true;
		if (usr == LINK_PROTOCOL)
			return true;
		return false;
	}
	return true;
}

/**
 * tipc_rcv - process TIPC packets/messages arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet
 * @bearer: pointer to bearer message arrived on
 *
 * Invoked with no locks held. Bearer pointer must point to a valid bearer
 * structure (i.e. cannot be NULL), but bearer can be inactive.
 */
void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
{
	struct sk_buff_head xmitq;
	struct tipc_node *n;
	struct tipc_msg *hdr = buf_msg(skb);
	int usr = msg_user(hdr);
	int bearer_id = b->identity;
	struct tipc_link_entry *le;
	int rc = 0;

	__skb_queue_head_init(&xmitq);

	/* Ensure message is well-formed */
	if (unlikely(!tipc_msg_validate(skb)))
		goto discard;

	/* Handle arrival of a non-unicast link packet */
	if (unlikely(msg_non_seq(hdr))) {
		if (usr == LINK_CONFIG)
			tipc_disc_rcv(net, skb, b);
		else
			tipc_bclink_rcv(net, skb);
		return;
	}

	/* Locate neighboring node that sent packet */
	n = tipc_node_find(net, msg_prevnode(hdr));
	if (unlikely(!n))
		goto discard;
	le = &n->links[bearer_id];

	tipc_node_lock(n);

	/* Is reception permitted at the moment? */
	if (!tipc_node_filter_pkt(n, hdr))
		goto unlock;

	if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
		tipc_bclink_sync_state(n, hdr);

	/* Release acked broadcast messages */
	if (unlikely(n->bclink.acked != msg_bcast_ack(hdr)))
		tipc_bclink_acknowledge(n, msg_bcast_ack(hdr));

	/* Check and if necessary update node state */
	if (likely(tipc_node_check_state(n, skb, bearer_id))) {
		rc = tipc_link_rcv(le->link, skb, &xmitq);
		skb = NULL;
	}

	if (unlikely(rc & TIPC_LINK_UP_EVT))
		tipc_node_link_up(n, bearer_id, &xmitq);

	if (unlikely(rc & TIPC_LINK_DOWN_EVT))
		tipc_node_link_down(n, bearer_id);
unlock:
	tipc_node_unlock(n);

	if (!skb_queue_empty(&le->inputq))
		tipc_sk_rcv(net, &le->inputq);

	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);

	tipc_node_put(n);
discard:
	kfree_skb(skb);
}

int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	int done = cb->args[0];
	int last_addr = cb->args[1];
	struct tipc_node *node;
	struct tipc_nl_msg msg;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (last_addr) {
		node = tipc_node_find(net, last_addr);
		if (!node) {
			rcu_read_unlock();
			/* We never set seq or call nl_dump_check_consistent(),
			 * which means that setting prev_seq here will cause the
			 * consistency check to fail in the netlink callback
			 * handler. Resulting in the NLMSG_DONE message having
			 * the NLM_F_DUMP_INTR flag set if the node state
			 * changed while we released the lock.
			 */
			cb->prev_seq = 1;
			return -EPIPE;
		}
		tipc_node_put(node);
	}

	list_for_each_entry_rcu(node, &tn->node_list, list) {
		if (last_addr) {
			if (node->addr == last_addr)
				last_addr = 0;
			else
				continue;
		}

		tipc_node_lock(node);
		err = __tipc_nl_add_node(&msg, node);
		if (err) {
			last_addr = node->addr;
			tipc_node_unlock(node);
			goto out;
		}

		tipc_node_unlock(node);
	}
	done = 1;
out:
	cb->args[0] = done;
	cb->args[1] = last_addr;
	rcu_read_unlock();

	return skb->len;
}