/*
 * net/tipc/node.c: TIPC node management routines
 *
 * Copyright (c) 2000-2006, 2012-2015, Ericsson AB
 * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "node.h"
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
#include "discover.h"

static void node_lost_contact(struct tipc_node *n_ptr);
static void node_established_contact(struct tipc_node *n_ptr);
static void tipc_node_delete(struct tipc_node *node);
static void tipc_node_timeout(unsigned long data);
static void tipc_node_fsm_evt(struct tipc_node *n, int evt);

struct tipc_sock_conn {
	u32 port;
	u32 peer_port;
	u32 peer_node;
	struct list_head list;
};

static const struct nla_policy tipc_nl_node_policy[TIPC_NLA_NODE_MAX + 1] = {
	[TIPC_NLA_NODE_UNSPEC]		= { .type = NLA_UNSPEC },
	[TIPC_NLA_NODE_ADDR]		= { .type = NLA_U32 },
	[TIPC_NLA_NODE_UP]		= { .type = NLA_FLAG }
};

/*
 * A trivial power-of-two bitmask technique is used for speed, since this
 * operation is done for every incoming TIPC packet. The number of hash table
 * entries has been chosen so that no hash chain exceeds 8 nodes and will
 * usually be much smaller (typically only a single node).
 */
static unsigned int tipc_hashfn(u32 addr)
{
	return addr & (NODE_HTABLE_SIZE - 1);
}

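/* Node reference counting: the node object is unhashed and freed via RCU
 * once the last reference to it has been dropped.
 */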
static void tipc_node_kref_release(struct kref *kref)
{
	struct tipc_node *node = container_of(kref, struct tipc_node, kref);

	tipc_node_delete(node);
}

void tipc_node_put(struct tipc_node *node)
{
	kref_put(&node->kref, tipc_node_kref_release);
}

static void tipc_node_get(struct tipc_node *node)
{
	kref_get(&node->kref);
}

/*
 * tipc_node_find - locate specified node object, if it exists
 */
struct tipc_node *tipc_node_find(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node;

	if (unlikely(!in_own_cluster_exact(net, addr)))
		return NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(node, &tn->node_htable[tipc_hashfn(addr)],
				 hash) {
		if (node->addr == addr) {
			tipc_node_get(node);
			rcu_read_unlock();
			return node;
		}
	}
	rcu_read_unlock();
	return NULL;
}

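/*
 * tipc_node_create - create node object, or return the existing one if a
 * node with this address is already known
 */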
struct tipc_node *tipc_node_create(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *n_ptr, *temp_node;

	spin_lock_bh(&tn->node_list_lock);
	n_ptr = tipc_node_find(net, addr);
	if (n_ptr)
		goto exit;
	n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC);
	if (!n_ptr) {
		pr_warn("Node creation failed, no memory\n");
		goto exit;
	}
	n_ptr->addr = addr;
	n_ptr->net = net;
	kref_init(&n_ptr->kref);
	spin_lock_init(&n_ptr->lock);
	INIT_HLIST_NODE(&n_ptr->hash);
	INIT_LIST_HEAD(&n_ptr->list);
	INIT_LIST_HEAD(&n_ptr->publ_list);
	INIT_LIST_HEAD(&n_ptr->conn_sks);
	skb_queue_head_init(&n_ptr->bclink.namedq);
	__skb_queue_head_init(&n_ptr->bclink.deferdq);
	hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		if (n_ptr->addr < temp_node->addr)
			break;
	}
	list_add_tail_rcu(&n_ptr->list, &temp_node->list);
	n_ptr->state = SELF_DOWN_PEER_LEAVING;
	n_ptr->signature = INVALID_NODE_SIG;
	n_ptr->active_links[0] = INVALID_BEARER_ID;
	n_ptr->active_links[1] = INVALID_BEARER_ID;
	tipc_node_get(n_ptr);
	setup_timer(&n_ptr->timer, tipc_node_timeout, (unsigned long)n_ptr);
	n_ptr->keepalive_intv = U32_MAX;
exit:
	spin_unlock_bh(&tn->node_list_lock);
	return n_ptr;
}

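/* tipc_node_calculate_timer - derive the node keepalive interval from the
 * link's tolerance, and adjust the link's abort limit to match
 */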
static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
{
	unsigned long tol = l->tolerance;
	unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
	unsigned long keepalive_intv = msecs_to_jiffies(intv);

	/* Link with lowest tolerance determines timer interval */
	if (keepalive_intv < n->keepalive_intv)
		n->keepalive_intv = keepalive_intv;

	/* Ensure link's abort limit corresponds to current interval */
	l->abort_limit = l->tolerance / jiffies_to_msecs(n->keepalive_intv);
}

static void tipc_node_delete(struct tipc_node *node)
{
	list_del_rcu(&node->list);
	hlist_del_rcu(&node->hash);
	kfree_rcu(node, rcu);
}

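/* tipc_node_stop - drop the timer and list references to all nodes, so that
 * each node is deleted once its last reference is gone
 */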
void tipc_node_stop(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node, *t_node;

	spin_lock_bh(&tn->node_list_lock);
	list_for_each_entry_safe(node, t_node, &tn->node_list, list) {
		if (del_timer(&node->timer))
			tipc_node_put(node);
		tipc_node_put(node);
	}
	spin_unlock_bh(&tn->node_list_lock);
}

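/* tipc_node_add_conn - register a socket connection with its peer node, so
 * that the socket can be aborted if contact with that node is lost
 */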
int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn;
	int err = 0;

	if (in_own_node(net, dnode))
		return 0;

	node = tipc_node_find(net, dnode);
	if (!node) {
		pr_warn("Connecting sock to node 0x%x failed\n", dnode);
		return -EHOSTUNREACH;
	}
	conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
	if (!conn) {
		err = -EHOSTUNREACH;
		goto exit;
	}
	conn->peer_node = dnode;
	conn->port = port;
	conn->peer_port = peer_port;

	tipc_node_lock(node);
	list_add_tail(&conn->list, &node->conn_sks);
	tipc_node_unlock(node);
exit:
	tipc_node_put(node);
	return err;
}

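/* tipc_node_remove_conn - unregister a socket connection from its peer node
 */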
void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn, *safe;

	if (in_own_node(net, dnode))
		return;

	node = tipc_node_find(net, dnode);
	if (!node)
		return;

	tipc_node_lock(node);
	list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
		if (port != conn->port)
			continue;
		list_del(&conn->list);
		kfree(conn);
	}
	tipc_node_unlock(node);
	tipc_node_put(node);
}

/* tipc_node_timeout - handle expiration of node timer
 */
static void tipc_node_timeout(unsigned long data)
{
	struct tipc_node *n = (struct tipc_node *)data;
	struct sk_buff_head xmitq;
	struct tipc_link *l;
	struct tipc_media_addr *maddr;
	int bearer_id;
	int rc = 0;

	__skb_queue_head_init(&xmitq);

	for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
		tipc_node_lock(n);
		l = n->links[bearer_id].link;
		if (l) {
			/* Link tolerance may change asynchronously: */
			tipc_node_calculate_timer(n, l);
			rc = tipc_link_timeout(l, &xmitq);
			if (rc & TIPC_LINK_DOWN_EVT)
				tipc_node_link_down(n, bearer_id);
		}
		tipc_node_unlock(n);
		maddr = &n->links[bearer_id].maddr;
		tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
	}
	if (!mod_timer(&n->timer, jiffies + n->keepalive_intv))
		tipc_node_get(n);
	tipc_node_put(n);
}

/**
 * tipc_node_link_up - handle addition of link
 *
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
void tipc_node_link_up(struct tipc_node *n, int bearer_id)
{
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	struct tipc_link_entry *links = n->links;
	struct tipc_link *l = n->links[bearer_id].link;

	/* Leave room for tunnel header when returning 'mtu' to users: */
	links[bearer_id].mtu = l->mtu - INT_H_SIZE;

	n->working_links++;
	n->action_flags |= TIPC_NOTIFY_LINK_UP;
	n->link_id = l->peer_bearer_id << 16 | l->bearer_id;

	tipc_bearer_add_dest(n->net, bearer_id, n->addr);

	pr_debug("Established link <%s> on network plane %c\n",
		 l->name, l->net_plane);

	/* No active links ? => take both active slots */
	if (!tipc_node_is_up(n)) {
		*slot0 = bearer_id;
		*slot1 = bearer_id;
		node_established_contact(n);
		return;
	}

	/* Lower prio than current active ? => no slot */
	if (l->priority < links[*slot0].link->priority) {
		pr_debug("New link <%s> becomes standby\n", l->name);
		return;
	}
	tipc_link_dup_queue_xmit(links[*slot0].link, l);

	/* Same prio as current active ? => take one slot */
	if (l->priority == links[*slot0].link->priority) {
		*slot0 = bearer_id;
		return;
	}

	/* Higher prio than current active => take both active slots */
	pr_debug("Old link <%s> now standby\n", links[*slot0].link->name);
	*slot0 = bearer_id;
	*slot1 = bearer_id;
}

/**
 * tipc_node_link_down - handle loss of link
 */
void tipc_node_link_down(struct tipc_node *n, int bearer_id)
{
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	int i, highest = 0;
	struct tipc_link *l, *_l;

	l = n->links[bearer_id].link;
	if (!l || !tipc_link_is_up(l))
		return;

	n->working_links--;
	n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
	n->link_id = l->peer_bearer_id << 16 | l->bearer_id;

	tipc_bearer_remove_dest(n->net, l->bearer_id, n->addr);

	pr_debug("Lost link <%s> on network plane %c\n",
		 l->name, l->net_plane);

	/* Select new active link if any available */
	*slot0 = INVALID_BEARER_ID;
	*slot1 = INVALID_BEARER_ID;
	for (i = 0; i < MAX_BEARERS; i++) {
		_l = n->links[i].link;
		if (!_l || !tipc_link_is_up(_l))
			continue;
		if (_l == l)
			continue;
		if (_l->priority < highest)
			continue;
		if (_l->priority > highest) {
			highest = _l->priority;
			*slot0 = i;
			*slot1 = i;
			continue;
		}
		*slot1 = i;
	}

	if (tipc_node_is_up(n))
		tipc_link_failover_send_queue(l);

	tipc_link_reset(l);

	if (!tipc_node_is_up(n))
		node_lost_contact(n);
}

bool tipc_node_is_up(struct tipc_node *n)
{
	return n->active_links[0] != INVALID_BEARER_ID;
}

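/* tipc_node_check_dest - report whether the link on the given bearer is up,
 * and whether the discovered media address matches the one currently in use
 */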
void tipc_node_check_dest(struct tipc_node *n, struct tipc_bearer *b,
			  bool *link_up, bool *addr_match,
			  struct tipc_media_addr *maddr)
{
	struct tipc_link *l = n->links[b->identity].link;
	struct tipc_media_addr *curr = &n->links[b->identity].maddr;

	*link_up = l && tipc_link_is_up(l);
	*addr_match = l && !memcmp(curr, maddr, sizeof(*maddr));
}

bool tipc_node_update_dest(struct tipc_node *n,  struct tipc_bearer *b,
			   struct tipc_media_addr *maddr)
{
	struct tipc_link *l = n->links[b->identity].link;
	struct tipc_media_addr *curr = &n->links[b->identity].maddr;
	struct sk_buff_head *inputq = &n->links[b->identity].inputq;

	if (!l) {
		l = tipc_link_create(n, b, maddr, inputq, &n->bclink.namedq);
		if (!l)
			return false;
		tipc_node_calculate_timer(n, l);
		if (n->link_cnt == 1) {
			if (!mod_timer(&n->timer, jiffies + n->keepalive_intv))
				tipc_node_get(n);
		}
	}
	memcpy(&l->media_addr, maddr, sizeof(*maddr));
	memcpy(curr, maddr, sizeof(*maddr));
	tipc_node_link_down(n, b->identity);
	return true;
}

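/* tipc_node_delete_links - take down and free all links on a given bearer
 */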
void tipc_node_delete_links(struct net *net, int bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l;
	struct tipc_node *n;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		tipc_node_lock(n);
		l = n->links[bearer_id].link;
		if (l) {
			tipc_node_link_down(n, bearer_id);
			n->links[bearer_id].link = NULL;
			n->link_cnt--;
		}
		tipc_node_unlock(n);
		kfree(l);
	}
	rcu_read_unlock();
}

static void tipc_node_reset_links(struct tipc_node *n)
{
	char addr_string[16];
	u32 i;

	tipc_node_lock(n);

	pr_warn("Resetting all links to %s\n",
		tipc_addr_string_fill(addr_string, n->addr));

	for (i = 0; i < MAX_BEARERS; i++) {
		if (!n->links[i].link)
			continue;
		tipc_node_link_down(n, i);
	}
	tipc_node_unlock(n);
}

void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
	n_ptr->links[l_ptr->bearer_id].link = l_ptr;
	n_ptr->link_cnt++;
}

void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
	int i;

	for (i = 0; i < MAX_BEARERS; i++) {
		if (l_ptr != n_ptr->links[i].link)
			continue;
		n_ptr->links[i].link = NULL;
		n_ptr->link_cnt--;
	}
}

/* tipc_node_fsm_evt - node finite state machine
 * Determines when contact is allowed with peer node
 */
static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
{
	int state = n->state;

	switch (state) {
	case SELF_DOWN_PEER_DOWN:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_COMING;
			break;
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_COMING_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		default:
			pr_err("Unknown node fsm evt %x/%x\n", state, evt);
		}
		break;
	case SELF_UP_PEER_UP:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		default:
			pr_err("Unknown node fsm evt %x/%x\n", state, evt);
		}
		break;
	case SELF_DOWN_PEER_LEAVING:
		switch (evt) {
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case SELF_LOST_CONTACT_EVT:
			break;
		default:
			pr_err("Unknown node fsm evt %x/%x\n", state, evt);
		}
		break;
	case SELF_UP_PEER_COMING:
		switch (evt) {
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		default:
			pr_err("Unknown node fsm evt %x/%x\n", state, evt);
		}
		break;
	case SELF_COMING_PEER_UP:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		default:
			pr_err("Unknown node fsm evt %x/%x\n", state, evt);
		}
		break;
	case SELF_LEAVING_PEER_DOWN:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		default:
			pr_err("Unknown node fsm evt %x/%x\n", state, evt);
		}
		break;
	default:
		pr_err("Unknown node fsm state %x\n", state);
		break;
	}

	n->state = state;
}

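/* tipc_node_filter_skb - check if reception of a packet is permitted in the
 * current node FSM state, generating FSM events where contact is implied
 */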
bool tipc_node_filter_skb(struct tipc_node *n, struct tipc_link *l,
			  struct tipc_msg *hdr)
{
	int state = n->state;

	if (likely(state == SELF_UP_PEER_UP))
		return true;

	if (state == SELF_DOWN_PEER_DOWN)
		return true;

	if (state == SELF_UP_PEER_COMING) {
		/* If not traffic msg, peer may still be ESTABLISHING */
		if (tipc_link_is_up(l) && msg_is_traffic(hdr))
			tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
		return true;
	}

	if (state == SELF_COMING_PEER_UP)
		return true;

	if (state == SELF_LEAVING_PEER_DOWN)
		return false;

	if (state == SELF_DOWN_PEER_LEAVING) {
		if (msg_peer_is_up(hdr))
			return false;
		tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
		return true;
	}
	return false;
}

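/* node_established_contact - first contact with the peer is confirmed;
 * add it to the broadcast link and schedule a node-up notification
 */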
static void node_established_contact(struct tipc_node *n_ptr)
{
	tipc_node_fsm_evt(n_ptr, SELF_ESTABL_CONTACT_EVT);
	n_ptr->action_flags |= TIPC_NOTIFY_NODE_UP;
	n_ptr->bclink.oos_state = 0;
	n_ptr->bclink.acked = tipc_bclink_get_last_sent(n_ptr->net);
	tipc_bclink_add_node(n_ptr->net, n_ptr->addr);
}

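/* node_lost_contact - flush broadcast link state, abort any ongoing link
 * failover, and notify subscribers and connected sockets of the lost peer
 */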
static void node_lost_contact(struct tipc_node *n_ptr)
{
	char addr_string[16];
	struct tipc_sock_conn *conn, *safe;
	struct list_head *conns = &n_ptr->conn_sks;
	struct sk_buff *skb;
	struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
	uint i;

	pr_debug("Lost contact with %s\n",
		 tipc_addr_string_fill(addr_string, n_ptr->addr));

	/* Flush broadcast link info associated with lost node */
	if (n_ptr->bclink.recv_permitted) {
		__skb_queue_purge(&n_ptr->bclink.deferdq);

		if (n_ptr->bclink.reasm_buf) {
			kfree_skb(n_ptr->bclink.reasm_buf);
			n_ptr->bclink.reasm_buf = NULL;
		}

		tipc_bclink_remove_node(n_ptr->net, n_ptr->addr);
		tipc_bclink_acknowledge(n_ptr, INVALID_LINK_SEQ);

		n_ptr->bclink.recv_permitted = false;
	}

	/* Abort any ongoing link failover */
	for (i = 0; i < MAX_BEARERS; i++) {
		struct tipc_link *l_ptr = n_ptr->links[i].link;
		if (!l_ptr)
			continue;
		l_ptr->exec_mode = TIPC_LINK_OPEN;
		l_ptr->failover_checkpt = 0;
		l_ptr->failover_pkts = 0;
		kfree_skb(l_ptr->failover_skb);
		l_ptr->failover_skb = NULL;
		tipc_link_reset_fragments(l_ptr);
	}
	/* Prevent re-contact with node until cleanup is done */
	tipc_node_fsm_evt(n_ptr, SELF_LOST_CONTACT_EVT);

	/* Notify publications from this node */
	n_ptr->action_flags |= TIPC_NOTIFY_NODE_DOWN;

	/* Notify sockets connected to node */
	list_for_each_entry_safe(conn, safe, conns, list) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
				      SHORT_H_SIZE, 0, tn->own_addr,
				      conn->peer_node, conn->port,
				      conn->peer_port, TIPC_ERR_NO_NODE);
		if (likely(skb)) {
			skb_queue_tail(n_ptr->inputq, skb);
			n_ptr->action_flags |= TIPC_MSG_EVT;
		}
		list_del(&conn->list);
		kfree(conn);
	}
}

/**
 * tipc_node_get_linkname - get the name of a link
 *
 * @net: the applicable net namespace
 * @bearer_id: id of the bearer
 * @addr: peer node address
 * @linkname: link name output buffer
 * @len: size of the output buffer
 *
 * Returns 0 on success
 */
int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
			   char *linkname, size_t len)
{
	struct tipc_link *link;
	int err = -EINVAL;
	struct tipc_node *node = tipc_node_find(net, addr);

	if (!node)
		return err;

	if (bearer_id >= MAX_BEARERS)
		goto exit;

	tipc_node_lock(node);
	link = node->links[bearer_id].link;
	if (link) {
		strncpy(linkname, link->name, len);
		err = 0;
	}
exit:
	tipc_node_unlock(node);
	tipc_node_put(node);
	return err;
}

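/* tipc_node_unlock - release the node lock, then carry out the notifications
 * and message deliveries deferred through the node's action flags
 */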
void tipc_node_unlock(struct tipc_node *node)
{
	struct net *net = node->net;
	u32 addr = 0;
	u32 flags = node->action_flags;
	u32 link_id = 0;
	struct list_head *publ_list;
	struct sk_buff_head *inputq = node->inputq;
	struct sk_buff_head *namedq;

	if (likely(!flags || (flags == TIPC_MSG_EVT))) {
		node->action_flags = 0;
		spin_unlock_bh(&node->lock);
		if (flags == TIPC_MSG_EVT)
			tipc_sk_rcv(net, inputq);
		return;
	}

	addr = node->addr;
	link_id = node->link_id;
	namedq = node->namedq;
	publ_list = &node->publ_list;

	node->action_flags &= ~(TIPC_MSG_EVT |
				TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
				TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP |
				TIPC_WAKEUP_BCAST_USERS | TIPC_BCAST_MSG_EVT |
				TIPC_NAMED_MSG_EVT | TIPC_BCAST_RESET);

	spin_unlock_bh(&node->lock);

	if (flags & TIPC_NOTIFY_NODE_DOWN)
		tipc_publ_notify(net, publ_list, addr);

	if (flags & TIPC_WAKEUP_BCAST_USERS)
		tipc_bclink_wakeup_users(net);

	if (flags & TIPC_NOTIFY_NODE_UP)
		tipc_named_node_up(net, addr);

	if (flags & TIPC_NOTIFY_LINK_UP)
		tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
				     TIPC_NODE_SCOPE, link_id, addr);

	if (flags & TIPC_NOTIFY_LINK_DOWN)
		tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
				      link_id, addr);

	if (flags & TIPC_MSG_EVT)
		tipc_sk_rcv(net, inputq);

	if (flags & TIPC_NAMED_MSG_EVT)
		tipc_named_rcv(net, namedq);

	if (flags & TIPC_BCAST_MSG_EVT)
		tipc_bclink_input(net);

	if (flags & TIPC_BCAST_RESET)
		tipc_node_reset_links(node);
}

/* Caller should hold node lock for the passed node */
static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
{
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_NODE_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_NODE);
	if (!attrs)
		goto msg_full;

	if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr))
		goto attr_msg_full;
	if (tipc_node_is_up(node))
		if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP))
			goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

static struct tipc_link *tipc_node_select_link(struct tipc_node *n, int sel,
					       int *bearer_id,
					       struct tipc_media_addr **maddr)
{
	int id = n->active_links[sel & 1];

	if (unlikely(id < 0))
		return NULL;

	*bearer_id = id;
	*maddr = &n->links[id].maddr;
	return n->links[id].link;
}

/**
 * tipc_node_xmit() is the general link level function for message sending
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * @dnode: address of destination node
 * @selector: a number used for deterministic link selection
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
 */
int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
		   u32 dnode, int selector)
{
	struct tipc_link *l = NULL;
	struct tipc_node *n;
	struct sk_buff_head xmitq;
	struct tipc_media_addr *maddr;
	int bearer_id;
	int rc = -EHOSTUNREACH;

	__skb_queue_head_init(&xmitq);
	n = tipc_node_find(net, dnode);
	if (likely(n)) {
		tipc_node_lock(n);
		l = tipc_node_select_link(n, selector, &bearer_id, &maddr);
		if (likely(l))
			rc = tipc_link_xmit(l, list, &xmitq);
		if (unlikely(rc == -ENOBUFS))
			tipc_node_link_down(n, bearer_id);
		tipc_node_unlock(n);
		tipc_node_put(n);
	}
	if (likely(!rc)) {
		tipc_bearer_xmit(net, bearer_id, &xmitq, maddr);
		return 0;
	}
	if (likely(in_own_node(net, dnode))) {
		tipc_sk_rcv(net, list);
		return 0;
	}
	return rc;
}

/* tipc_node_xmit_skb(): send single buffer to destination
 * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
 * messages, which will not be rejected
 * The only exception is datagram messages rerouted after secondary
 * lookup, which are rare and safe to dispose of anyway.
 * TODO: Return real return value, and let callers use
 * tipc_wait_for_sendpkt() where applicable
 */
int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
		       u32 selector)
{
	struct sk_buff_head head;
	int rc;

	skb_queue_head_init(&head);
	__skb_queue_tail(&head, skb);
	rc = tipc_node_xmit(net, &head, dnode, selector);
	if (rc == -ELINKCONG)
		kfree_skb(skb);
	return 0;
}

/* tipc_node_tnl_init(): handle a received TUNNEL_PROTOCOL packet,
 * in order to control parallel link failover or synchronization
 */
static void tipc_node_tnl_init(struct tipc_node *n, int bearer_id,
			       struct sk_buff *skb)
{
	struct tipc_link *tnl, *pl;
	struct tipc_msg *hdr = buf_msg(skb);
	u16 oseqno = msg_seqno(hdr);
	int pb_id = msg_bearer_id(hdr);

	if (pb_id >= MAX_BEARERS)
		return;

	tnl = n->links[bearer_id].link;
	if (!tnl)
		return;

	/* Ignore if duplicate */
	if (less(oseqno, tnl->rcv_nxt))
		return;

	pl = n->links[pb_id].link;
	if (!pl)
		return;

	if (msg_type(hdr) == FAILOVER_MSG) {
		if (tipc_link_is_up(pl)) {
			tipc_node_link_down(n, pb_id);
			pl->exec_mode = TIPC_LINK_BLOCKED;
		}
	}
}

/**
 * tipc_rcv - process TIPC packets/messages arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet
 * @bearer: pointer to bearer message arrived on
 *
 * Invoked with no locks held. Bearer pointer must point to a valid bearer
 * structure (i.e. cannot be NULL), but bearer can be inactive.
 */
void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
{
	struct sk_buff_head xmitq;
	struct tipc_node *n;
	struct tipc_link *l;
	struct tipc_msg *hdr;
	struct tipc_media_addr *maddr;
	int bearer_id = b->identity;
	int rc = 0;
	int usr;

	__skb_queue_head_init(&xmitq);

	/* Ensure message is well-formed */
	if (unlikely(!tipc_msg_validate(skb)))
		goto discard;

	/* Handle arrival of a non-unicast link packet */
	hdr = buf_msg(skb);
	usr = msg_user(hdr);
	if (unlikely(msg_non_seq(hdr))) {
		if (usr == LINK_CONFIG)
			tipc_disc_rcv(net, skb, b);
		else
			tipc_bclink_rcv(net, skb);
		return;
	}

	/* Locate neighboring node that sent packet */
	n = tipc_node_find(net, msg_prevnode(hdr));
	if (unlikely(!n))
		goto discard;
	tipc_node_lock(n);

	/* Prepare links for tunneled reception if applicable */
	if (unlikely(usr == TUNNEL_PROTOCOL))
		tipc_node_tnl_init(n, bearer_id, skb);

	/* Locate link endpoint that should handle packet */
	l = n->links[bearer_id].link;
	if (unlikely(!l))
		goto unlock;

	/* Is reception of this packet permitted at the moment ? */
	if (unlikely(n->state != SELF_UP_PEER_UP))
		if (!tipc_node_filter_skb(n, l, hdr))
			goto unlock;

	if (unlikely(usr == LINK_PROTOCOL))
		tipc_bclink_sync_state(n, hdr);

	/* Release acked broadcast messages */
	if (unlikely(n->bclink.acked != msg_bcast_ack(hdr)))
		tipc_bclink_acknowledge(n, msg_bcast_ack(hdr));

	/* Check protocol and update link state */
	rc = tipc_link_rcv(l, skb, &xmitq);

	if (unlikely(rc & TIPC_LINK_UP_EVT))
		tipc_node_link_up(n, bearer_id);
	if (unlikely(rc & TIPC_LINK_DOWN_EVT))
		tipc_node_link_down(n, bearer_id);
	skb = NULL;
unlock:
	tipc_node_unlock(n);
	tipc_sk_rcv(net, &n->links[bearer_id].inputq);
	maddr = &n->links[bearer_id].maddr;
	tipc_bearer_xmit(net, bearer_id, &xmitq, maddr);
	tipc_node_put(n);
discard:
	kfree_skb(skb);
}

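/* tipc_nl_node_dump - netlink dump callback, listing all known peer nodes
 */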
int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	int done = cb->args[0];
	int last_addr = cb->args[1];
	struct tipc_node *node;
	struct tipc_nl_msg msg;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (last_addr) {
		node = tipc_node_find(net, last_addr);
		if (!node) {
			rcu_read_unlock();
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistence check to fail in the netlink callback
			 * handler. Resulting in the NLMSG_DONE message having
			 * the NLM_F_DUMP_INTR flag set if the node state
			 * changed while we released the lock.
			 */
			cb->prev_seq = 1;
			return -EPIPE;
		}
		tipc_node_put(node);
	}

	list_for_each_entry_rcu(node, &tn->node_list, list) {
		if (last_addr) {
			if (node->addr == last_addr)
				last_addr = 0;
			else
				continue;
		}

		tipc_node_lock(node);
		err = __tipc_nl_add_node(&msg, node);
		if (err) {
			last_addr = node->addr;
			tipc_node_unlock(node);
			goto out;
		}

		tipc_node_unlock(node);
	}
	done = 1;
out:
	cb->args[0] = done;
	cb->args[1] = last_addr;
	rcu_read_unlock();

	return skb->len;
}