/*
 * net/tipc/node.c: TIPC node management routines
 *
 * Copyright (c) 2000-2006, 2012-2015, Ericsson AB
 * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "node.h"
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
#include "discover.h"

static void node_lost_contact(struct tipc_node *n_ptr);
static void node_established_contact(struct tipc_node *n_ptr);
static void tipc_node_delete(struct tipc_node *node);
static void tipc_node_timeout(unsigned long data);
static void tipc_node_fsm_evt(struct tipc_node *n, int evt);

struct tipc_sock_conn {
	u32 port;
	u32 peer_port;
	u32 peer_node;
	struct list_head list;
};

static const struct nla_policy tipc_nl_node_policy[TIPC_NLA_NODE_MAX + 1] = {
	[TIPC_NLA_NODE_UNSPEC]		= { .type = NLA_UNSPEC },
	[TIPC_NLA_NODE_ADDR]		= { .type = NLA_U32 },
	[TIPC_NLA_NODE_UP]		= { .type = NLA_FLAG }
};

/*
 * A trivial power-of-two bitmask technique is used for speed, since this
 * operation is done for every incoming TIPC packet. The number of hash table
 * entries has been chosen so that no hash chain exceeds 8 nodes and will
 * usually be much smaller (typically only a single node).
 */
static unsigned int tipc_hashfn(u32 addr)
{
	return addr & (NODE_HTABLE_SIZE - 1);
}

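/* Node reference counting: tipc_node_get()/tipc_node_put() wrap a kref on
 * the node object; the final put triggers tipc_node_kref_release(), which
 * deletes the node via tipc_node_delete().
 */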
static void tipc_node_kref_release(struct kref *kref)
{
	struct tipc_node *node = container_of(kref, struct tipc_node, kref);

	tipc_node_delete(node);
}

void tipc_node_put(struct tipc_node *node)
{
	kref_put(&node->kref, tipc_node_kref_release);
}

static void tipc_node_get(struct tipc_node *node)
{
	kref_get(&node->kref);
}

/*
 * tipc_node_find - locate specified node object, if it exists
 */
struct tipc_node *tipc_node_find(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node;

	if (unlikely(!in_own_cluster_exact(net, addr)))
		return NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(node, &tn->node_htable[tipc_hashfn(addr)],
				 hash) {
		if (node->addr == addr) {
			tipc_node_get(node);
			rcu_read_unlock();
			return node;
		}
	}
	rcu_read_unlock();
	return NULL;
}

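/* tipc_node_create - look up, and if not found create, the node object for
 * the given address. Returns the existing or newly created node, or NULL if
 * allocation fails. A new node is inserted into the address hash table and
 * into the node list, which is kept sorted by address.
 */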
struct tipc_node *tipc_node_create(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *n_ptr, *temp_node;

	spin_lock_bh(&tn->node_list_lock);
	n_ptr = tipc_node_find(net, addr);
	if (n_ptr)
		goto exit;
	n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC);
	if (!n_ptr) {
		pr_warn("Node creation failed, no memory\n");
		goto exit;
	}
	n_ptr->addr = addr;
	n_ptr->net = net;
	kref_init(&n_ptr->kref);
	spin_lock_init(&n_ptr->lock);
	INIT_HLIST_NODE(&n_ptr->hash);
	INIT_LIST_HEAD(&n_ptr->list);
	INIT_LIST_HEAD(&n_ptr->publ_list);
	INIT_LIST_HEAD(&n_ptr->conn_sks);
	skb_queue_head_init(&n_ptr->bclink.namedq);
	__skb_queue_head_init(&n_ptr->bclink.deferdq);
	hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		if (n_ptr->addr < temp_node->addr)
			break;
	}
	list_add_tail_rcu(&n_ptr->list, &temp_node->list);
	n_ptr->state = SELF_DOWN_PEER_LEAVING;
	n_ptr->signature = INVALID_NODE_SIG;
	n_ptr->active_links[0] = INVALID_BEARER_ID;
	n_ptr->active_links[1] = INVALID_BEARER_ID;
	tipc_node_get(n_ptr);
	setup_timer(&n_ptr->timer, tipc_node_timeout, (unsigned long)n_ptr);
	n_ptr->keepalive_intv = U32_MAX;
exit:
	spin_unlock_bh(&tn->node_list_lock);
	return n_ptr;
}

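/* tipc_node_calculate_timer - set the node keepalive interval from the link
 * with the lowest tolerance, and align the link's abort limit with the
 * resulting timer interval.
 */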
static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
{
	unsigned long tol = l->tolerance;
	unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
	unsigned long keepalive_intv = msecs_to_jiffies(intv);

	/* Link with lowest tolerance determines timer interval */
	if (keepalive_intv < n->keepalive_intv)
		n->keepalive_intv = keepalive_intv;

	/* Ensure link's abort limit corresponds to current interval */
	l->abort_limit = l->tolerance / jiffies_to_msecs(n->keepalive_intv);
}

static void tipc_node_delete(struct tipc_node *node)
{
	list_del_rcu(&node->list);
	hlist_del_rcu(&node->hash);
	kfree_rcu(node, rcu);
}

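/* tipc_node_stop - cancel the timer of every known node and drop the
 * corresponding references, allowing the node objects to be released.
 */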
void tipc_node_stop(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node, *t_node;

	spin_lock_bh(&tn->node_list_lock);
	list_for_each_entry_safe(node, t_node, &tn->node_list, list) {
		if (del_timer(&node->timer))
			tipc_node_put(node);
		tipc_node_put(node);
	}
	spin_unlock_bh(&tn->node_list_lock);
}

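/* tipc_node_add_conn - register a socket connection with its peer node, so
 * that the socket can be notified (see node_lost_contact()) if contact with
 * that node is lost. Connections within the own node are not tracked.
 */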
int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn;
	int err = 0;

	if (in_own_node(net, dnode))
		return 0;

	node = tipc_node_find(net, dnode);
	if (!node) {
		pr_warn("Connecting sock to node 0x%x failed\n", dnode);
		return -EHOSTUNREACH;
	}
	conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
	if (!conn) {
		err = -EHOSTUNREACH;
		goto exit;
	}
	conn->peer_node = dnode;
	conn->port = port;
	conn->peer_port = peer_port;

	tipc_node_lock(node);
	list_add_tail(&conn->list, &node->conn_sks);
	tipc_node_unlock(node);
exit:
	tipc_node_put(node);
	return err;
}

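/* tipc_node_remove_conn - unregister a previously registered socket connection */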
void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn, *safe;

	if (in_own_node(net, dnode))
		return;

	node = tipc_node_find(net, dnode);
	if (!node)
		return;

	tipc_node_lock(node);
	list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
		if (port != conn->port)
			continue;
		list_del(&conn->list);
		kfree(conn);
	}
	tipc_node_unlock(node);
	tipc_node_put(node);
}

/* tipc_node_timeout - handle expiration of node timer
 */
static void tipc_node_timeout(unsigned long data)
{
	struct tipc_node *n = (struct tipc_node *)data;
	struct sk_buff_head xmitq;
	struct tipc_link *l;
	struct tipc_media_addr *maddr;
	int bearer_id;
	int rc = 0;

	__skb_queue_head_init(&xmitq);

	for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
		tipc_node_lock(n);
		l = n->links[bearer_id].link;
		if (l) {
			/* Link tolerance may change asynchronously: */
			tipc_node_calculate_timer(n, l);
			rc = tipc_link_timeout(l, &xmitq);
			if (rc & TIPC_LINK_DOWN_EVT)
				tipc_link_reset(l);
		}
		tipc_node_unlock(n);
		maddr = &n->links[bearer_id].maddr;
		tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
	}
	if (!mod_timer(&n->timer, jiffies + n->keepalive_intv))
		tipc_node_get(n);
	tipc_node_put(n);
}

/**
 * tipc_node_link_up - handle addition of link
 *
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
void tipc_node_link_up(struct tipc_node *n, int bearer_id)
{
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	struct tipc_link_entry *links = n->links;
	struct tipc_link *l = n->links[bearer_id].link;

	/* Leave room for tunnel header when returning 'mtu' to users: */
	links[bearer_id].mtu = l->mtu - INT_H_SIZE;

	n->working_links++;
	n->action_flags |= TIPC_NOTIFY_LINK_UP;
	n->link_id = l->peer_bearer_id << 16 | l->bearer_id;

	pr_debug("Established link <%s> on network plane %c\n",
		 l->name, l->net_plane);

	/* No active links ? => take both active slots */
	if (*slot0 < 0) {
		*slot0 = bearer_id;
		*slot1 = bearer_id;
		node_established_contact(n);
		return;
	}

	/* Lower prio than current active ? => no slot */
	if (l->priority < links[*slot0].link->priority) {
		pr_debug("New link <%s> becomes standby\n", l->name);
		return;
	}
	tipc_link_dup_queue_xmit(links[*slot0].link, l);

	/* Same prio as current active ? => take one slot */
	if (l->priority == links[*slot0].link->priority) {
		*slot0 = bearer_id;
		return;
	}

	/* Higher prio than current active => take both active slots */
	pr_debug("Old link <%s> now standby\n", links[*slot0].link->name);
	*slot0 = bearer_id;
	*slot1 = bearer_id;
}

/**
 * tipc_node_link_down - handle loss of link
 */
void tipc_node_link_down(struct tipc_node *n, int bearer_id)
{
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	int i, highest = 0;
	struct tipc_link *l, *_l;

	l = n->links[bearer_id].link;
	n->working_links--;
	n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
	n->link_id = l->peer_bearer_id << 16 | l->bearer_id;

	pr_debug("Lost link <%s> on network plane %c\n",
		 l->name, l->net_plane);

	/* Select new active link if any available */
	*slot0 = INVALID_BEARER_ID;
	*slot1 = INVALID_BEARER_ID;
	for (i = 0; i < MAX_BEARERS; i++) {
		_l = n->links[i].link;
		if (!_l || !tipc_link_is_up(_l))
			continue;
		if (_l->priority < highest)
			continue;
		if (_l->priority > highest) {
			highest = _l->priority;
			*slot0 = i;
			*slot1 = i;
			continue;
		}
		*slot1 = i;
	}
	if (tipc_node_is_up(n))
		tipc_link_failover_send_queue(l);
	else
		node_lost_contact(n);
}

bool tipc_node_is_up(struct tipc_node *n)
{
	return n->active_links[0] != INVALID_BEARER_ID;
}

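/* tipc_node_check_dest - report whether the link on the given bearer is up
 * and whether the stored media address matches the one just discovered.
 * tipc_node_update_dest() below creates the link endpoint if necessary,
 * updates the stored media address and resets the link.
 */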
void tipc_node_check_dest(struct tipc_node *n, struct tipc_bearer *b,
			  bool *link_up, bool *addr_match,
			  struct tipc_media_addr *maddr)
{
	struct tipc_link *l = n->links[b->identity].link;
	struct tipc_media_addr *curr = &n->links[b->identity].maddr;

	*link_up = l && tipc_link_is_up(l);
	*addr_match = l && !memcmp(curr, maddr, sizeof(*maddr));
}

bool tipc_node_update_dest(struct tipc_node *n,  struct tipc_bearer *b,
			   struct tipc_media_addr *maddr)
{
	struct tipc_link *l = n->links[b->identity].link;
	struct tipc_media_addr *curr = &n->links[b->identity].maddr;
	struct sk_buff_head *inputq = &n->links[b->identity].inputq;

	if (!l) {
		l = tipc_link_create(n, b, maddr, inputq, &n->bclink.namedq);
		if (!l)
			return false;
		tipc_node_calculate_timer(n, l);
		if (n->link_cnt == 1) {
			if (!mod_timer(&n->timer, jiffies + n->keepalive_intv))
				tipc_node_get(n);
		}
	}
	memcpy(&l->media_addr, maddr, sizeof(*maddr));
	memcpy(curr, maddr, sizeof(*maddr));
	tipc_link_reset(l);
	return true;
}

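/* tipc_node_attach_link/tipc_node_detach_link - bind or unbind a link
 * endpoint to/from its per-bearer slot in the node's link table.
 */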
void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
	n_ptr->links[l_ptr->bearer_id].link = l_ptr;
	n_ptr->link_cnt++;
}

void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
	int i;

	for (i = 0; i < MAX_BEARERS; i++) {
		if (l_ptr != n_ptr->links[i].link)
			continue;
		n_ptr->links[i].link = NULL;
		n_ptr->link_cnt--;
	}
}

/* tipc_node_fsm_evt - node finite state machine
 * Determines when contact is allowed with peer node
 */
static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
{
	int state = n->state;

	switch (state) {
	case SELF_DOWN_PEER_DOWN:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_COMING;
			break;
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_COMING_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		default:
			pr_err("Unknown node fsm evt %x/%x\n", state, evt);
		}
		break;
	case SELF_UP_PEER_UP:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		default:
			pr_err("Unknown node fsm evt %x/%x\n", state, evt);
		}
		break;
	case SELF_DOWN_PEER_LEAVING:
		switch (evt) {
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case SELF_LOST_CONTACT_EVT:
			break;
		default:
			pr_err("Unknown node fsm evt %x/%x\n", state, evt);
		}
		break;
	case SELF_UP_PEER_COMING:
		switch (evt) {
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		default:
			pr_err("Unknown node fsm evt %x/%x\n", state, evt);
		}
		break;
	case SELF_COMING_PEER_UP:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		default:
			pr_err("Unknown node fsm evt %x/%x\n", state, evt);
		}
		break;
	case SELF_LEAVING_PEER_DOWN:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		default:
			pr_err("Unknown node fsm evt %x/%x\n", state, evt);
		}
		break;
	default:
		pr_err("Unknown node fsm state %x\n", state);
		break;
	}

	n->state = state;
}

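/* tipc_node_filter_skb - check whether the node FSM currently permits
 * reception from the peer, feeding establish/lost contact events into the
 * FSM where the arriving packet implies a state change.
 */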
bool tipc_node_filter_skb(struct tipc_node *n, struct tipc_link *l,
			  struct tipc_msg *hdr)
{
	int state = n->state;

	if (likely(state == SELF_UP_PEER_UP))
		return true;

	if (state == SELF_DOWN_PEER_DOWN)
		return true;

	if (state == SELF_UP_PEER_COMING) {
		/* If not traffic msg, peer may still be ESTABLISHING */
		if (tipc_link_is_up(l) && msg_is_traffic(hdr))
			tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
		return true;
	}

	if (state == SELF_COMING_PEER_UP)
		return true;

	if (state == SELF_LEAVING_PEER_DOWN)
		return false;

	if (state == SELF_DOWN_PEER_LEAVING) {
		if (msg_peer_is_up(hdr))
			return false;
		tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
		return true;
	}
	return false;
}

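/* node_established_contact - update node state and broadcast link
 * bookkeeping when first contact with the peer has been established.
 */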
static void node_established_contact(struct tipc_node *n_ptr)
{
	tipc_node_fsm_evt(n_ptr, SELF_ESTABL_CONTACT_EVT);
	n_ptr->action_flags |= TIPC_NOTIFY_NODE_UP;
	n_ptr->bclink.oos_state = 0;
	n_ptr->bclink.acked = tipc_bclink_get_last_sent(n_ptr->net);
	tipc_bclink_add_node(n_ptr->net, n_ptr->addr);
}

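/* node_lost_contact - flush broadcast link state, abort any ongoing link
 * failover, and notify publications and connected sockets when contact with
 * the peer node has been lost.
 */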
static void node_lost_contact(struct tipc_node *n_ptr)
{
	char addr_string[16];
	struct tipc_sock_conn *conn, *safe;
	struct list_head *conns = &n_ptr->conn_sks;
	struct sk_buff *skb;
	struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
	uint i;

	pr_debug("Lost contact with %s\n",
		 tipc_addr_string_fill(addr_string, n_ptr->addr));

	/* Flush broadcast link info associated with lost node */
	if (n_ptr->bclink.recv_permitted) {
		__skb_queue_purge(&n_ptr->bclink.deferdq);

		if (n_ptr->bclink.reasm_buf) {
			kfree_skb(n_ptr->bclink.reasm_buf);
			n_ptr->bclink.reasm_buf = NULL;
		}

		tipc_bclink_remove_node(n_ptr->net, n_ptr->addr);
		tipc_bclink_acknowledge(n_ptr, INVALID_LINK_SEQ);

		n_ptr->bclink.recv_permitted = false;
	}

	/* Abort any ongoing link failover */
	for (i = 0; i < MAX_BEARERS; i++) {
		struct tipc_link *l_ptr = n_ptr->links[i].link;
		if (!l_ptr)
			continue;
		l_ptr->exec_mode = TIPC_LINK_OPEN;
		l_ptr->failover_checkpt = 0;
		l_ptr->failover_pkts = 0;
		kfree_skb(l_ptr->failover_skb);
		l_ptr->failover_skb = NULL;
		tipc_link_reset_fragments(l_ptr);
	}
	/* Prevent re-contact with node until cleanup is done */
	tipc_node_fsm_evt(n_ptr, SELF_LOST_CONTACT_EVT);

	/* Notify publications from this node */
	n_ptr->action_flags |= TIPC_NOTIFY_NODE_DOWN;

	/* Notify sockets connected to node */
	list_for_each_entry_safe(conn, safe, conns, list) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
				      SHORT_H_SIZE, 0, tn->own_addr,
				      conn->peer_node, conn->port,
				      conn->peer_port, TIPC_ERR_NO_NODE);
		if (likely(skb)) {
			skb_queue_tail(n_ptr->inputq, skb);
			n_ptr->action_flags |= TIPC_MSG_EVT;
		}
		list_del(&conn->list);
		kfree(conn);
	}
}

/**
 * tipc_node_get_linkname - get the name of a link
 *
 * @net: the applicable net namespace
 * @bearer_id: id of the bearer
 * @addr: peer node address
 * @linkname: link name output buffer
 * @len: size of the output buffer
 *
 * Returns 0 on success
 */
int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
			   char *linkname, size_t len)
{
	struct tipc_link *link;
	int err = -EINVAL;
	struct tipc_node *node = tipc_node_find(net, addr);

	if (!node)
		return err;

	if (bearer_id >= MAX_BEARERS)
		goto exit;

	tipc_node_lock(node);
	link = node->links[bearer_id].link;
	if (link) {
		strncpy(linkname, link->name, len);
		err = 0;
	}
exit:
	tipc_node_unlock(node);
	tipc_node_put(node);
	return err;
}

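/* tipc_node_unlock - release the node spinlock and carry out whatever
 * actions were flagged on the node while the lock was held (socket and name
 * table delivery, node/link up and down notifications, broadcast handling).
 */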
void tipc_node_unlock(struct tipc_node *node)
{
	struct net *net = node->net;
	u32 addr = 0;
	u32 flags = node->action_flags;
	u32 link_id = 0;
	struct list_head *publ_list;
	struct sk_buff_head *inputq = node->inputq;
	struct sk_buff_head *namedq;

	if (likely(!flags || (flags == TIPC_MSG_EVT))) {
		node->action_flags = 0;
		spin_unlock_bh(&node->lock);
		if (flags == TIPC_MSG_EVT)
			tipc_sk_rcv(net, inputq);
		return;
	}

	addr = node->addr;
	link_id = node->link_id;
	namedq = node->namedq;
	publ_list = &node->publ_list;

	node->action_flags &= ~(TIPC_MSG_EVT |
				TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
				TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP |
				TIPC_WAKEUP_BCAST_USERS | TIPC_BCAST_MSG_EVT |
				TIPC_NAMED_MSG_EVT | TIPC_BCAST_RESET);

	spin_unlock_bh(&node->lock);

	if (flags & TIPC_NOTIFY_NODE_DOWN)
		tipc_publ_notify(net, publ_list, addr);

	if (flags & TIPC_WAKEUP_BCAST_USERS)
		tipc_bclink_wakeup_users(net);

	if (flags & TIPC_NOTIFY_NODE_UP)
		tipc_named_node_up(net, addr);

	if (flags & TIPC_NOTIFY_LINK_UP)
		tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
				     TIPC_NODE_SCOPE, link_id, addr);

	if (flags & TIPC_NOTIFY_LINK_DOWN)
		tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
				      link_id, addr);

	if (flags & TIPC_MSG_EVT)
		tipc_sk_rcv(net, inputq);

	if (flags & TIPC_NAMED_MSG_EVT)
		tipc_named_rcv(net, namedq);

	if (flags & TIPC_BCAST_MSG_EVT)
		tipc_bclink_input(net);

	if (flags & TIPC_BCAST_RESET)
		tipc_link_reset_all(node);
}

/* Caller should hold node lock for the passed node */
static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
{
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_NODE_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_NODE);
	if (!attrs)
		goto msg_full;

	if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr))
		goto attr_msg_full;
	if (tipc_node_is_up(node))
		if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP))
			goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

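/* tipc_node_select_link - pick the outgoing link for a given selector and
 * return its bearer id and media address. The low bit of the selector
 * chooses between the two active link slots.
 */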
static struct tipc_link *tipc_node_select_link(struct tipc_node *n, int sel,
					       int *bearer_id,
					       struct tipc_media_addr **maddr)
{
	int id = n->active_links[sel & 1];

	if (unlikely(id < 0))
		return NULL;

	*bearer_id = id;
	*maddr = &n->links[id].maddr;
	return n->links[id].link;
}

/**
 * tipc_node_xmit() is the general link level function for message sending
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * @dnode: address of destination node
 * @selector: a number used for deterministic link selection
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
 */
int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
		   u32 dnode, int selector)
{
	struct tipc_link *l = NULL;
	struct tipc_node *n;
	struct sk_buff_head xmitq;
	struct tipc_media_addr *maddr;
	int bearer_id;
	int rc = -EHOSTUNREACH;

	__skb_queue_head_init(&xmitq);
	n = tipc_node_find(net, dnode);
	if (likely(n)) {
		tipc_node_lock(n);
		l = tipc_node_select_link(n, selector, &bearer_id, &maddr);
		if (likely(l))
			rc = tipc_link_xmit(l, list, &xmitq);
		if (unlikely(rc == -ENOBUFS))
			tipc_link_reset(l);
		tipc_node_unlock(n);
		tipc_node_put(n);
	}
	if (likely(!rc)) {
		tipc_bearer_xmit(net, bearer_id, &xmitq, maddr);
		return 0;
	}
	if (likely(in_own_node(net, dnode))) {
		tipc_sk_rcv(net, list);
		return 0;
	}
	return rc;
}

/* tipc_node_xmit_skb(): send single buffer to destination
 * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
 * messages, which will not be rejected
 * The only exception is datagram messages rerouted after secondary
 * lookup, which are rare and safe to dispose of anyway.
 * TODO: Return real return value, and let callers use
 * tipc_wait_for_sendpkt() where applicable
 */
int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
		       u32 selector)
{
	struct sk_buff_head head;
	int rc;

	skb_queue_head_init(&head);
	__skb_queue_tail(&head, skb);
	rc = tipc_node_xmit(net, &head, dnode, selector);
	if (rc == -ELINKCONG)
		kfree_skb(skb);
	return 0;
}

/**
 * tipc_rcv - process TIPC packets/messages arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet
 * @b: pointer to the bearer the message arrived on
 *
 * Invoked with no locks held. Bearer pointer must point to a valid bearer
 * structure (i.e. cannot be NULL), but bearer can be inactive.
 */
void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
{
	struct sk_buff_head xmitq;
	struct tipc_node *n;
	struct tipc_link *l;
	struct tipc_msg *hdr;
	struct tipc_media_addr *maddr;
	int bearer_id = b->identity;
	int rc = 0;

	__skb_queue_head_init(&xmitq);

	/* Ensure message is well-formed */
	if (unlikely(!tipc_msg_validate(skb)))
		goto discard;

	/* Handle arrival of a non-unicast link packet */
	hdr = buf_msg(skb);
	if (unlikely(msg_non_seq(hdr))) {
		if (msg_user(hdr) ==  LINK_CONFIG)
			tipc_disc_rcv(net, skb, b);
		else
			tipc_bclink_rcv(net, skb);
		return;
	}

	/* Locate neighboring node that sent packet */
	n = tipc_node_find(net, msg_prevnode(hdr));
	if (unlikely(!n))
		goto discard;
	tipc_node_lock(n);

	/* Locate link endpoint that should handle packet */
	l = n->links[bearer_id].link;
	if (unlikely(!l))
		goto unlock;

	/* Is reception of this packet permitted at the moment ? */
	if (unlikely(n->state != SELF_UP_PEER_UP))
		if (!tipc_node_filter_skb(n, l, hdr))
			goto unlock;

	if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
		tipc_bclink_sync_state(n, hdr);

	/* Release acked broadcast messages */
	if (unlikely(n->bclink.acked != msg_bcast_ack(hdr)))
		tipc_bclink_acknowledge(n, msg_bcast_ack(hdr));

	/* Check protocol and update link state */
	rc = tipc_link_rcv(l, skb, &xmitq);

	if (unlikely(rc & TIPC_LINK_UP_EVT))
		tipc_link_activate(l);
	if (unlikely(rc & TIPC_LINK_DOWN_EVT))
		tipc_link_reset(l);
	skb = NULL;
unlock:
	tipc_node_unlock(n);
	tipc_sk_rcv(net, &n->links[bearer_id].inputq);
	maddr = &n->links[bearer_id].maddr;
	tipc_bearer_xmit(net, bearer_id, &xmitq, maddr);
	tipc_node_put(n);
discard:
	kfree_skb(skb);
}

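/* tipc_nl_node_dump - netlink dump callback; walks the node list under RCU
 * protection and emits one TIPC_NLA_NODE entry per node, resuming after the
 * last dumped address on subsequent invocations.
 */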
int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	int done = cb->args[0];
	int last_addr = cb->args[1];
	struct tipc_node *node;
	struct tipc_nl_msg msg;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (last_addr) {
		node = tipc_node_find(net, last_addr);
		if (!node) {
			rcu_read_unlock();
			/* We never set seq or call nl_dump_check_consistent();
			 * this means that setting prev_seq here will cause the
			 * consistency check to fail in the netlink callback
			 * handler, resulting in the NLMSG_DONE message having
			 * the NLM_F_DUMP_INTR flag set if the node state
			 * changed while we released the lock.
			 */
			cb->prev_seq = 1;
			return -EPIPE;
		}
		tipc_node_put(node);
	}

	list_for_each_entry_rcu(node, &tn->node_list, list) {
		if (last_addr) {
			if (node->addr == last_addr)
				last_addr = 0;
			else
				continue;
		}

		tipc_node_lock(node);
		err = __tipc_nl_add_node(&msg, node);
		if (err) {
			last_addr = node->addr;
			tipc_node_unlock(node);
			goto out;
		}

		tipc_node_unlock(node);
	}
	done = 1;
out:
	cb->args[0] = done;
	cb->args[1] = last_addr;
	rcu_read_unlock();

	return skb->len;
}