bcast.c 32.7 KB
Newer Older
P
Per Liden 已提交
1 2
/*
 * net/tipc/bcast.c: TIPC broadcast code
3
 *
4
 * Copyright (c) 2004-2006, 2014-2015, Ericsson AB
P
Per Liden 已提交
5
 * Copyright (c) 2004, Intel Corporation.
6
 * Copyright (c) 2005, 2010-2011, Wind River Systems
P
Per Liden 已提交
7 8
 * All rights reserved.
 *
P
Per Liden 已提交
9
 * Redistribution and use in source and binary forms, with or without
P
Per Liden 已提交
10 11
 * modification, are permitted provided that the following conditions are met:
 *
P
Per Liden 已提交
12 13 14 15 16 17 18 19
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
P
Per Liden 已提交
20
 *
P
Per Liden 已提交
21 22 23 24 25 26 27 28 29 30 31 32 33 34
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
P
Per Liden 已提交
35 36 37
 * POSSIBILITY OF SUCH DAMAGE.
 */

38
#include <linux/tipc_config.h>
39 40
#include "socket.h"
#include "msg.h"
P
Per Liden 已提交
41
#include "bcast.h"
42
#include "name_distr.h"
43 44
#include "link.h"
#include "node.h"
P
Per Liden 已提交
45

46
#define	MAX_PKT_DEFAULT_MCAST	1500	/* bcast link max packet size (fixed) */
47 48
#define	BCLINK_WIN_DEFAULT	50	/* bcast link window size (default) */
#define	BCLINK_WIN_MIN	        32	/* bcast minimum link window size */
P
Per Liden 已提交
49

50
const char tipc_bclink_name[] = "broadcast-link";
P
Per Liden 已提交
51

52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100
/**
 * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link
 * @primary: pointer to primary bearer
 * @secondary: pointer to secondary bearer
 *
 * Bearers must have same priority and same set of reachable destinations
 * to be paired.
 */

struct tipc_bcbearer_pair {
	struct tipc_bearer *primary;
	struct tipc_bearer *secondary;
};

/* Pseudo-bearer identity of the broadcast bearer; one past the last real one */
#define	BCBEARER		MAX_BEARERS

/**
 * struct tipc_bcbearer - bearer used by broadcast link
 * @bearer: (non-standard) broadcast bearer structure
 * @media: (non-standard) broadcast media structure
 * @bpairs: array of bearer pairs
 * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
 * @remains: temporary node map used by tipc_bcbearer_send()
 * @remains_new: temporary node map used by tipc_bcbearer_send()
 *
 * Note: The fields labelled "temporary" are incorporated into the bearer
 * to avoid consuming potentially limited stack space through the use of
 * large local variables within multicast routines.  Concurrent access is
 * prevented through use of the spinlock "bcast_lock".
 */
struct tipc_bcbearer {
	struct tipc_bearer bearer;
	struct tipc_media media;
	struct tipc_bcbearer_pair bpairs[MAX_BEARERS];
	struct tipc_bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
	struct tipc_node_map remains;
	struct tipc_node_map remains_new;
};

/**
 * struct tipc_bc_base - link used for broadcast messages
 * @link: (non-standard) broadcast link structure
 * @node: (non-standard) node structure representing b'cast link's peer node
 * @arrvq: queue of arriving multicast messages (drained by tipc_bclink_input())
 * @inputq: queue of messages ready for delivery to local sockets
 * @namedq: queue for name-table distribution traffic (not referenced in this
 *          file — presumably consumed by name_distr; verify against callers)
 * @bcast_nodes: map of broadcast-capable nodes
 * @retransmit_to: node that most recently requested a retransmit
 *
 * Handles sequence numbering, fragmentation, bundling, etc.
 */
struct tipc_bc_base {
	struct tipc_link *link;
	struct tipc_node node;
	struct sk_buff_head arrvq;
	struct sk_buff_head inputq;
	struct sk_buff_head namedq;
	struct tipc_node_map bcast_nodes;
	struct tipc_node *retransmit_to;
};

110 111 112 113 114
static struct tipc_bc_base *tipc_bc_base(struct net *net)
{
	return tipc_net(net)->bcbase;
}

115 116 117 118 119 120 121 122 123
/**
 * tipc_nmap_equal - test for equality of node maps
 *
 * Two maps are equal iff they are bytewise identical.
 */
static int tipc_nmap_equal(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b)
{
	return memcmp(nm_a, nm_b, sizeof(*nm_a)) == 0;
}

124
static void tipc_bcbearer_xmit(struct net *net, struct sk_buff_head *xmitq);
125 126 127
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff);
128 129
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
130
/* tipc_bclink_lock - legacy wrapper around the broadcast-link lock */
static void tipc_bclink_lock(struct net *net)
{
	tipc_bcast_lock(net);
}

135
/* tipc_bclink_unlock - legacy wrapper releasing the broadcast-link lock */
static void tipc_bclink_unlock(struct net *net)
{
	tipc_bcast_unlock(net);
}

140 141 142 143
void tipc_bclink_input(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

144
	tipc_sk_mcast_rcv(net, &tn->bcbase->arrvq, &tn->bcbase->inputq);
145 146
}

147
uint  tipc_bcast_get_mtu(void)
148 149 150 151
{
	return MAX_PKT_DEFAULT_MCAST;
}

152
/* bcbuf_acks - number of peers that still have to acknowledge this buffer */
static u16 bcbuf_acks(struct sk_buff *skb)
{
	return TIPC_SKB_CB(skb)->ackers;
}

157
/* bcbuf_set_acks - record how many peers must acknowledge this buffer */
static void bcbuf_set_acks(struct sk_buff *buf, u16 ackers)
{
	TIPC_SKB_CB(buf)->ackers = ackers;
}

S
Sam Ravnborg 已提交
162
static void bcbuf_decr_acks(struct sk_buff *buf)
P
Per Liden 已提交
163 164 165 166
{
	bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}

167
static void bclink_set_last_sent(struct net *net)
168
{
169 170 171
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

172
	bcl->silent_intv_cnt = mod(bcl->snd_nxt - 1);
173 174
}

175
/* tipc_bclink_get_last_sent - sequence number of the last sent bcast packet
 * (stored in silent_intv_cnt by bclink_set_last_sent())
 */
u32 tipc_bclink_get_last_sent(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return tn->bcl->silent_intv_cnt;
}

182
/* bclink_update_last_sent - advance the peer's last-sent record, never
 * moving it backwards in sequence space
 */
static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
{
	if (less_eq(node->bclink.last_sent, seqno))
		node->bclink.last_sent = seqno;
}

188
/**
 * tipc_bclink_retransmit_to - get most recent node to request retransmission
 *
 * Called with bclink_lock locked
 */
struct tipc_node *tipc_bclink_retransmit_to(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return tn->bcbase->retransmit_to;
}

200
/**
 * bclink_retransmit_pkt - retransmit broadcast packets
 * @after: sequence number of last packet to *not* retransmit
 * @to: sequence number of last packet to retransmit
 *
 * Called with bclink_lock locked
 */
static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
{
	struct sk_buff *skb;
	struct tipc_link *bcl = tn->bcl;

	/* Find first unacknowledged packet beyond 'after'; the link layer
	 * retransmits the following mod(to - after) packets from there
	 */
	skb_queue_walk(&bcl->transmq, skb) {
		if (more(buf_seqno(skb), after)) {
			tipc_link_retransmit(bcl, skb, mod(to - after));
			break;
		}
	}
}

220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243
/**
 * bclink_prepare_wakeup - prepare users for wakeup after congestion
 * @bcl: broadcast link
 * @resultq: queue for users which can be woken up
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to specified queue for wakeup
 */
static void bclink_prepare_wakeup(struct tipc_link *bcl, struct sk_buff_head *resultq)
{
	/* Pending chain sizes accumulated so far, indexed by importance level */
	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
	int imp, lim;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&bcl->wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		lim = bcl->window + bcl->backlog[imp].limit;
		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
		/* Leave the user waiting if its chain would overrun the limit */
		if ((pnd[imp] + bcl->backlog[imp].len) >= lim)
			continue;
		skb_unlink(skb, &bcl->wakeupq);
		skb_queue_tail(resultq, skb);
	}
}

244 245 246 247 248
/**
 * tipc_bclink_wakeup_users - wake up pending users
 *
 * Called with no locks taken
 */
void tipc_bclink_wakeup_users(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct sk_buff_head resultq;

	/* Collect wakeable users on a private queue, then hand them to the
	 * socket layer without holding any link locks
	 */
	skb_queue_head_init(&resultq);
	bclink_prepare_wakeup(bcl, &resultq);
	tipc_sk_rcv(net, &resultq);
}

260
/**
 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
 * @n_ptr: node that sent acknowledgement info
 * @acked: broadcast sequence # that has been acknowledged
 *
 * Node is locked, bclink_lock unlocked.
 */
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
{
	struct sk_buff *skb, *tmp;
	unsigned int released = 0;
	struct net *net = n_ptr->net;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	if (unlikely(!n_ptr->bclink.recv_permitted))
		return;
	tipc_bclink_lock(net);

	/* Bail out if tx queue is empty (no clean up is required) */
	skb = skb_peek(&tn->bcl->transmq);
	if (!skb)
		goto exit;

	/* Determine which messages need to be acknowledged */
	if (acked == INVALID_LINK_SEQ) {
		/*
		 * Contact with specified node has been lost, so need to
		 * acknowledge sent messages only (if other nodes still exist)
		 * or both sent and unsent messages (otherwise)
		 */
		if (tn->bcbase->bcast_nodes.count)
			acked = tn->bcl->silent_intv_cnt;
		else
			acked = tn->bcl->snd_nxt;
	} else {
		/*
		 * Bail out if specified sequence number does not correspond
		 * to a message that has been sent and not yet acknowledged
		 */
		if (less(acked, buf_seqno(skb)) ||
		    less(tn->bcl->silent_intv_cnt, acked) ||
		    less_eq(acked, n_ptr->bclink.acked))
			goto exit;
	}
	/* Skip over packets that node has previously acknowledged */
	skb_queue_walk(&tn->bcl->transmq, skb) {
		if (more(buf_seqno(skb), n_ptr->bclink.acked))
			break;
	}

	/* Update packets that node is now acknowledging */
	skb_queue_walk_from_safe(&tn->bcl->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		bcbuf_decr_acks(skb);
		bclink_set_last_sent(net);
		/* Free the buffer once every peer has acknowledged it */
		if (bcbuf_acks(skb) == 0) {
			__skb_unlink(skb, &tn->bcl->transmq);
			kfree_skb(skb);
			released = 1;
		}
	}
	n_ptr->bclink.acked = acked;

	/* Try resolving broadcast link congestion, if necessary */
	if (unlikely(skb_peek(&tn->bcl->backlogq))) {
		tipc_link_push_packets(tn->bcl);
		bclink_set_last_sent(net);
	}
	/* Defer user wakeup to caller context via node action flag */
	if (unlikely(released && !skb_queue_empty(&tn->bcl->wakeupq)))
		n_ptr->action_flags |= TIPC_WAKEUP_BCAST_USERS;
exit:
	tipc_bclink_unlock(net);
}

334
/**
 * tipc_bclink_update_link_state - update broadcast link state
 *
 * RCU and node lock set
 */
void tipc_bclink_update_link_state(struct tipc_node *n_ptr,
				   u32 last_sent)
{
	struct sk_buff *buf;
	struct net *net = n_ptr->net;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	/* Ignore "stale" link state info */
	if (less_eq(last_sent, n_ptr->bclink.last_in))
		return;

	/* Update link synchronization state; quit if in sync */
	bclink_update_last_sent(n_ptr, last_sent);

	/* This is a good location for statistical profiling */
	bcl->stats.queue_sz_counts++;
	bcl->stats.accu_queue_sz += skb_queue_len(&bcl->transmq);

	if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
		return;

	/* Update out-of-sync state; quit if loss is still unconfirmed */
	if ((++n_ptr->bclink.oos_state) == 1) {
		if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
			return;
		n_ptr->bclink.oos_state++;
	}

	/* Don't NACK if one has been recently sent (or seen) */
	if (n_ptr->bclink.oos_state & 0x1)
		return;

	/* Send NACK */
	buf = tipc_buf_acquire(INT_H_SIZE);
	if (buf) {
		struct tipc_msg *msg = buf_msg(buf);
		struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferdq);
		/* Request retransmission up to (but not including) the first
		 * deferred packet, or everything the peer claims to have sent
		 */
		u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;

		tipc_msg_init(tn->own_addr, msg, BCAST_PROTOCOL, STATE_MSG,
			      INT_H_SIZE, n_ptr->addr);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tn->net_id);
		msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_to(msg, to);

		tipc_bclink_lock(net);
		tipc_bearer_send(net, MAX_BEARERS, buf, NULL);
		tn->bcl->stats.sent_nacks++;
		tipc_bclink_unlock(net);
		kfree_skb(buf);

		n_ptr->bclink.oos_state++;
	}
}

397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419
/* tipc_bclink_sync_state - update broadcast sync state from a unicast
 * link protocol message carrying the peer's last broadcast sequence number
 */
void tipc_bclink_sync_state(struct tipc_node *n, struct tipc_msg *hdr)
{
	u16 last = msg_last_bcast(hdr);
	int mtyp = msg_type(hdr);

	if (unlikely(msg_user(hdr) != LINK_PROTOCOL))
		return;
	if (mtyp == STATE_MSG) {
		tipc_bclink_update_link_state(n, last);
		return;
	}
	/* Compatibility: older nodes don't know BCAST_PROTOCOL synchronization,
	 * and transfer synch info in LINK_PROTOCOL messages.
	 */
	if (tipc_node_is_up(n))
		return;
	if ((mtyp != RESET_MSG) && (mtyp != ACTIVATE_MSG))
		return;
	n->bclink.last_sent = last;
	n->bclink.last_in = last;
	n->bclink.oos_state = 0;
}

420
/**
 * bclink_peek_nack - monitor retransmission requests sent by other nodes
 *
 * Delay any upcoming NACK by this node if another node has already
 * requested the first message this node is going to ask for.
 */
static void bclink_peek_nack(struct net *net, struct tipc_msg *msg)
{
	struct tipc_node *n_ptr = tipc_node_find(net, msg_destnode(msg));

	if (unlikely(!n_ptr))
		return;

	tipc_node_lock(n_ptr);
	/* oos_state = 2 postpones our own NACK (see the oos_state parity
	 * test in tipc_bclink_update_link_state())
	 */
	if (n_ptr->bclink.recv_permitted &&
	    (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) &&
	    (n_ptr->bclink.last_in == msg_bcgap_after(msg)))
		n_ptr->bclink.oos_state = 2;
	tipc_node_unlock(n_ptr);
	tipc_node_put(n_ptr);
}

442
/* tipc_bcast_xmit - deliver buffer chain to all nodes in cluster
 *                    and to identified node local sockets
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
 */
int tipc_bcast_xmit(struct net *net, struct sk_buff_head *list)
{
	struct tipc_link *l = tipc_bc_sndlink(net);
	struct sk_buff_head xmitq, inputq, rcvq;
	int rc = 0;

	__skb_queue_head_init(&rcvq);
	__skb_queue_head_init(&xmitq);
	skb_queue_head_init(&inputq);

	/* Prepare message clone for local node */
	if (unlikely(!tipc_msg_reassemble(list, &rcvq)))
		return -EHOSTUNREACH;

	tipc_bcast_lock(net);
	if (tipc_link_bc_peers(l))
		rc = tipc_link_xmit(l, list, &xmitq);
	bclink_set_last_sent(net);
	tipc_bcast_unlock(net);

	/* Don't send to local node if adding to link failed */
	if (unlikely(rc)) {
		__skb_queue_purge(&rcvq);
		return rc;
	}

	/* Broadcast to all nodes, including local node */
	tipc_bcbearer_xmit(net, &xmitq);
	tipc_sk_mcast_rcv(net, &rcvq, &inputq);
	__skb_queue_purge(list);
	return 0;
}
481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608

/* tipc_bcast_rcv - receive a broadcast packet, and deliver to rcv link
 *
 * RCU is locked, no other locks set
 */
int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
	struct sk_buff_head xmitq;
	int rc;

	__skb_queue_head_init(&xmitq);

	/* Drop packets from other TIPC networks or while link is down */
	if (msg_mc_netid(hdr) != tipc_netid(net) || !tipc_link_is_up(l)) {
		kfree_skb(skb);
		return 0;
	}

	tipc_bcast_lock(net);
	if (msg_user(hdr) == BCAST_PROTOCOL)
		rc = tipc_link_bc_nack_rcv(l, skb, &xmitq);
	else
		rc = tipc_link_rcv(l, skb, NULL);
	tipc_bcast_unlock(net);

	/* Transmit any retransmissions/protocol replies generated above */
	if (!skb_queue_empty(&xmitq))
		tipc_bcbearer_xmit(net, &xmitq);

	/* Any socket wakeup messages ? */
	if (!skb_queue_empty(inputq))
		tipc_sk_rcv(net, inputq);

	return rc;
}

/* tipc_bcast_ack_rcv - receive and handle a broadcast acknowledge
 *
 * RCU is locked, no other locks set
 */
void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, u32 acked)
{
	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);

	tipc_bcast_lock(net);
	tipc_link_bc_ack_rcv(l, acked, &xmitq);
	tipc_bcast_unlock(net);

	tipc_bcbearer_xmit(net, &xmitq);

	/* Any socket wakeup messages ? */
	if (!skb_queue_empty(inputq))
		tipc_sk_rcv(net, inputq);
}

/* tipc_bcast_sync_rcv - check and update rcv link with peer's send state
 *
 * RCU is locked, no other locks set
 */
void tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
			 struct tipc_msg *hdr)
{
	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);

	tipc_bcast_lock(net);
	if (msg_type(hdr) == STATE_MSG) {
		tipc_link_bc_ack_rcv(l, msg_bcast_ack(hdr), &xmitq);
		tipc_link_bc_sync_rcv(l, hdr, &xmitq);
	} else {
		/* Non-STATE protocol msg initializes the rcv link state */
		tipc_link_bc_init_rcv(l, hdr);
	}
	tipc_bcast_unlock(net);

	tipc_bcbearer_xmit(net, &xmitq);

	/* Any socket wakeup messages ? */
	if (!skb_queue_empty(inputq))
		tipc_sk_rcv(net, inputq);
}

/* tipc_bcast_add_peer - add a peer node to broadcast link and bearer
 *
 * RCU is locked, node lock is set
 */
void tipc_bcast_add_peer(struct net *net, u32 addr, struct tipc_link *uc_l,
			 struct sk_buff_head *xmitq)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *snd_l = tipc_bc_sndlink(net);

	tipc_bclink_lock(net);
	tipc_nmap_add(&tn->bcbase->bcast_nodes, addr);
	tipc_link_add_bc_peer(snd_l, uc_l, xmitq);
	tipc_bclink_unlock(net);
}

/* tipc_bcast_remove_peer - remove a peer node from broadcast link and bearer
 *
 * RCU is locked, node lock is set
 */
void tipc_bcast_remove_peer(struct net *net, u32 addr,
			    struct tipc_link *rcv_l)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
	struct tipc_link *snd_l = tipc_bc_sndlink(net);
	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);

	tipc_bclink_lock(net);
	tipc_nmap_remove(&tn->bcbase->bcast_nodes, addr);
	tipc_link_remove_bc_peer(snd_l, rcv_l, &xmitq);
	tipc_bclink_unlock(net);

	/* Removing a peer may have released buffers / produced protocol msgs */
	tipc_bcbearer_xmit(net, &xmitq);

	/* Any socket wakeup messages ? */
	if (!skb_queue_empty(inputq))
		tipc_sk_rcv(net, inputq);
}

609
/**
 * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
 *
 * Called with both sending node's lock and bclink_lock taken.
 */
static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
{
	struct tipc_net *tn = net_generic(node->net, tipc_net_id);

	bclink_update_last_sent(node, seqno);
	node->bclink.last_in = seqno;
	node->bclink.oos_state = 0;
	tn->bcl->stats.recv_info++;

	/*
	 * Unicast an ACK periodically, ensuring that
	 * all nodes in the cluster don't ACK at the same time
	 */
	if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
		tipc_link_proto_xmit(node_active_link(node, node->addr),
				     STATE_MSG, 0, 0, 0, 0);
		tn->bcl->stats.sent_acks++;
	}
}

634
/**
 * tipc_bclink_rcv - receive a broadcast packet, and deliver upwards
 *
 * RCU is locked, no other locks set
 */
void tipc_bclink_rcv(struct net *net, struct sk_buff *buf)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_node *node;
	u32 next_in;
	u32 seqno;
	int deferred = 0;
	int pos = 0;
	struct sk_buff *iskb;
	struct sk_buff_head *arrvq, *inputq;

	/* Screen out unwanted broadcast messages */
	if (msg_mc_netid(msg) != tn->net_id)
		goto exit;

	node = tipc_node_find(net, msg_prevnode(msg));
	if (unlikely(!node))
		goto exit;
	tipc_node_lock(node);
	if (unlikely(!node->bclink.recv_permitted))
		goto unlock;

	/* Handle broadcast protocol message */
	if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
		if (msg_type(msg) != STATE_MSG)
			goto unlock;
		if (msg_destnode(msg) == tn->own_addr) {
			/* NACK addressed to us: ack + retransmit the gap */
			tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
			tipc_bclink_lock(net);
			bcl->stats.recv_nacks++;
			tn->bcbase->retransmit_to = node;
			bclink_retransmit_pkt(tn, msg_bcgap_after(msg),
					      msg_bcgap_to(msg));
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else {
			/* NACK for another node: maybe delay our own NACK */
			tipc_node_unlock(node);
			bclink_peek_nack(net, msg);
		}
		tipc_node_put(node);
		goto exit;
	}
	/* Handle in-sequence broadcast message */
	seqno = msg_seqno(msg);
	next_in = mod(node->bclink.last_in + 1);
	arrvq = &tn->bcbase->arrvq;
	inputq = &tn->bcbase->inputq;

	if (likely(seqno == next_in)) {
receive:
		/* Deliver message to destination */
		if (likely(msg_isdata(msg))) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			spin_lock_bh(&inputq->lock);
			__skb_queue_tail(arrvq, buf);
			spin_unlock_bh(&inputq->lock);
			node->action_flags |= TIPC_BCAST_MSG_EVT;
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else if (msg_user(msg) == MSG_BUNDLER) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			bcl->stats.recv_bundles++;
			bcl->stats.recv_bundled += msg_msgcnt(msg);
			/* Unpack each bundled message onto the arrival queue */
			pos = 0;
			while (tipc_msg_extract(buf, &iskb, &pos)) {
				spin_lock_bh(&inputq->lock);
				__skb_queue_tail(arrvq, iskb);
				spin_unlock_bh(&inputq->lock);
			}
			node->action_flags |= TIPC_BCAST_MSG_EVT;
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else if (msg_user(msg) == MSG_FRAGMENTER) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			tipc_buf_append(&node->bclink.reasm_buf, &buf);
			/* Reassembly error: both buf and reasm_buf gone */
			if (unlikely(!buf && !node->bclink.reasm_buf)) {
				tipc_bclink_unlock(net);
				goto unlock;
			}
			bcl->stats.recv_fragments++;
			if (buf) {
				/* Last fragment: deliver the rebuilt message */
				bcl->stats.recv_fragmented++;
				msg = buf_msg(buf);
				tipc_bclink_unlock(net);
				goto receive;
			}
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else {
			/* Other users: accept for sequencing, then discard */
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
			kfree_skb(buf);
		}
		buf = NULL;

		/* Determine new synchronization state */
		tipc_node_lock(node);
		if (unlikely(!tipc_node_is_up(node)))
			goto unlock;

		if (node->bclink.last_in == node->bclink.last_sent)
			goto unlock;

		if (skb_queue_empty(&node->bclink.deferdq)) {
			node->bclink.oos_state = 1;
			goto unlock;
		}

		msg = buf_msg(skb_peek(&node->bclink.deferdq));
		seqno = msg_seqno(msg);
		next_in = mod(next_in + 1);
		if (seqno != next_in)
			goto unlock;

		/* Take in-sequence message from deferred queue & deliver it */
		buf = __skb_dequeue(&node->bclink.deferdq);
		goto receive;
	}

	/* Handle out-of-sequence broadcast message */
	if (less(next_in, seqno)) {
		deferred = tipc_link_defer_pkt(&node->bclink.deferdq,
					       buf);
		bclink_update_last_sent(node, seqno);
		buf = NULL;
	}

	tipc_bclink_lock(net);

	if (deferred)
		bcl->stats.deferred_recv++;
	else
		bcl->stats.duplicates++;

	tipc_bclink_unlock(net);

unlock:
	tipc_node_unlock(node);
	tipc_node_put(node);
exit:
	kfree_skb(buf);
}

789
/* tipc_bclink_acks_missing - true if this node still owes us broadcast ACKs */
u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
{
	return (n_ptr->bclink.recv_permitted &&
		(tipc_bclink_get_last_sent(n_ptr->net) != n_ptr->bclink.acked));
}


/**
 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
 *
 * Send packet over as many bearers as necessary to reach all nodes
 * that have joined the broadcast link.
 *
 * Returns 0 (packet sent successfully) under all circumstances,
 * since the broadcast link's pseudo-bearer never blocks
 */
static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf,
			      struct tipc_bearer *unused1,
			      struct tipc_media_addr *unused2)
{
	int bp_index;
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
	struct tipc_bc_base *bclink = tn->bcbase;

	/* Prepare broadcast link message for reliable transmission,
	 * if first time trying to send it;
	 * preparation is skipped for broadcast link protocol messages
	 * since they are sent in an unreliable manner and don't need it
	 */
	if (likely(!msg_non_seq(buf_msg(buf)))) {
		bcbuf_set_acks(buf, bclink->bcast_nodes.count);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tn->net_id);
		tn->bcl->stats.sent_info++;
		if (WARN_ON(!bclink->bcast_nodes.count)) {
			dump_stack();
			return 0;
		}
	}
	msg_set_mc_netid(msg, tn->net_id);

	/* Send buffer over bearers until all targets reached */
	bcbearer->remains = bclink->bcast_nodes;

	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
		struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
		struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
		struct tipc_bearer *bp[2] = {p, s};
		/* Alternate between primary/secondary via link selector */
		struct tipc_bearer *b = bp[msg_link_selector(msg)];
		struct sk_buff *tbuf;

		if (!p)
			break; /* No more bearers to try */
		if (!b)
			b = p;
		tipc_nmap_diff(&bcbearer->remains, &b->nodes,
			       &bcbearer->remains_new);
		if (bcbearer->remains_new.count == bcbearer->remains.count)
			continue; /* Nothing added by bearer pair */

		if (bp_index == 0) {
			/* Use original buffer for first bearer */
			tipc_bearer_send(net, b->identity, buf, &b->bcast_addr);
		} else {
			/* Avoid concurrent buffer access */
			tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
			if (!tbuf)
				break;
			tipc_bearer_send(net, b->identity, tbuf,
					 &b->bcast_addr);
			kfree_skb(tbuf); /* Bearer keeps a clone */
		}
		if (bcbearer->remains_new.count == 0)
			break; /* All targets reached */

		bcbearer->remains = bcbearer->remains_new;
	}

	return 0;
}

872 873 874 875 876 877 878 879 880 881 882 883 884
static void tipc_bcbearer_xmit(struct net *net, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(xmitq, skb, tmp) {
		__skb_dequeue(xmitq);
		tipc_bcbearer_send(net, skb, NULL, NULL);

		/* Until we remove cloning in tipc_l2_send_msg(): */
		kfree_skb(skb);
	}
}

P
Per Liden 已提交
885
/**
886
 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
P
Per Liden 已提交
887
 */
888 889
void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
			u32 node, bool action)
P
Per Liden 已提交
890
{
891
	struct tipc_net *tn = net_generic(net, tipc_net_id);
892
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
893 894
	struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
	struct tipc_bcbearer_pair *bp_curr;
Y
Ying Xue 已提交
895
	struct tipc_bearer *b;
P
Per Liden 已提交
896 897 898
	int b_index;
	int pri;

899
	tipc_bclink_lock(net);
P
Per Liden 已提交
900

901 902 903 904 905
	if (action)
		tipc_nmap_add(nm_ptr, node);
	else
		tipc_nmap_remove(nm_ptr, node);

P
Per Liden 已提交
906 907 908
	/* Group bearers by priority (can assume max of two per priority) */
	memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));

Y
Ying Xue 已提交
909
	rcu_read_lock();
P
Per Liden 已提交
910
	for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
911
		b = rcu_dereference_rtnl(tn->bearer_list[b_index]);
912
		if (!b || !b->nodes.count)
P
Per Liden 已提交
913 914 915 916 917 918 919
			continue;

		if (!bp_temp[b->priority].primary)
			bp_temp[b->priority].primary = b;
		else
			bp_temp[b->priority].secondary = b;
	}
Y
Ying Xue 已提交
920
	rcu_read_unlock();
P
Per Liden 已提交
921 922 923 924 925

	/* Create array of bearer pairs for broadcasting */
	bp_curr = bcbearer->bpairs;
	memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));

P
Per Liden 已提交
926
	for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {
P
Per Liden 已提交
927 928 929 930 931 932 933

		if (!bp_temp[pri].primary)
			continue;

		bp_curr->primary = bp_temp[pri].primary;

		if (bp_temp[pri].secondary) {
934 935
			if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
					    &bp_temp[pri].secondary->nodes)) {
P
Per Liden 已提交
936 937 938 939 940 941 942 943 944 945
				bp_curr->secondary = bp_temp[pri].secondary;
			} else {
				bp_curr++;
				bp_curr->primary = bp_temp[pri].secondary;
			}
		}

		bp_curr++;
	}

946
	tipc_bclink_unlock(net);
P
Per Liden 已提交
947 948
}

949 950
static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999
{
	int i;
	struct nlattr *nest;

	struct nla_map {
		__u32 key;
		__u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
	};

	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i <  ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, nest);

	return 0;
msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

1000
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
1001 1002 1003 1004 1005
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
1006 1007
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
1008 1009 1010 1011

	if (!bcl)
		return 0;

1012
	tipc_bclink_lock(net);
1013

1014
	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	/* The broadcast link is always up */
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		goto attr_msg_full;

	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
		goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
		goto attr_msg_full;
1031
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt))
1032
		goto attr_msg_full;
1033
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt))
1034 1035 1036 1037 1038
		goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
1039
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
1040 1041 1042 1043 1044 1045 1046
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
	if (err)
		goto attr_msg_full;

1047
	tipc_bclink_unlock(net);
1048 1049 1050 1051 1052 1053 1054 1055 1056 1057
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
1058
	tipc_bclink_unlock(net);
1059 1060 1061 1062
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
P
Per Liden 已提交
1063

1064
int tipc_bclink_reset_stats(struct net *net)
P
Per Liden 已提交
1065
{
1066 1067 1068
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

P
Per Liden 已提交
1069 1070 1071
	if (!bcl)
		return -ENOPROTOOPT;

1072
	tipc_bclink_lock(net);
P
Per Liden 已提交
1073
	memset(&bcl->stats, 0, sizeof(bcl->stats));
1074
	tipc_bclink_unlock(net);
1075
	return 0;
P
Per Liden 已提交
1076 1077
}

1078
/* tipc_bclink_set_queue_limits - set the broadcast link send window.
 * Values below BCLINK_WIN_MIN are silently raised to the minimum;
 * values above TIPC_MAX_LINK_WIN are rejected with -EINVAL.
 */
int tipc_bclink_set_queue_limits(struct net *net, u32 limit)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	u32 win = limit;

	if (!bcl)
		return -ENOPROTOOPT;
	if (win > TIPC_MAX_LINK_WIN)
		return -EINVAL;
	if (win < BCLINK_WIN_MIN)
		win = BCLINK_WIN_MIN;

	tipc_bclink_lock(net);
	tipc_link_set_queue_limits(bcl, win);
	tipc_bclink_unlock(net);

	return 0;
}

1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115
/* tipc_nl_bc_link_set - netlink handler for changing broadcast link
 * properties. Only the window (TIPC_NLA_PROP_WIN) property is supported.
 */
int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[])
{
	struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
	int err;

	/* A LINK_PROP nest is mandatory for this operation */
	if (!attrs[TIPC_NLA_LINK_PROP])
		return -EINVAL;

	err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
	if (err)
		return err;

	/* Window is the only broadcast link property that can be changed */
	if (!props[TIPC_NLA_PROP_WIN])
		return -EOPNOTSUPP;

	return tipc_bclink_set_queue_limits(net,
					    nla_get_u32(props[TIPC_NLA_PROP_WIN]));
}

1116
/* tipc_bcast_init - create the per-namespace broadcast infrastructure:
 * the pseudo broadcast bearer, the broadcast base structure, and the
 * broadcast link itself. Returns 0 on success, -ENOMEM on allocation
 * failure (all partially allocated objects are freed).
 */
int tipc_bcast_init(struct net *net)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_bcbearer *bcb = NULL;
	struct tipc_bc_base *bb = NULL;
	struct tipc_link *l = NULL;

	bcb = kzalloc(sizeof(*bcb), GFP_ATOMIC);
	if (!bcb)
		goto enomem;
	tn->bcbearer = bcb;

	/* The pseudo bearer sits at index MAX_BEARERS, past all real bearers */
	bcb->bearer.window = BCLINK_WIN_DEFAULT;
	bcb->bearer.mtu = MAX_PKT_DEFAULT_MCAST;
	bcb->bearer.identity = MAX_BEARERS;

	bcb->bearer.media = &bcb->media;
	bcb->media.send_msg = tipc_bcbearer_send;
	sprintf(bcb->media.name, "tipc-broadcast");
	strcpy(bcb->bearer.name, bcb->media.name);

	bb = kzalloc(sizeof(*bb), GFP_ATOMIC);
	if (!bb)
		goto enomem;
	tn->bcbase = bb;
	__skb_queue_head_init(&bb->arrvq);
	spin_lock_init(&tipc_net(net)->bclock);
	bb->node.net = net;

	/* NOTE(review): tipc_link_bc_create() appears to return nonzero on
	 * success (failure branch is "if (!...)"); on failure l stays NULL,
	 * so kfree(l) below is a no-op - confirm against its definition
	 */
	if (!tipc_link_bc_create(&bb->node, 0, 0,
				 MAX_PKT_DEFAULT_MCAST,
				 BCLINK_WIN_DEFAULT,
				 0,
				 &bb->inputq,
				 &bb->namedq,
				 NULL,
				 &l))
		goto enomem;
	bb->link = l;
	tn->bcl = l;
	/* Publish the pseudo bearer to RCU readers only when fully set up */
	rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcb->bearer);
	return 0;
enomem:
	/* kfree(NULL) is a no-op, so one shared cleanup label suffices */
	kfree(bcb);
	kfree(bb);
	kfree(l);
	return -ENOMEM;
}

1165 1166 1167 1168
void tipc_bcast_reinit(struct net *net)
{
	struct tipc_bc_base *b = tipc_bc_base(net);

1169
	msg_set_prevnode(b->link->pmsg, tipc_own_addr(net));
1170 1171
}

1172
/* tipc_bcast_stop - tear down the per-namespace broadcast infrastructure.
 * Must unpublish the pseudo bearer and wait for all RCU readers before
 * freeing the underlying objects.
 */
void tipc_bcast_stop(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	/* Drop any queued broadcast packets under the link lock */
	tipc_bclink_lock(net);
	tipc_link_purge_queues(tn->bcl);
	tipc_bclink_unlock(net);
	/* Unpublish the pseudo bearer, then wait out RCU readers that may
	 * still hold a reference before freeing anything
	 */
	RCU_INIT_POINTER(tn->bearer_list[BCBEARER], NULL);
	synchronize_net();
	kfree(tn->bcbearer);
	kfree(tn->bcbase);
	kfree(tn->bcl);
}

1186 1187 1188
/**
 * tipc_nmap_add - add a node to a node map
 */
1189
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) == 0) {
		nm_ptr->count++;
		nm_ptr->map[w] |= mask;
	}
}

/**
 * tipc_nmap_remove - remove a node from a node map
 */
1204
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) != 0) {
		nm_ptr->map[w] &= ~mask;
		nm_ptr->count--;
	}
}

/**
 * tipc_nmap_diff - find differences between node maps
 * @nm_a: input node map A
 * @nm_b: input node map B
 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
 */
1222 1223 1224
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff)
1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242
{
	int stop = ARRAY_SIZE(nm_a->map);
	int w;
	int b;
	u32 map;

	memset(nm_diff, 0, sizeof(*nm_diff));
	for (w = 0; w < stop; w++) {
		map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
		nm_diff->map[w] = map;
		if (map != 0) {
			for (b = 0 ; b < WSIZE; b++) {
				if (map & (1 << b))
					nm_diff->count++;
			}
		}
	}
}