/*
 * net/tipc/bcast.c: TIPC broadcast code
 *
 * Copyright (c) 2004-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/tipc_config.h>
#include "socket.h"
#include "msg.h"
#include "bcast.h"
#include "name_distr.h"
#include "link.h"
#include "node.h"

#define	MAX_PKT_DEFAULT_MCAST	1500	/* bcast link max packet size (fixed) */
#define	BCLINK_WIN_DEFAULT	50	/* bcast link window size (default) */
#define	BCLINK_WIN_MIN	        32	/* bcast minimum link window size */

const char tipc_bclink_name[] = "broadcast-link";

/**
 * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link
 * @primary: pointer to primary bearer
 * @secondary: pointer to secondary bearer
 *
 * Bearers must have same priority and same set of reachable destinations
 * to be paired.
 */

struct tipc_bcbearer_pair {
	struct tipc_bearer *primary;
	struct tipc_bearer *secondary;
};

#define	BCBEARER		MAX_BEARERS

/**
 * struct tipc_bcbearer - bearer used by broadcast link
 * @bearer: (non-standard) broadcast bearer structure
 * @media: (non-standard) broadcast media structure
 * @bpairs: array of bearer pairs
 * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
 * @remains: temporary node map used by tipc_bcbearer_send()
 * @remains_new: temporary node map used by tipc_bcbearer_send()
 *
 * Note: The fields labelled "temporary" are incorporated into the bearer
 * to avoid consuming potentially limited stack space through the use of
 * large local variables within multicast routines.  Concurrent access is
 * prevented through use of the spinlock "bcast_lock".
 */
struct tipc_bcbearer {
	struct tipc_bearer bearer;
	struct tipc_media media;
	struct tipc_bcbearer_pair bpairs[MAX_BEARERS];
	struct tipc_bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
	struct tipc_node_map remains;
	struct tipc_node_map remains_new;
};

/**
 * struct tipc_bc_base - link used for broadcast messages
 * @link: (non-standard) broadcast link structure
 * @node: (non-standard) node structure representing b'cast link's peer node
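 * @arrvq: queue of arrived multicast messages awaiting delivery
 * @inputq: queue of messages ready for delivery to local sockets
 * @namedq: queue of arriving name table distribution messages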
 * @bcast_nodes: map of broadcast-capable nodes
 * @retransmit_to: node that most recently requested a retransmit
 *
 * Handles sequence numbering, fragmentation, bundling, etc.
 */
struct tipc_bc_base {
	struct tipc_link *link;
	struct tipc_node node;
	struct sk_buff_head arrvq;
	struct sk_buff_head inputq;
	struct sk_buff_head namedq;
	struct tipc_node_map bcast_nodes;
	struct tipc_node *retransmit_to;
};

static struct tipc_bc_base *tipc_bc_base(struct net *net)
{
	return tipc_net(net)->bcbase;
}

static struct tipc_link *tipc_bc_sndlink(struct net *net)
{
	return tipc_net(net)->bcl;
}

/**
 * tipc_nmap_equal - test for equality of node maps
 */
static int tipc_nmap_equal(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b)
{
	return !memcmp(nm_a, nm_b, sizeof(*nm_a));
}

static void tipc_bcbearer_xmit(struct net *net, struct sk_buff_head *xmitq);
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff);
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);

static void tipc_bclink_lock(struct net *net)
{
	tipc_bcast_lock(net);
}

static void tipc_bclink_unlock(struct net *net)
{
	tipc_bcast_unlock(net);
}

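/* tipc_bclink_input - deliver arrived multicast messages to local sockets */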
void tipc_bclink_input(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_sk_mcast_rcv(net, &tn->bcbase->arrvq, &tn->bcbase->inputq);
}

uint tipc_bcast_get_mtu(void)
{
	return MAX_PKT_DEFAULT_MCAST;
}

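/* Each broadcast packet carries an "ackers" count in its control block:
 * the number of peer nodes that still have to acknowledge it before it
 * can be released from the transmit queue.
 */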
static u16 bcbuf_acks(struct sk_buff *skb)
{
	return TIPC_SKB_CB(skb)->ackers;
}

static void bcbuf_set_acks(struct sk_buff *buf, u16 ackers)
{
	TIPC_SKB_CB(buf)->ackers = ackers;
}

static void bcbuf_decr_acks(struct sk_buff *buf)
{
	bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}

void tipc_bclink_add_node(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l = tipc_bc_sndlink(net);

	tipc_bclink_lock(net);
	tipc_nmap_add(&tn->bcbase->bcast_nodes, addr);
	tipc_link_add_bc_peer(l);
	tipc_bclink_unlock(net);
}

void tipc_bclink_remove_node(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_nmap_remove(&tn->bcbase->bcast_nodes, addr);
	tn->bcl->ackers--;

	/* Last node? => reset backlog queue */
	if (!tn->bcbase->bcast_nodes.count)
		tipc_link_purge_backlog(tn->bcbase->link);

	tipc_bclink_unlock(net);
}

static void bclink_set_last_sent(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	bcl->silent_intv_cnt = mod(bcl->snd_nxt - 1);
}

u32 tipc_bclink_get_last_sent(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return tn->bcl->silent_intv_cnt;
}

static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
{
	node->bclink.last_sent = less_eq(node->bclink.last_sent, seqno) ?
						seqno : node->bclink.last_sent;
}

/**
 * tipc_bclink_retransmit_to - get most recent node to request retransmission
 *
 * Called with bclink_lock locked
 */
struct tipc_node *tipc_bclink_retransmit_to(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return tn->bcbase->retransmit_to;
}

/**
 * bclink_retransmit_pkt - retransmit broadcast packets
 * @after: sequence number of last packet to *not* retransmit
 * @to: sequence number of last packet to retransmit
 *
 * Called with bclink_lock locked
 */
static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
{
	struct sk_buff *skb;
	struct tipc_link *bcl = tn->bcl;

	skb_queue_walk(&bcl->transmq, skb) {
		if (more(buf_seqno(skb), after)) {
			tipc_link_retransmit(bcl, skb, mod(to - after));
			break;
		}
	}
}

/**
 * bclink_prepare_wakeup - prepare users for wakeup after congestion
 * @bcl: broadcast link
 * @resultq: queue for users which can be woken up
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to specified queue for wakeup
 */
static void bclink_prepare_wakeup(struct tipc_link *bcl, struct sk_buff_head *resultq)
{
	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
	int imp, lim;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&bcl->wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		lim = bcl->window + bcl->backlog[imp].limit;
		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
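		/* Keep user in the wait queue if its pending chains would
		 * overrun the backlog limit at this importance level
		 */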
		if ((pnd[imp] + bcl->backlog[imp].len) >= lim)
			continue;
		skb_unlink(skb, &bcl->wakeupq);
		skb_queue_tail(resultq, skb);
	}
}

/**
 * tipc_bclink_wakeup_users - wake up pending users
 *
 * Called with no locks taken
 */
void tipc_bclink_wakeup_users(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct sk_buff_head resultq;

	skb_queue_head_init(&resultq);
	bclink_prepare_wakeup(bcl, &resultq);
	tipc_sk_rcv(net, &resultq);
}

/**
 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
 * @n_ptr: node that sent acknowledgement info
 * @acked: broadcast sequence # that has been acknowledged
 *
 * Node is locked, bclink_lock unlocked.
 */
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
{
	struct sk_buff *skb, *tmp;
	unsigned int released = 0;
	struct net *net = n_ptr->net;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	if (unlikely(!n_ptr->bclink.recv_permitted))
		return;
	tipc_bclink_lock(net);

	/* Bail out if tx queue is empty (no clean up is required) */
	skb = skb_peek(&tn->bcl->transmq);
	if (!skb)
		goto exit;

	/* Determine which messages need to be acknowledged */
	if (acked == INVALID_LINK_SEQ) {
		/*
		 * Contact with specified node has been lost, so need to
		 * acknowledge sent messages only (if other nodes still exist)
		 * or both sent and unsent messages (otherwise)
		 */
		if (tn->bcbase->bcast_nodes.count)
			acked = tn->bcl->silent_intv_cnt;
		else
			acked = tn->bcl->snd_nxt;
	} else {
		/*
		 * Bail out if specified sequence number does not correspond
		 * to a message that has been sent and not yet acknowledged
		 */
		if (less(acked, buf_seqno(skb)) ||
		    less(tn->bcl->silent_intv_cnt, acked) ||
		    less_eq(acked, n_ptr->bclink.acked))
			goto exit;
	}
	/* Skip over packets that node has previously acknowledged */
	skb_queue_walk(&tn->bcl->transmq, skb) {
		if (more(buf_seqno(skb), n_ptr->bclink.acked))
			break;
	}
	/* Update packets that node is now acknowledging */
	skb_queue_walk_from_safe(&tn->bcl->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		bcbuf_decr_acks(skb);
		bclink_set_last_sent(net);
		if (bcbuf_acks(skb) == 0) {
			__skb_unlink(skb, &tn->bcl->transmq);
			kfree_skb(skb);
			released = 1;
		}
	}
	n_ptr->bclink.acked = acked;

	/* Try resolving broadcast link congestion, if necessary */
	if (unlikely(skb_peek(&tn->bcl->backlogq))) {
		tipc_link_push_packets(tn->bcl);
		bclink_set_last_sent(net);
	}
	if (unlikely(released && !skb_queue_empty(&tn->bcl->wakeupq)))
		n_ptr->action_flags |= TIPC_WAKEUP_BCAST_USERS;
exit:
	tipc_bclink_unlock(net);
}

/**
 * tipc_bclink_update_link_state - update broadcast link state
 *
 * RCU and node lock set
 */
void tipc_bclink_update_link_state(struct tipc_node *n_ptr,
				   u32 last_sent)
{
	struct sk_buff *buf;
	struct net *net = n_ptr->net;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	/* Ignore "stale" link state info */
	if (less_eq(last_sent, n_ptr->bclink.last_in))
		return;

	/* Update link synchronization state; quit if in sync */
	bclink_update_last_sent(n_ptr, last_sent);

	/* This is a good location for statistical profiling */
	bcl->stats.queue_sz_counts++;
	bcl->stats.accu_queue_sz += skb_queue_len(&bcl->transmq);

	if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
		return;

	/* Update out-of-sync state; quit if loss is still unconfirmed */
	if ((++n_ptr->bclink.oos_state) == 1) {
		if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
			return;
		n_ptr->bclink.oos_state++;
	}

	/* Don't NACK if one has been recently sent (or seen) */
	if (n_ptr->bclink.oos_state & 0x1)
		return;

	/* Send NACK */
	buf = tipc_buf_acquire(INT_H_SIZE);
	if (buf) {
		struct tipc_msg *msg = buf_msg(buf);
		struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferdq);
		u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;

		tipc_msg_init(tn->own_addr, msg, BCAST_PROTOCOL, STATE_MSG,
			      INT_H_SIZE, n_ptr->addr);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tn->net_id);
		msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_to(msg, to);

		tipc_bclink_lock(net);
		tipc_bearer_send(net, MAX_BEARERS, buf, NULL);
		tn->bcl->stats.sent_nacks++;
		tipc_bclink_unlock(net);
		kfree_skb(buf);

		n_ptr->bclink.oos_state++;
	}
}

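/* tipc_bclink_sync_state - pick up broadcast link synchronization info
 * piggybacked on unicast link protocol messages
 */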
void tipc_bclink_sync_state(struct tipc_node *n, struct tipc_msg *hdr)
{
	u16 last = msg_last_bcast(hdr);
	int mtyp = msg_type(hdr);

	if (unlikely(msg_user(hdr) != LINK_PROTOCOL))
		return;
	if (mtyp == STATE_MSG) {
		tipc_bclink_update_link_state(n, last);
		return;
	}
	/* Compatibility: older nodes don't know BCAST_PROTOCOL synchronization,
	 * and transfer synch info in LINK_PROTOCOL messages.
	 */
	if (tipc_node_is_up(n))
		return;
	if ((mtyp != RESET_MSG) && (mtyp != ACTIVATE_MSG))
		return;
	n->bclink.last_sent = last;
	n->bclink.last_in = last;
	n->bclink.oos_state = 0;
}

/**
 * bclink_peek_nack - monitor retransmission requests sent by other nodes
 *
 * Delay any upcoming NACK by this node if another node has already
 * requested the first message this node is going to ask for.
 */
static void bclink_peek_nack(struct net *net, struct tipc_msg *msg)
{
	struct tipc_node *n_ptr = tipc_node_find(net, msg_destnode(msg));

	if (unlikely(!n_ptr))
		return;

	tipc_node_lock(n_ptr);
	if (n_ptr->bclink.recv_permitted &&
	    (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) &&
	    (n_ptr->bclink.last_in == msg_bcgap_after(msg)))
		n_ptr->bclink.oos_state = 2;
	tipc_node_unlock(n_ptr);
	tipc_node_put(n_ptr);
}

/* tipc_bcast_xmit - deliver buffer chain to all nodes in cluster
 *                   and to identified node local sockets
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
 */
int tipc_bcast_xmit(struct net *net, struct sk_buff_head *list)
{
	struct tipc_link *l = tipc_bc_sndlink(net);
	struct sk_buff_head xmitq, inputq, rcvq;
	int rc = 0;

	__skb_queue_head_init(&rcvq);
	__skb_queue_head_init(&xmitq);
	skb_queue_head_init(&inputq);

	/* Prepare message clone for local node */
	if (unlikely(!tipc_msg_reassemble(list, &rcvq)))
		return -EHOSTUNREACH;

	tipc_bcast_lock(net);
	if (tipc_link_bc_peers(l))
		rc = tipc_link_xmit(l, list, &xmitq);
	bclink_set_last_sent(net);
	tipc_bcast_unlock(net);

	/* Don't send to local node if adding to link failed */
	if (unlikely(rc)) {
		__skb_queue_purge(&rcvq);
		return rc;
	}
	/* Broadcast to all nodes, including local node */
	tipc_bcbearer_xmit(net, &xmitq);
	tipc_sk_mcast_rcv(net, &rcvq, &inputq);
	__skb_queue_purge(list);
	return 0;
}
/**
 * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
 *
 * Called with both sending node's lock and bclink_lock taken.
 */
static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
{
	struct tipc_net *tn = net_generic(node->net, tipc_net_id);

	bclink_update_last_sent(node, seqno);
	node->bclink.last_in = seqno;
	node->bclink.oos_state = 0;
	tn->bcl->stats.recv_info++;

	/*
	 * Unicast an ACK periodically, ensuring that
	 * all nodes in the cluster don't ACK at the same time
	 */
	if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
		tipc_link_proto_xmit(node_active_link(node, node->addr),
				     STATE_MSG, 0, 0, 0, 0);
		tn->bcl->stats.sent_acks++;
	}
}

/**
 * tipc_bclink_rcv - receive a broadcast packet, and deliver upwards
 *
 * RCU is locked, no other locks set
 */
void tipc_bclink_rcv(struct net *net, struct sk_buff *buf)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_node *node;
	u32 next_in;
	u32 seqno;
	int deferred = 0;
	int pos = 0;
	struct sk_buff *iskb;
	struct sk_buff_head *arrvq, *inputq;

	/* Screen out unwanted broadcast messages */
	if (msg_mc_netid(msg) != tn->net_id)
		goto exit;

	node = tipc_node_find(net, msg_prevnode(msg));
	if (unlikely(!node))
		goto exit;
	tipc_node_lock(node);
	if (unlikely(!node->bclink.recv_permitted))
		goto unlock;

	/* Handle broadcast protocol message */
	if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
		if (msg_type(msg) != STATE_MSG)
			goto unlock;
		if (msg_destnode(msg) == tn->own_addr) {
			tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
			tipc_bclink_lock(net);
			bcl->stats.recv_nacks++;
			tn->bcbase->retransmit_to = node;
			bclink_retransmit_pkt(tn, msg_bcgap_after(msg),
					      msg_bcgap_to(msg));
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else {
			tipc_node_unlock(node);
			bclink_peek_nack(net, msg);
		}
		tipc_node_put(node);
		goto exit;
	}
	/* Handle in-sequence broadcast message */
	seqno = msg_seqno(msg);
	next_in = mod(node->bclink.last_in + 1);
	arrvq = &tn->bcbase->arrvq;
	inputq = &tn->bcbase->inputq;

	if (likely(seqno == next_in)) {
receive:
		/* Deliver message to destination */
		if (likely(msg_isdata(msg))) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			spin_lock_bh(&inputq->lock);
			__skb_queue_tail(arrvq, buf);
			spin_unlock_bh(&inputq->lock);
			node->action_flags |= TIPC_BCAST_MSG_EVT;
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else if (msg_user(msg) == MSG_BUNDLER) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			bcl->stats.recv_bundles++;
			bcl->stats.recv_bundled += msg_msgcnt(msg);
			pos = 0;
			while (tipc_msg_extract(buf, &iskb, &pos)) {
				spin_lock_bh(&inputq->lock);
				__skb_queue_tail(arrvq, iskb);
				spin_unlock_bh(&inputq->lock);
			}
			node->action_flags |= TIPC_BCAST_MSG_EVT;
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else if (msg_user(msg) == MSG_FRAGMENTER) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			tipc_buf_append(&node->bclink.reasm_buf, &buf);
			if (unlikely(!buf && !node->bclink.reasm_buf)) {
				tipc_bclink_unlock(net);
				goto unlock;
			}
			bcl->stats.recv_fragments++;
			if (buf) {
				bcl->stats.recv_fragmented++;
				msg = buf_msg(buf);
				tipc_bclink_unlock(net);
				goto receive;
			}
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
			kfree_skb(buf);
		}
		buf = NULL;

		/* Determine new synchronization state */
		tipc_node_lock(node);
		if (unlikely(!tipc_node_is_up(node)))
			goto unlock;

		if (node->bclink.last_in == node->bclink.last_sent)
			goto unlock;

		if (skb_queue_empty(&node->bclink.deferdq)) {
			node->bclink.oos_state = 1;
			goto unlock;
		}

		msg = buf_msg(skb_peek(&node->bclink.deferdq));
		seqno = msg_seqno(msg);
		next_in = mod(next_in + 1);
		if (seqno != next_in)
			goto unlock;

		/* Take in-sequence message from deferred queue & deliver it */
		buf = __skb_dequeue(&node->bclink.deferdq);
		goto receive;
	}

	/* Handle out-of-sequence broadcast message */
	if (less(next_in, seqno)) {
		deferred = tipc_link_defer_pkt(&node->bclink.deferdq,
					       buf);
		bclink_update_last_sent(node, seqno);
		buf = NULL;
	}

	tipc_bclink_lock(net);

	if (deferred)
		bcl->stats.deferred_recv++;
	else
		bcl->stats.duplicates++;

	tipc_bclink_unlock(net);

unlock:
	tipc_node_unlock(node);
	tipc_node_put(node);
exit:
	kfree_skb(buf);
}

u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
{
	return (n_ptr->bclink.recv_permitted &&
		(tipc_bclink_get_last_sent(n_ptr->net) != n_ptr->bclink.acked));
}


/**
 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
 *
 * Send packet over as many bearers as necessary to reach all nodes
 * that have joined the broadcast link.
 *
 * Returns 0 (packet sent successfully) under all circumstances,
 * since the broadcast link's pseudo-bearer never blocks
 */
static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf,
			      struct tipc_bearer *unused1,
			      struct tipc_media_addr *unused2)
{
	int bp_index;
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
	struct tipc_bc_base *bclink = tn->bcbase;

	/* Prepare broadcast link message for reliable transmission,
	 * if first time trying to send it;
	 * preparation is skipped for broadcast link protocol messages
	 * since they are sent in an unreliable manner and don't need it
	 */
	if (likely(!msg_non_seq(buf_msg(buf)))) {
		bcbuf_set_acks(buf, bclink->bcast_nodes.count);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tn->net_id);
		tn->bcl->stats.sent_info++;
		if (WARN_ON(!bclink->bcast_nodes.count)) {
			dump_stack();
			return 0;
		}
	}

	/* Send buffer over bearers until all targets reached */
	bcbearer->remains = bclink->bcast_nodes;

	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
		struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
		struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
		struct tipc_bearer *bp[2] = {p, s};
		struct tipc_bearer *b = bp[msg_link_selector(msg)];
		struct sk_buff *tbuf;

		if (!p)
			break; /* No more bearers to try */
		if (!b)
			b = p;
		tipc_nmap_diff(&bcbearer->remains, &b->nodes,
			       &bcbearer->remains_new);
		if (bcbearer->remains_new.count == bcbearer->remains.count)
			continue; /* Nothing added by bearer pair */

		if (bp_index == 0) {
			/* Use original buffer for first bearer */
			tipc_bearer_send(net, b->identity, buf, &b->bcast_addr);
		} else {
			/* Avoid concurrent buffer access */
			tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
			if (!tbuf)
				break;
			tipc_bearer_send(net, b->identity, tbuf,
					 &b->bcast_addr);
			kfree_skb(tbuf); /* Bearer keeps a clone */
		}
		if (bcbearer->remains_new.count == 0)
			break; /* All targets reached */

		bcbearer->remains = bcbearer->remains_new;
	}

	return 0;
}

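/* tipc_bcbearer_xmit - pass a chain of buffers, one by one, to the
 * broadcast pseudo-bearer
 */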
static void tipc_bcbearer_xmit(struct net *net, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(xmitq, skb, tmp) {
		__skb_dequeue(xmitq);
		tipc_bcbearer_send(net, skb, NULL, NULL);

		/* Until we remove cloning in tipc_l2_send_msg(): */
		kfree_skb(skb);
	}
}

/**
 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
 */
void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
			u32 node, bool action)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
	struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
	struct tipc_bcbearer_pair *bp_curr;
	struct tipc_bearer *b;
	int b_index;
	int pri;

	tipc_bclink_lock(net);

	if (action)
		tipc_nmap_add(nm_ptr, node);
	else
		tipc_nmap_remove(nm_ptr, node);

	/* Group bearers by priority (can assume max of two per priority) */
	memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));

	rcu_read_lock();
	for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
		b = rcu_dereference_rtnl(tn->bearer_list[b_index]);
		if (!b || !b->nodes.count)
			continue;

		if (!bp_temp[b->priority].primary)
			bp_temp[b->priority].primary = b;
		else
			bp_temp[b->priority].secondary = b;
	}
	rcu_read_unlock();

	/* Create array of bearer pairs for broadcasting */
	bp_curr = bcbearer->bpairs;
	memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));

	for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {

		if (!bp_temp[pri].primary)
			continue;

		bp_curr->primary = bp_temp[pri].primary;

		if (bp_temp[pri].secondary) {
			if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
					    &bp_temp[pri].secondary->nodes)) {
				bp_curr->secondary = bp_temp[pri].secondary;
			} else {
				bp_curr++;
				bp_curr->primary = bp_temp[pri].secondary;
			}
		}

		bp_curr++;
	}

	tipc_bclink_unlock(net);
}

static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
{
	int i;
	struct nlattr *nest;

	struct nla_map {
		__u32 key;
		__u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
	};

	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, nest);

	return 0;
msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return 0;

	tipc_bclink_lock(net);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	/* The broadcast link is always up */
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		goto attr_msg_full;

	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
		goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt))
		goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
	if (err)
		goto attr_msg_full;

	tipc_bclink_unlock(net);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	tipc_bclink_unlock(net);
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

int tipc_bclink_reset_stats(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return -ENOPROTOOPT;

	tipc_bclink_lock(net);
	memset(&bcl->stats, 0, sizeof(bcl->stats));
	tipc_bclink_unlock(net);
	return 0;
}

int tipc_bclink_set_queue_limits(struct net *net, u32 limit)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return -ENOPROTOOPT;
	if (limit < BCLINK_WIN_MIN)
		limit = BCLINK_WIN_MIN;
	if (limit > TIPC_MAX_LINK_WIN)
		return -EINVAL;
	tipc_bclink_lock(net);
	tipc_link_set_queue_limits(bcl, limit);
	tipc_bclink_unlock(net);
	return 0;
}

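/* tipc_nl_bc_link_set - configure broadcast link window via netlink */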
int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[])
{
	int err;
	u32 win;
	struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

	if (!attrs[TIPC_NLA_LINK_PROP])
		return -EINVAL;

	err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
	if (err)
		return err;

	if (!props[TIPC_NLA_PROP_WIN])
		return -EOPNOTSUPP;

	win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);

	return tipc_bclink_set_queue_limits(net, win);
}

int tipc_bcast_init(struct net *net)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_bcbearer *bcb = NULL;
	struct tipc_bc_base *bb = NULL;
	struct tipc_link *l = NULL;

	bcb = kzalloc(sizeof(*bcb), GFP_ATOMIC);
	if (!bcb)
		goto enomem;
	tn->bcbearer = bcb;

	bcb->bearer.window = BCLINK_WIN_DEFAULT;
	bcb->bearer.mtu = MAX_PKT_DEFAULT_MCAST;
	bcb->bearer.identity = MAX_BEARERS;

	bcb->bearer.media = &bcb->media;
	bcb->media.send_msg = tipc_bcbearer_send;
	sprintf(bcb->media.name, "tipc-broadcast");
	strcpy(bcb->bearer.name, bcb->media.name);

	bb = kzalloc(sizeof(*bb), GFP_ATOMIC);
	if (!bb)
		goto enomem;
	tn->bcbase = bb;
	__skb_queue_head_init(&bb->arrvq);
	spin_lock_init(&tipc_net(net)->bclock);
	bb->node.net = net;

	if (!tipc_link_bc_create(&bb->node,
				 MAX_PKT_DEFAULT_MCAST,
				 BCLINK_WIN_DEFAULT,
				 0,
				 &bb->inputq,
				 &bb->namedq,
				 &l))
		goto enomem;
	bb->link = l;
	tn->bcl = l;
	rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcb->bearer);
	return 0;
enomem:
	kfree(bcb);
	kfree(bb);
	kfree(l);
	return -ENOMEM;
}

void tipc_bcast_reinit(struct net *net)
{
	struct tipc_bc_base *b = tipc_bc_base(net);

	msg_set_prevnode(b->link->pmsg, tipc_own_addr(net));
}

void tipc_bcast_stop(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_link_purge_queues(tn->bcl);
	tipc_bclink_unlock(net);
	RCU_INIT_POINTER(tn->bearer_list[BCBEARER], NULL);
	synchronize_net();
	kfree(tn->bcbearer);
	kfree(tn->bcbase);
	kfree(tn->bcl);
}

/**
 * tipc_nmap_add - add a node to a node map
 */
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) == 0) {
		nm_ptr->count++;
		nm_ptr->map[w] |= mask;
	}
}

/**
 * tipc_nmap_remove - remove a node from a node map
 */
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) != 0) {
		nm_ptr->map[w] &= ~mask;
		nm_ptr->count--;
	}
}

/**
 * tipc_nmap_diff - find differences between node maps
 * @nm_a: input node map A
 * @nm_b: input node map B
 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
 */
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff)
{
	int stop = ARRAY_SIZE(nm_a->map);
	int w;
	int b;
	u32 map;

	memset(nm_diff, 0, sizeof(*nm_diff));
	for (w = 0; w < stop; w++) {
		map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
		nm_diff->map[w] = map;
		if (map != 0) {
			for (b = 0 ; b < WSIZE; b++) {
				if (map & (1 << b))
					nm_diff->count++;
			}
		}
	}
}