/*
 * net/tipc/bcast.c: TIPC broadcast code
 *
 * Copyright (c) 2004-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/tipc_config.h>
#include "socket.h"
#include "msg.h"
#include "bcast.h"
#include "name_distr.h"
#include "link.h"
#include "node.h"

#define	MAX_PKT_DEFAULT_MCAST	1500	/* bcast link max packet size (fixed) */
#define	BCLINK_WIN_DEFAULT	50	/* bcast link window size (default) */
#define	BCLINK_WIN_MIN	        32	/* bcast minimum link window size */

const char tipc_bclink_name[] = "broadcast-link";

/**
 * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link
 * @primary: pointer to primary bearer
 * @secondary: pointer to secondary bearer
 *
 * Bearers must have same priority and same set of reachable destinations
 * to be paired.
 */

struct tipc_bcbearer_pair {
	struct tipc_bearer *primary;
	struct tipc_bearer *secondary;
};

#define	BCBEARER		MAX_BEARERS

/**
 * struct tipc_bcbearer - bearer used by broadcast link
 * @bearer: (non-standard) broadcast bearer structure
 * @media: (non-standard) broadcast media structure
 * @bpairs: array of bearer pairs
 * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
 * @remains: temporary node map used by tipc_bcbearer_send()
 * @remains_new: temporary node map used by tipc_bcbearer_send()
 *
 * Note: The fields labelled "temporary" are incorporated into the bearer
 * to avoid consuming potentially limited stack space through the use of
 * large local variables within multicast routines.  Concurrent access is
 * prevented through use of the spinlock "bclink_lock".
 */
struct tipc_bcbearer {
	struct tipc_bearer bearer;
	struct tipc_media media;
	struct tipc_bcbearer_pair bpairs[MAX_BEARERS];
	struct tipc_bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
	struct tipc_node_map remains;
	struct tipc_node_map remains_new;
};

/**
 * struct tipc_bc_base - link used for broadcast messages
 * @link: (non-standard) broadcast link structure
 * @node: (non-standard) node structure representing b'cast link's peer node
 * @arrvq: queue of newly arrived broadcast messages awaiting delivery
 * @inputq: queue of broadcast messages ready for delivery to local sockets
 * @bcast_nodes: map of broadcast-capable nodes
 * @retransmit_to: node that most recently requested a retransmit
 *
 * Handles sequence numbering, fragmentation, bundling, etc.
 */
struct tipc_bc_base {
	struct tipc_link link;
	struct tipc_node node;
	struct sk_buff_head arrvq;
	struct sk_buff_head inputq;
	struct tipc_node_map bcast_nodes;
	struct tipc_node *retransmit_to;
};

/**
 * tipc_nmap_equal - test for equality of node maps
 */
static int tipc_nmap_equal(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b)
{
	return !memcmp(nm_a, nm_b, sizeof(*nm_a));
}

static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff);
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);

static void tipc_bclink_lock(struct net *net)
{
	tipc_bcast_lock(net);
}

static void tipc_bclink_unlock(struct net *net)
{
	tipc_bcast_unlock(net);
}

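/**
 * tipc_bclink_input - deliver pending arrival-queue messages to local sockets
 * @net: the applicable net namespace
 */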
void tipc_bclink_input(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_sk_mcast_rcv(net, &tn->bcbase->arrvq, &tn->bcbase->inputq);
}

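/**
 * tipc_bcast_get_mtu - get the fixed MTU used by the broadcast link
 */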
uint  tipc_bcast_get_mtu(void)
{
	return MAX_PKT_DEFAULT_MCAST;
}

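/* The number of acknowledgements still expected for a broadcast buffer is
 * stashed in the buffer's control block 'handle' field; the buffer is
 * released once this count reaches zero.
 */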
static u32 bcbuf_acks(struct sk_buff *buf)
{
	return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
}

static void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
{
	TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
}

static void bcbuf_decr_acks(struct sk_buff *buf)
{
	bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}

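/**
 * tipc_bclink_add_node - add a node to the map of broadcast destinations
 * @net: the applicable net namespace
 * @addr: address of the node to add
 */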
void tipc_bclink_add_node(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_nmap_add(&tn->bcbase->bcast_nodes, addr);
	tipc_bclink_unlock(net);
}

void tipc_bclink_remove_node(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_nmap_remove(&tn->bcbase->bcast_nodes, addr);

	/* Last node? => reset backlog queue */
	if (!tn->bcbase->bcast_nodes.count)
		tipc_link_purge_backlog(&tn->bcbase->link);

	tipc_bclink_unlock(net);
}

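/* The broadcast link reuses its 'silent_intv_cnt' field to remember the
 * sequence number of the last packet sent on the link.
 */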
static void bclink_set_last_sent(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	bcl->silent_intv_cnt = mod(bcl->snd_nxt - 1);
}

u32 tipc_bclink_get_last_sent(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return tn->bcl->silent_intv_cnt;
}

static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
{
	node->bclink.last_sent = less_eq(node->bclink.last_sent, seqno) ?
						seqno : node->bclink.last_sent;
}

/**
 * tipc_bclink_retransmit_to - get most recent node to request retransmission
 *
 * Called with bclink_lock locked
 */
struct tipc_node *tipc_bclink_retransmit_to(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return tn->bcbase->retransmit_to;
}

/**
 * bclink_retransmit_pkt - retransmit broadcast packets
 * @after: sequence number of last packet to *not* retransmit
 * @to: sequence number of last packet to retransmit
 *
 * Called with bclink_lock locked
 */
static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
{
	struct sk_buff *skb;
	struct tipc_link *bcl = tn->bcl;

	skb_queue_walk(&bcl->transmq, skb) {
		if (more(buf_seqno(skb), after)) {
			tipc_link_retransmit(bcl, skb, mod(to - after));
			break;
		}
	}
}

/**
 * bclink_prepare_wakeup - prepare users for wakeup after congestion
 * @bcl: broadcast link
 * @resultq: queue for users which can be woken up
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to specified queue for wakeup
 */
static void bclink_prepare_wakeup(struct tipc_link *bcl, struct sk_buff_head *resultq)
{
	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
	int imp, lim;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&bcl->wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		lim = bcl->window + bcl->backlog[imp].limit;
		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
		if ((pnd[imp] + bcl->backlog[imp].len) >= lim)
			continue;
		skb_unlink(skb, &bcl->wakeupq);
		skb_queue_tail(resultq, skb);
	}
}

/**
 * tipc_bclink_wakeup_users - wake up pending users
 *
 * Called with no locks taken
 */
void tipc_bclink_wakeup_users(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct sk_buff_head resultq;

	skb_queue_head_init(&resultq);
	bclink_prepare_wakeup(bcl, &resultq);
	tipc_sk_rcv(net, &resultq);
}

/**
 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
 * @n_ptr: node that sent acknowledgement info
 * @acked: broadcast sequence # that has been acknowledged
 *
 * Node is locked, bclink_lock unlocked.
 */
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
{
	struct sk_buff *skb, *tmp;
	unsigned int released = 0;
	struct net *net = n_ptr->net;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	if (unlikely(!n_ptr->bclink.recv_permitted))
		return;

	tipc_bclink_lock(net);

	/* Bail out if tx queue is empty (no clean up is required) */
	skb = skb_peek(&tn->bcl->transmq);
	if (!skb)
		goto exit;

	/* Determine which messages need to be acknowledged */
	if (acked == INVALID_LINK_SEQ) {
		/*
		 * Contact with specified node has been lost, so need to
		 * acknowledge sent messages only (if other nodes still exist)
		 * or both sent and unsent messages (otherwise)
		 */
		if (tn->bcbase->bcast_nodes.count)
			acked = tn->bcl->silent_intv_cnt;
		else
			acked = tn->bcl->snd_nxt;
	} else {
		/*
		 * Bail out if specified sequence number does not correspond
		 * to a message that has been sent and not yet acknowledged
		 */
		if (less(acked, buf_seqno(skb)) ||
		    less(tn->bcl->silent_intv_cnt, acked) ||
		    less_eq(acked, n_ptr->bclink.acked))
			goto exit;
	}

	/* Skip over packets that node has previously acknowledged */
	skb_queue_walk(&tn->bcl->transmq, skb) {
		if (more(buf_seqno(skb), n_ptr->bclink.acked))
			break;
	}

	/* Update packets that node is now acknowledging */
	skb_queue_walk_from_safe(&tn->bcl->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		bcbuf_decr_acks(skb);
		bclink_set_last_sent(net);
		if (bcbuf_acks(skb) == 0) {
			__skb_unlink(skb, &tn->bcl->transmq);
			kfree_skb(skb);
			released = 1;
		}
	}
	n_ptr->bclink.acked = acked;

	/* Try resolving broadcast link congestion, if necessary */
	if (unlikely(skb_peek(&tn->bcl->backlogq))) {
		tipc_link_push_packets(tn->bcl);
		bclink_set_last_sent(net);
	}
	if (unlikely(released && !skb_queue_empty(&tn->bcl->wakeupq)))
		n_ptr->action_flags |= TIPC_WAKEUP_BCAST_USERS;
exit:
	tipc_bclink_unlock(net);
}

/**
 * tipc_bclink_update_link_state - update broadcast link state
 *
 * RCU and node lock set
 */
void tipc_bclink_update_link_state(struct tipc_node *n_ptr,
				   u32 last_sent)
{
	struct sk_buff *buf;
	struct net *net = n_ptr->net;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	/* Ignore "stale" link state info */
	if (less_eq(last_sent, n_ptr->bclink.last_in))
		return;

	/* Update link synchronization state; quit if in sync */
	bclink_update_last_sent(n_ptr, last_sent);

	if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
		return;

	/* Update out-of-sync state; quit if loss is still unconfirmed */
	if ((++n_ptr->bclink.oos_state) == 1) {
		if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
			return;
		n_ptr->bclink.oos_state++;
	}

	/* Don't NACK if one has been recently sent (or seen) */
	if (n_ptr->bclink.oos_state & 0x1)
		return;

	/* Send NACK */
	buf = tipc_buf_acquire(INT_H_SIZE);
	if (buf) {
		struct tipc_msg *msg = buf_msg(buf);
		struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferdq);
		u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;

		tipc_msg_init(tn->own_addr, msg, BCAST_PROTOCOL, STATE_MSG,
			      INT_H_SIZE, n_ptr->addr);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tn->net_id);
		msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_to(msg, to);

		tipc_bclink_lock(net);
		tipc_bearer_send(net, MAX_BEARERS, buf, NULL);
		tn->bcl->stats.sent_nacks++;
		tipc_bclink_unlock(net);
		kfree_skb(buf);

		n_ptr->bclink.oos_state++;
	}
}

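/**
 * tipc_bclink_sync_state - update broadcast link synchronization state
 * @n: node that sent the link protocol message
 * @hdr: the received message header
 */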
void tipc_bclink_sync_state(struct tipc_node *n, struct tipc_msg *hdr)
{
	u16 last = msg_last_bcast(hdr);
	int mtyp = msg_type(hdr);

	if (unlikely(msg_user(hdr) != LINK_PROTOCOL))
		return;
	if (mtyp == STATE_MSG) {
		tipc_bclink_update_link_state(n, last);
		return;
	}
	/* Compatibility: older nodes don't know BCAST_PROTOCOL synchronization,
	 * and transfer synch info in LINK_PROTOCOL messages.
	 */
	if (tipc_node_is_up(n))
		return;
	if ((mtyp != RESET_MSG) && (mtyp != ACTIVATE_MSG))
		return;
	n->bclink.last_sent = last;
	n->bclink.last_in = last;
	n->bclink.oos_state = 0;
}

/**
 * bclink_peek_nack - monitor retransmission requests sent by other nodes
 *
 * Delay any upcoming NACK by this node if another node has already
 * requested the first message this node is going to ask for.
 */
static void bclink_peek_nack(struct net *net, struct tipc_msg *msg)
{
	struct tipc_node *n_ptr = tipc_node_find(net, msg_destnode(msg));

	if (unlikely(!n_ptr))
		return;

	tipc_node_lock(n_ptr);
	if (n_ptr->bclink.recv_permitted &&
	    (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) &&
	    (n_ptr->bclink.last_in == msg_bcgap_after(msg)))
		n_ptr->bclink.oos_state = 2;
	tipc_node_unlock(n_ptr);
	tipc_node_put(n_ptr);
}

/* tipc_bcast_xmit - deliver buffer chain to all nodes in cluster
 *                    and to identified node local sockets
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
 */
int tipc_bcast_xmit(struct net *net, struct sk_buff_head *list)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct tipc_bc_base *bclink = tn->bcbase;
	int rc = 0;
	int bc = 0;
	struct sk_buff *skb;
	struct sk_buff_head arrvq;
	struct sk_buff_head inputq;

	/* Prepare clone of message for local node */
	skb = tipc_msg_reassemble(list);
	if (unlikely(!skb))
		return -EHOSTUNREACH;

	/* Broadcast to all nodes */
	if (likely(bclink)) {
		tipc_bclink_lock(net);
		if (likely(bclink->bcast_nodes.count)) {
			rc = __tipc_link_xmit(net, bcl, list);
			if (likely(!rc)) {
				u32 len = skb_queue_len(&bcl->transmq);

				bclink_set_last_sent(net);
				bcl->stats.queue_sz_counts++;
				bcl->stats.accu_queue_sz += len;
			}
			bc = 1;
		}
		tipc_bclink_unlock(net);
	}

	if (unlikely(!bc))
		__skb_queue_purge(list);

	if (unlikely(rc)) {
		kfree_skb(skb);
		return rc;
	}
	/* Deliver message clone */
	__skb_queue_head_init(&arrvq);
	skb_queue_head_init(&inputq);
	__skb_queue_tail(&arrvq, skb);
	tipc_sk_mcast_rcv(net, &arrvq, &inputq);
	return rc;
}

/**
 * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
 *
 * Called with both sending node's lock and bclink_lock taken.
 */
static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
{
	struct tipc_net *tn = net_generic(node->net, tipc_net_id);

	bclink_update_last_sent(node, seqno);
	node->bclink.last_in = seqno;
	node->bclink.oos_state = 0;
	tn->bcl->stats.recv_info++;

	/*
	 * Unicast an ACK periodically, ensuring that
	 * all nodes in the cluster don't ACK at the same time
	 */
	if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
		tipc_link_proto_xmit(node_active_link(node, node->addr),
				     STATE_MSG, 0, 0, 0, 0);
		tn->bcl->stats.sent_acks++;
	}
}

/**
 * tipc_bclink_rcv - receive a broadcast packet, and deliver upwards
 *
 * RCU is locked, no other locks set
 */
void tipc_bclink_rcv(struct net *net, struct sk_buff *buf)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_node *node;
	u32 next_in;
	u32 seqno;
	int deferred = 0;
	int pos = 0;
	struct sk_buff *iskb;
	struct sk_buff_head *arrvq, *inputq;

	/* Screen out unwanted broadcast messages */
	if (msg_mc_netid(msg) != tn->net_id)
		goto exit;

	node = tipc_node_find(net, msg_prevnode(msg));
	if (unlikely(!node))
		goto exit;

	tipc_node_lock(node);
	if (unlikely(!node->bclink.recv_permitted))
		goto unlock;

	/* Handle broadcast protocol message */
	if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
		if (msg_type(msg) != STATE_MSG)
			goto unlock;
		if (msg_destnode(msg) == tn->own_addr) {
			tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
			tipc_bclink_lock(net);
			bcl->stats.recv_nacks++;
			tn->bcbase->retransmit_to = node;
			bclink_retransmit_pkt(tn, msg_bcgap_after(msg),
					      msg_bcgap_to(msg));
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else {
			tipc_node_unlock(node);
			bclink_peek_nack(net, msg);
		}
		tipc_node_put(node);
		goto exit;
	}

	/* Handle in-sequence broadcast message */
	seqno = msg_seqno(msg);
	next_in = mod(node->bclink.last_in + 1);
	arrvq = &tn->bcbase->arrvq;
	inputq = &tn->bcbase->inputq;

	if (likely(seqno == next_in)) {
receive:
		/* Deliver message to destination */
		if (likely(msg_isdata(msg))) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			spin_lock_bh(&inputq->lock);
			__skb_queue_tail(arrvq, buf);
			spin_unlock_bh(&inputq->lock);
			node->action_flags |= TIPC_BCAST_MSG_EVT;
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else if (msg_user(msg) == MSG_BUNDLER) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			bcl->stats.recv_bundles++;
			bcl->stats.recv_bundled += msg_msgcnt(msg);
			pos = 0;
			while (tipc_msg_extract(buf, &iskb, &pos)) {
				spin_lock_bh(&inputq->lock);
				__skb_queue_tail(arrvq, iskb);
				spin_unlock_bh(&inputq->lock);
			}
			node->action_flags |= TIPC_BCAST_MSG_EVT;
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else if (msg_user(msg) == MSG_FRAGMENTER) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			tipc_buf_append(&node->bclink.reasm_buf, &buf);
			if (unlikely(!buf && !node->bclink.reasm_buf)) {
				tipc_bclink_unlock(net);
				goto unlock;
			}
			bcl->stats.recv_fragments++;
			if (buf) {
				bcl->stats.recv_fragmented++;
				msg = buf_msg(buf);
				tipc_bclink_unlock(net);
				goto receive;
			}
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
			kfree_skb(buf);
		}
		buf = NULL;

		/* Determine new synchronization state */
		tipc_node_lock(node);
		if (unlikely(!tipc_node_is_up(node)))
			goto unlock;

		if (node->bclink.last_in == node->bclink.last_sent)
			goto unlock;

		if (skb_queue_empty(&node->bclink.deferdq)) {
			node->bclink.oos_state = 1;
			goto unlock;
		}

		msg = buf_msg(skb_peek(&node->bclink.deferdq));
		seqno = msg_seqno(msg);
		next_in = mod(next_in + 1);
		if (seqno != next_in)
			goto unlock;

		/* Take in-sequence message from deferred queue & deliver it */
		buf = __skb_dequeue(&node->bclink.deferdq);
		goto receive;
	}

	/* Handle out-of-sequence broadcast message */
	if (less(next_in, seqno)) {
		deferred = tipc_link_defer_pkt(&node->bclink.deferdq,
					       buf);
		bclink_update_last_sent(node, seqno);
		buf = NULL;
	}

	tipc_bclink_lock(net);

	if (deferred)
		bcl->stats.deferred_recv++;
	else
		bcl->stats.duplicates++;

	tipc_bclink_unlock(net);

unlock:
	tipc_node_unlock(node);
	tipc_node_put(node);
exit:
	kfree_skb(buf);
}

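/**
 * tipc_bclink_acks_missing - check for unacknowledged broadcast packets
 * @n_ptr: node to check
 *
 * Returns non-zero if the node has not yet acknowledged everything sent
 * on the broadcast link.
 */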
u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
{
	return (n_ptr->bclink.recv_permitted &&
		(tipc_bclink_get_last_sent(n_ptr->net) != n_ptr->bclink.acked));
}


/**
 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
 *
 * Send packet over as many bearers as necessary to reach all nodes
 * that have joined the broadcast link.
 *
 * Returns 0 (packet sent successfully) under all circumstances,
 * since the broadcast link's pseudo-bearer never blocks
 */
static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf,
			      struct tipc_bearer *unused1,
			      struct tipc_media_addr *unused2)
{
	int bp_index;
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
	struct tipc_bc_base *bclink = tn->bcbase;

	/* Prepare broadcast link message for reliable transmission,
	 * if first time trying to send it;
	 * preparation is skipped for broadcast link protocol messages
	 * since they are sent in an unreliable manner and don't need it
	 */
	if (likely(!msg_non_seq(buf_msg(buf)))) {
		bcbuf_set_acks(buf, bclink->bcast_nodes.count);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tn->net_id);
		tn->bcl->stats.sent_info++;
		if (WARN_ON(!bclink->bcast_nodes.count)) {
			dump_stack();
			return 0;
		}
	}

	/* Send buffer over bearers until all targets reached */
	bcbearer->remains = bclink->bcast_nodes;

	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
		struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
		struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
		struct tipc_bearer *bp[2] = {p, s};
		struct tipc_bearer *b = bp[msg_link_selector(msg)];
		struct sk_buff *tbuf;

		if (!p)
			break; /* No more bearers to try */
		if (!b)
			b = p;
		tipc_nmap_diff(&bcbearer->remains, &b->nodes,
			       &bcbearer->remains_new);
		if (bcbearer->remains_new.count == bcbearer->remains.count)
			continue; /* Nothing added by bearer pair */

		if (bp_index == 0) {
			/* Use original buffer for first bearer */
			tipc_bearer_send(net, b->identity, buf, &b->bcast_addr);
		} else {
			/* Avoid concurrent buffer access */
			tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
			if (!tbuf)
				break;
			tipc_bearer_send(net, b->identity, tbuf,
					 &b->bcast_addr);
			kfree_skb(tbuf); /* Bearer keeps a clone */
		}
		if (bcbearer->remains_new.count == 0)
			break; /* All targets reached */

		bcbearer->remains = bcbearer->remains_new;
	}

	return 0;
}

/**
 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
 */
void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
			u32 node, bool action)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
	struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
	struct tipc_bcbearer_pair *bp_curr;
	struct tipc_bearer *b;
	int b_index;
	int pri;

	tipc_bclink_lock(net);

	if (action)
		tipc_nmap_add(nm_ptr, node);
	else
		tipc_nmap_remove(nm_ptr, node);

	/* Group bearers by priority (can assume max of two per priority) */
	memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));

	rcu_read_lock();
	for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
		b = rcu_dereference_rtnl(tn->bearer_list[b_index]);
		if (!b || !b->nodes.count)
			continue;

		if (!bp_temp[b->priority].primary)
			bp_temp[b->priority].primary = b;
		else
			bp_temp[b->priority].secondary = b;
	}
	rcu_read_unlock();

	/* Create array of bearer pairs for broadcasting */
	bp_curr = bcbearer->bpairs;
	memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));

	for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {

		if (!bp_temp[pri].primary)
			continue;

		bp_curr->primary = bp_temp[pri].primary;

		if (bp_temp[pri].secondary) {
			if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
					    &bp_temp[pri].secondary->nodes)) {
				bp_curr->secondary = bp_temp[pri].secondary;
			} else {
				bp_curr++;
				bp_curr->primary = bp_temp[pri].secondary;
			}
		}

		bp_curr++;
	}

	tipc_bclink_unlock(net);
}

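/**
 * __tipc_nl_add_bc_link_stat - append broadcast link statistics to a netlink message
 */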
static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889
{
	int i;
	struct nlattr *nest;

	struct nla_map {
		__u32 key;
		__u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
	};

	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i <  ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, nest);

	return 0;
msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

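/**
 * tipc_nl_add_bc_link - append a description of the broadcast link to a netlink message
 * @net: the applicable net namespace
 * @msg: the netlink message being assembled
 */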
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return 0;

	tipc_bclink_lock(net);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	/* The broadcast link is always up */
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		goto attr_msg_full;

	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
		goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt))
		goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
	if (err)
		goto attr_msg_full;

	tipc_bclink_unlock(net);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	tipc_bclink_unlock(net);
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
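/**
 * tipc_bclink_reset_stats - clear the broadcast link statistics counters
 * @net: the applicable net namespace
 */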
int tipc_bclink_reset_stats(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return -ENOPROTOOPT;

	tipc_bclink_lock(net);
	memset(&bcl->stats, 0, sizeof(bcl->stats));
	tipc_bclink_unlock(net);
	return 0;
}

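/**
 * tipc_bclink_set_queue_limits - set the broadcast link window size
 * @net: the applicable net namespace
 * @limit: requested window size; values below BCLINK_WIN_MIN are raised to
 *         that minimum, values above TIPC_MAX_LINK_WIN are rejected
 */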
int tipc_bclink_set_queue_limits(struct net *net, u32 limit)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return -ENOPROTOOPT;
	if (limit < BCLINK_WIN_MIN)
		limit = BCLINK_WIN_MIN;
	if (limit > TIPC_MAX_LINK_WIN)
		return -EINVAL;
	tipc_bclink_lock(net);
	tipc_link_set_queue_limits(bcl, limit);
	tipc_bclink_unlock(net);
	return 0;
}

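/**
 * tipc_nl_bc_link_set - configure the broadcast link from netlink attributes
 * @net: the applicable net namespace
 * @attrs: netlink link attributes; must carry a TIPC_NLA_LINK_PROP nest
 *         with a window (TIPC_NLA_PROP_WIN) value
 */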
int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[])
{
	int err;
	u32 win;
	struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

	if (!attrs[TIPC_NLA_LINK_PROP])
		return -EINVAL;

	err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
	if (err)
		return err;

	if (!props[TIPC_NLA_PROP_WIN])
		return -EOPNOTSUPP;

	win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);

	return tipc_bclink_set_queue_limits(net, win);
}

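/**
 * tipc_bcast_init - create and initialize the broadcast link for a net namespace
 * @net: the applicable net namespace
 */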
int tipc_bcast_init(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer;
	struct tipc_bc_base *bclink;
	struct tipc_link *bcl;

	bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
	if (!bcbearer)
		return -ENOMEM;

	bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
	if (!bclink) {
		kfree(bcbearer);
		return -ENOMEM;
	}

	bcl = &bclink->link;
	bcbearer->bearer.media = &bcbearer->media;
	bcbearer->media.send_msg = tipc_bcbearer_send;
	sprintf(bcbearer->media.name, "tipc-broadcast");

	spin_lock_init(&tipc_net(net)->bclock);
	__skb_queue_head_init(&bcl->transmq);
	__skb_queue_head_init(&bcl->backlogq);
	__skb_queue_head_init(&bcl->deferdq);
	skb_queue_head_init(&bcl->wakeupq);
	bcl->snd_nxt = 1;
	spin_lock_init(&bclink->node.lock);
	__skb_queue_head_init(&bclink->arrvq);
	skb_queue_head_init(&bclink->inputq);
	bcl->owner = &bclink->node;
	bcl->owner->net = net;
	bcl->mtu = MAX_PKT_DEFAULT_MCAST;
	tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
	bcl->bearer_id = MAX_BEARERS;
	rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
	bcl->pmsg = (struct tipc_msg *)&bcl->proto_msg;
	msg_set_prevnode(bcl->pmsg, tn->own_addr);
	strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
	tn->bcbearer = bcbearer;
	tn->bcbase = bclink;
	tn->bcl = bcl;
	return 0;
}

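/**
 * tipc_bcast_stop - tear down the broadcast link and free its resources
 * @net: the applicable net namespace
 */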
void tipc_bcast_stop(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_link_purge_queues(tn->bcl);
	tipc_bclink_unlock(net);

	RCU_INIT_POINTER(tn->bearer_list[BCBEARER], NULL);
	synchronize_net();
	kfree(tn->bcbearer);
	kfree(tn->bcbase);
}

/**
 * tipc_nmap_add - add a node to a node map
 */
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) == 0) {
		nm_ptr->count++;
		nm_ptr->map[w] |= mask;
	}
}

/**
 * tipc_nmap_remove - remove a node from a node map
 */
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) != 0) {
		nm_ptr->map[w] &= ~mask;
		nm_ptr->count--;
	}
}

/**
 * tipc_nmap_diff - find differences between node maps
 * @nm_a: input node map A
 * @nm_b: input node map B
 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
 */
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff)
{
	int stop = ARRAY_SIZE(nm_a->map);
	int w;
	int b;
	u32 map;

	memset(nm_diff, 0, sizeof(*nm_diff));
	for (w = 0; w < stop; w++) {
		map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
		nm_diff->map[w] = map;
		if (map != 0) {
			for (b = 0 ; b < WSIZE; b++) {
				if (map & (1 << b))
					nm_diff->count++;
			}
		}
	}
}