/*
 * net/tipc/bcast.c: TIPC broadcast code
 *
 * Copyright (c) 2004-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

38 39
#include "socket.h"
#include "msg.h"
P
Per Liden 已提交
40
#include "bcast.h"
41
#include "name_distr.h"
42
#include "core.h"
P
Per Liden 已提交
43

44
#define	MAX_PKT_DEFAULT_MCAST	1500	/* bcast link max packet size (fixed) */
45 46
#define	BCLINK_WIN_DEFAULT	50	/* bcast link window size (default) */
#define	BCLINK_WIN_MIN	        32	/* bcast minimum link window size */
P
Per Liden 已提交
47

48
const char tipc_bclink_name[] = "broadcast-link";
P
Per Liden 已提交
49

50 51 52
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff);
53 54
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
P
Per Liden 已提交
55

56
static void tipc_bclink_lock(struct net *net)
57
{
58 59 60
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	spin_lock_bh(&tn->bclink->lock);
61 62
}

63
static void tipc_bclink_unlock(struct net *net)
64
{
65
	struct tipc_net *tn = net_generic(net, tipc_net_id);
66

67
	spin_unlock_bh(&tn->bclink->lock);
68 69
}

70 71 72 73 74 75 76
/* tipc_bclink_input - feed arrived broadcast buffers (arrvq) into the
 * socket layer's multicast receive path, filling the bclink input queue
 */
void tipc_bclink_input(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_sk_mcast_rcv(net, &tn->bclink->arrvq, &tn->bclink->inputq);
}

77 78 79 80 81
/* tipc_bclink_get_mtu - the broadcast link packet size is fixed */
uint  tipc_bclink_get_mtu(void)
{
	return MAX_PKT_DEFAULT_MCAST;
}

S
Sam Ravnborg 已提交
82
static u32 bcbuf_acks(struct sk_buff *buf)
P
Per Liden 已提交
83
{
84
	return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
P
Per Liden 已提交
85 86
}

S
Sam Ravnborg 已提交
87
/* bcbuf_set_acks - store the outstanding-ack count in the skb control block */
static void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
{
	TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
}

S
Sam Ravnborg 已提交
92
/* bcbuf_decr_acks - one more destination has acknowledged this buffer */
static void bcbuf_decr_acks(struct sk_buff *buf)
{
	bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}

97
/* tipc_bclink_add_node - register @addr as a broadcast destination */
void tipc_bclink_add_node(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_nmap_add(&tn->bclink->bcast_nodes, addr);
	tipc_bclink_unlock(net);
}

106
/* tipc_bclink_remove_node - unregister @addr as a broadcast destination */
void tipc_bclink_remove_node(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_nmap_remove(&tn->bclink->bcast_nodes, addr);

	/* Last node? => reset backlog queue */
	if (!tn->bclink->bcast_nodes.count)
		tipc_link_purge_backlog(&tn->bclink->link);

	tipc_bclink_unlock(net);
}
P
Per Liden 已提交
119

120
static void bclink_set_last_sent(struct net *net)
121
{
122 123 124
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

125
	bcl->silent_intv_cnt = mod(bcl->snd_nxt - 1);
126 127
}

128
u32 tipc_bclink_get_last_sent(struct net *net)
129
{
130 131
	struct tipc_net *tn = net_generic(net, tipc_net_id);

132
	return tn->bcl->silent_intv_cnt;
133 134
}

135
/* bclink_update_last_sent - raise the peer's last_sent watermark to @seqno,
 * never lowering it (seqnos wrap, so less_eq() does modular comparison)
 */
static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
{
	node->bclink.last_sent = less_eq(node->bclink.last_sent, seqno) ?
						seqno : node->bclink.last_sent;
}

141
/**
142 143
 * tipc_bclink_retransmit_to - get most recent node to request retransmission
 *
144
 * Called with bclink_lock locked
145
 */
146
struct tipc_node *tipc_bclink_retransmit_to(struct net *net)
147
{
148 149 150
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return tn->bclink->retransmit_to;
151 152
}

153
/**
P
Per Liden 已提交
154 155 156
 * bclink_retransmit_pkt - retransmit broadcast packets
 * @after: sequence number of last packet to *not* retransmit
 * @to: sequence number of last packet to retransmit
157
 *
158
 * Called with bclink_lock locked
P
Per Liden 已提交
159
 */
160
static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
P
Per Liden 已提交
161
{
162
	struct sk_buff *skb;
163
	struct tipc_link *bcl = tn->bcl;
P
Per Liden 已提交
164

J
Jon Paul Maloy 已提交
165
	skb_queue_walk(&bcl->transmq, skb) {
166 167
		if (more(buf_seqno(skb), after)) {
			tipc_link_retransmit(bcl, skb, mod(to - after));
168
			break;
169
		}
170
	}
P
Per Liden 已提交
171 172
}

173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196
/**
 * bclink_prepare_wakeup - prepare users for wakeup after congestion
 * @bcl: broadcast link
 * @resultq: queue for users which can be woken up
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to specified queue for wakeup
 */
static void bclink_prepare_wakeup(struct tipc_link *bcl, struct sk_buff_head *resultq)
{
	/* pnd[] accumulates pending chain sizes per importance level */
	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
	int imp, lim;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&bcl->wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		lim = bcl->window + bcl->backlog[imp].limit;
		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
		/* Skip users whose chain would still exceed the limit */
		if ((pnd[imp] + bcl->backlog[imp].len) >= lim)
			continue;
		skb_unlink(skb, &bcl->wakeupq);
		skb_queue_tail(resultq, skb);
	}
}

197 198 199 200 201
/**
 * tipc_bclink_wakeup_users - wake up pending users
 *
 * Called with no locks taken
 */
202
void tipc_bclink_wakeup_users(struct net *net)
203
{
204
	struct tipc_net *tn = net_generic(net, tipc_net_id);
205 206
	struct tipc_link *bcl = tn->bcl;
	struct sk_buff_head resultq;
207

208 209 210
	skb_queue_head_init(&resultq);
	bclink_prepare_wakeup(bcl, &resultq);
	tipc_sk_rcv(net, &resultq);
211 212
}

213
/**
214
 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
P
Per Liden 已提交
215 216
 * @n_ptr: node that sent acknowledgement info
 * @acked: broadcast sequence # that has been acknowledged
217
 *
218
 * Node is locked, bclink_lock unlocked.
P
Per Liden 已提交
219
 */
220
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
P
Per Liden 已提交
221
{
222
	struct sk_buff *skb, *tmp;
P
Per Liden 已提交
223
	unsigned int released = 0;
224 225
	struct net *net = n_ptr->net;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
P
Per Liden 已提交
226

227 228 229
	if (unlikely(!n_ptr->bclink.recv_permitted))
		return;

230
	tipc_bclink_lock(net);
231

232
	/* Bail out if tx queue is empty (no clean up is required) */
J
Jon Paul Maloy 已提交
233
	skb = skb_peek(&tn->bcl->transmq);
234
	if (!skb)
235 236 237 238 239 240 241 242 243
		goto exit;

	/* Determine which messages need to be acknowledged */
	if (acked == INVALID_LINK_SEQ) {
		/*
		 * Contact with specified node has been lost, so need to
		 * acknowledge sent messages only (if other nodes still exist)
		 * or both sent and unsent messages (otherwise)
		 */
244
		if (tn->bclink->bcast_nodes.count)
245
			acked = tn->bcl->silent_intv_cnt;
246
		else
247
			acked = tn->bcl->snd_nxt;
248 249 250 251 252
	} else {
		/*
		 * Bail out if specified sequence number does not correspond
		 * to a message that has been sent and not yet acknowledged
		 */
253
		if (less(acked, buf_seqno(skb)) ||
254
		    less(tn->bcl->silent_intv_cnt, acked) ||
255 256 257 258 259
		    less_eq(acked, n_ptr->bclink.acked))
			goto exit;
	}

	/* Skip over packets that node has previously acknowledged */
J
Jon Paul Maloy 已提交
260
	skb_queue_walk(&tn->bcl->transmq, skb) {
261 262 263
		if (more(buf_seqno(skb), n_ptr->bclink.acked))
			break;
	}
P
Per Liden 已提交
264 265

	/* Update packets that node is now acknowledging */
J
Jon Paul Maloy 已提交
266
	skb_queue_walk_from_safe(&tn->bcl->transmq, skb, tmp) {
267 268
		if (more(buf_seqno(skb), acked))
			break;
J
Jon Paul Maloy 已提交
269 270
		bcbuf_decr_acks(skb);
		bclink_set_last_sent(net);
271
		if (bcbuf_acks(skb) == 0) {
J
Jon Paul Maloy 已提交
272
			__skb_unlink(skb, &tn->bcl->transmq);
273
			kfree_skb(skb);
P
Per Liden 已提交
274 275 276 277 278 279
			released = 1;
		}
	}
	n_ptr->bclink.acked = acked;

	/* Try resolving broadcast link congestion, if necessary */
J
Jon Paul Maloy 已提交
280
	if (unlikely(skb_peek(&tn->bcl->backlogq))) {
281 282
		tipc_link_push_packets(tn->bcl);
		bclink_set_last_sent(net);
283
	}
284
	if (unlikely(released && !skb_queue_empty(&tn->bcl->wakeupq)))
285
		n_ptr->action_flags |= TIPC_WAKEUP_BCAST_USERS;
286
exit:
287
	tipc_bclink_unlock(net);
P
Per Liden 已提交
288 289
}

290
/**
291
 * tipc_bclink_update_link_state - update broadcast link state
292
 *
Y
Ying Xue 已提交
293
 * RCU and node lock set
P
Per Liden 已提交
294
 */
295
void tipc_bclink_update_link_state(struct tipc_node *n_ptr,
296
				   u32 last_sent)
P
Per Liden 已提交
297
{
298
	struct sk_buff *buf;
299
	struct net *net = n_ptr->net;
300
	struct tipc_net *tn = net_generic(net, tipc_net_id);
P
Per Liden 已提交
301

302 303 304
	/* Ignore "stale" link state info */
	if (less_eq(last_sent, n_ptr->bclink.last_in))
		return;
P
Per Liden 已提交
305

306 307 308 309 310 311 312 313 314 315 316 317 318 319 320
	/* Update link synchronization state; quit if in sync */
	bclink_update_last_sent(n_ptr, last_sent);

	if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
		return;

	/* Update out-of-sync state; quit if loss is still unconfirmed */
	if ((++n_ptr->bclink.oos_state) == 1) {
		if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
			return;
		n_ptr->bclink.oos_state++;
	}

	/* Don't NACK if one has been recently sent (or seen) */
	if (n_ptr->bclink.oos_state & 0x1)
P
Per Liden 已提交
321 322
		return;

323
	/* Send NACK */
324
	buf = tipc_buf_acquire(INT_H_SIZE);
P
Per Liden 已提交
325
	if (buf) {
326
		struct tipc_msg *msg = buf_msg(buf);
J
Jon Paul Maloy 已提交
327
		struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferdq);
328
		u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;
329

330
		tipc_msg_init(tn->own_addr, msg, BCAST_PROTOCOL, STATE_MSG,
331
			      INT_H_SIZE, n_ptr->addr);
332
		msg_set_non_seq(msg, 1);
333
		msg_set_mc_netid(msg, tn->net_id);
334 335
		msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
336
		msg_set_bcgap_to(msg, to);
P
Per Liden 已提交
337

338
		tipc_bclink_lock(net);
339
		tipc_bearer_send(net, MAX_BEARERS, buf, NULL);
340 341
		tn->bcl->stats.sent_nacks++;
		tipc_bclink_unlock(net);
342
		kfree_skb(buf);
P
Per Liden 已提交
343

344
		n_ptr->bclink.oos_state++;
P
Per Liden 已提交
345 346 347
	}
}

348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370
/* tipc_bclink_sync_state - update broadcast synchronization from a
 * unicast LINK_PROTOCOL message received from peer node @n
 */
void tipc_bclink_sync_state(struct tipc_node *n, struct tipc_msg *hdr)
{
	int mtyp = msg_type(hdr);
	u16 last = msg_last_bcast(hdr);

	if (unlikely(msg_user(hdr) != LINK_PROTOCOL))
		return;

	if (mtyp == STATE_MSG) {
		tipc_bclink_update_link_state(n, last);
		return;
	}

	/* Compatibility: older nodes don't know BCAST_PROTOCOL synchronization,
	 * and transfer synch info in LINK_PROTOCOL messages.
	 */
	if (tipc_node_is_up(n))
		return;
	if (mtyp != RESET_MSG && mtyp != ACTIVATE_MSG)
		return;

	n->bclink.last_sent = last;
	n->bclink.last_in = last;
	n->bclink.oos_state = 0;
}

371
/**
372
 * bclink_peek_nack - monitor retransmission requests sent by other nodes
P
Per Liden 已提交
373
 *
374 375
 * Delay any upcoming NACK by this node if another node has already
 * requested the first message this node is going to ask for.
P
Per Liden 已提交
376
 */
377
static void bclink_peek_nack(struct net *net, struct tipc_msg *msg)
P
Per Liden 已提交
378
{
379
	struct tipc_node *n_ptr = tipc_node_find(net, msg_destnode(msg));
P
Per Liden 已提交
380

381
	if (unlikely(!n_ptr))
P
Per Liden 已提交
382
		return;
383

384
	tipc_node_lock(n_ptr);
385
	if (n_ptr->bclink.recv_permitted &&
386 387 388
	    (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) &&
	    (n_ptr->bclink.last_in == msg_bcgap_after(msg)))
		n_ptr->bclink.oos_state = 2;
389
	tipc_node_unlock(n_ptr);
390
	tipc_node_put(n_ptr);
P
Per Liden 已提交
391 392
}

393
/* tipc_bclink_xmit - deliver buffer chain to all nodes in cluster
394
 *                    and to identified node local sockets
395
 * @net: the applicable net namespace
396
 * @list: chain of buffers containing message
397 398 399
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
 */
400
int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list)
401
{
402 403 404
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct tipc_bclink *bclink = tn->bclink;
405 406
	int rc = 0;
	int bc = 0;
407
	struct sk_buff *skb;
408 409
	struct sk_buff_head arrvq;
	struct sk_buff_head inputq;
410 411

	/* Prepare clone of message for local node */
412
	skb = tipc_msg_reassemble(list);
413
	if (unlikely(!skb))
414
		return -EHOSTUNREACH;
415

416
	/* Broadcast to all nodes */
417
	if (likely(bclink)) {
418
		tipc_bclink_lock(net);
419
		if (likely(bclink->bcast_nodes.count)) {
420
			rc = __tipc_link_xmit(net, bcl, list);
421
			if (likely(!rc)) {
J
Jon Paul Maloy 已提交
422
				u32 len = skb_queue_len(&bcl->transmq);
423

424
				bclink_set_last_sent(net);
425
				bcl->stats.queue_sz_counts++;
426
				bcl->stats.accu_queue_sz += len;
427 428 429
			}
			bc = 1;
		}
430
		tipc_bclink_unlock(net);
431 432 433
	}

	if (unlikely(!bc))
434
		__skb_queue_purge(list);
435

436
	if (unlikely(rc)) {
437
		kfree_skb(skb);
438 439 440 441 442 443 444
		return rc;
	}
	/* Deliver message clone */
	__skb_queue_head_init(&arrvq);
	skb_queue_head_init(&inputq);
	__skb_queue_tail(&arrvq, skb);
	tipc_sk_mcast_rcv(net, &arrvq, &inputq);
445 446 447
	return rc;
}

448
/**
 * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
 *
 * Called with both sending node's lock and bclink_lock taken.
 */
static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
{
	struct tipc_net *tn = net_generic(node->net, tipc_net_id);

	bclink_update_last_sent(node, seqno);
	node->bclink.last_in = seqno;
	node->bclink.oos_state = 0;
	tn->bcl->stats.recv_info++;

	/*
	 * Unicast an ACK periodically, ensuring that
	 * all nodes in the cluster don't ACK at the same time
	 */
	if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
		tipc_link_proto_xmit(node_active_link(node, node->addr),
				     STATE_MSG, 0, 0, 0, 0);
		tn->bcl->stats.sent_acks++;
	}
}

473
/**
474
 * tipc_bclink_rcv - receive a broadcast packet, and deliver upwards
475
 *
Y
Ying Xue 已提交
476
 * RCU is locked, no other locks set
P
Per Liden 已提交
477
 */
478
void tipc_bclink_rcv(struct net *net, struct sk_buff *buf)
479
{
480
	struct tipc_net *tn = net_generic(net, tipc_net_id);
481
	struct tipc_link *bcl = tn->bcl;
P
Per Liden 已提交
482
	struct tipc_msg *msg = buf_msg(buf);
483
	struct tipc_node *node;
P
Per Liden 已提交
484 485
	u32 next_in;
	u32 seqno;
486
	int deferred = 0;
487 488
	int pos = 0;
	struct sk_buff *iskb;
489
	struct sk_buff_head *arrvq, *inputq;
P
Per Liden 已提交
490

491
	/* Screen out unwanted broadcast messages */
492
	if (msg_mc_netid(msg) != tn->net_id)
493 494
		goto exit;

495
	node = tipc_node_find(net, msg_prevnode(msg));
496 497 498 499
	if (unlikely(!node))
		goto exit;

	tipc_node_lock(node);
500
	if (unlikely(!node->bclink.recv_permitted))
501
		goto unlock;
P
Per Liden 已提交
502

503
	/* Handle broadcast protocol message */
P
Per Liden 已提交
504
	if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
505 506
		if (msg_type(msg) != STATE_MSG)
			goto unlock;
507
		if (msg_destnode(msg) == tn->own_addr) {
508
			tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
509
			tipc_bclink_lock(net);
P
Per Liden 已提交
510
			bcl->stats.recv_nacks++;
511 512
			tn->bclink->retransmit_to = node;
			bclink_retransmit_pkt(tn, msg_bcgap_after(msg),
P
Per Liden 已提交
513
					      msg_bcgap_to(msg));
514
			tipc_bclink_unlock(net);
515
			tipc_node_unlock(node);
P
Per Liden 已提交
516
		} else {
517
			tipc_node_unlock(node);
518
			bclink_peek_nack(net, msg);
P
Per Liden 已提交
519
		}
520
		tipc_node_put(node);
521
		goto exit;
P
Per Liden 已提交
522 523
	}

524
	/* Handle in-sequence broadcast message */
P
Per Liden 已提交
525
	seqno = msg_seqno(msg);
526
	next_in = mod(node->bclink.last_in + 1);
527 528
	arrvq = &tn->bclink->arrvq;
	inputq = &tn->bclink->inputq;
P
Per Liden 已提交
529 530

	if (likely(seqno == next_in)) {
531
receive:
532
		/* Deliver message to destination */
P
Per Liden 已提交
533
		if (likely(msg_isdata(msg))) {
534
			tipc_bclink_lock(net);
535
			bclink_accept_pkt(node, seqno);
536 537 538 539
			spin_lock_bh(&inputq->lock);
			__skb_queue_tail(arrvq, buf);
			spin_unlock_bh(&inputq->lock);
			node->action_flags |= TIPC_BCAST_MSG_EVT;
540
			tipc_bclink_unlock(net);
541
			tipc_node_unlock(node);
P
Per Liden 已提交
542
		} else if (msg_user(msg) == MSG_BUNDLER) {
543
			tipc_bclink_lock(net);
544
			bclink_accept_pkt(node, seqno);
P
Per Liden 已提交
545 546
			bcl->stats.recv_bundles++;
			bcl->stats.recv_bundled += msg_msgcnt(msg);
547 548 549 550 551 552 553
			pos = 0;
			while (tipc_msg_extract(buf, &iskb, &pos)) {
				spin_lock_bh(&inputq->lock);
				__skb_queue_tail(arrvq, iskb);
				spin_unlock_bh(&inputq->lock);
			}
			node->action_flags |= TIPC_BCAST_MSG_EVT;
554
			tipc_bclink_unlock(net);
555
			tipc_node_unlock(node);
P
Per Liden 已提交
556
		} else if (msg_user(msg) == MSG_FRAGMENTER) {
557
			tipc_bclink_lock(net);
558
			bclink_accept_pkt(node, seqno);
559 560 561 562 563
			tipc_buf_append(&node->bclink.reasm_buf, &buf);
			if (unlikely(!buf && !node->bclink.reasm_buf)) {
				tipc_bclink_unlock(net);
				goto unlock;
			}
P
Per Liden 已提交
564
			bcl->stats.recv_fragments++;
565
			if (buf) {
P
Per Liden 已提交
566
				bcl->stats.recv_fragmented++;
567
				msg = buf_msg(buf);
568
				tipc_bclink_unlock(net);
E
Erik Hugne 已提交
569 570
				goto receive;
			}
571
			tipc_bclink_unlock(net);
572
			tipc_node_unlock(node);
P
Per Liden 已提交
573
		} else {
574
			tipc_bclink_lock(net);
575
			bclink_accept_pkt(node, seqno);
576
			tipc_bclink_unlock(net);
577
			tipc_node_unlock(node);
578
			kfree_skb(buf);
P
Per Liden 已提交
579
		}
580
		buf = NULL;
581 582

		/* Determine new synchronization state */
583
		tipc_node_lock(node);
584 585 586
		if (unlikely(!tipc_node_is_up(node)))
			goto unlock;

587
		if (node->bclink.last_in == node->bclink.last_sent)
588 589
			goto unlock;

J
Jon Paul Maloy 已提交
590
		if (skb_queue_empty(&node->bclink.deferdq)) {
591 592 593 594
			node->bclink.oos_state = 1;
			goto unlock;
		}

J
Jon Paul Maloy 已提交
595
		msg = buf_msg(skb_peek(&node->bclink.deferdq));
596 597 598 599 600 601
		seqno = msg_seqno(msg);
		next_in = mod(next_in + 1);
		if (seqno != next_in)
			goto unlock;

		/* Take in-sequence message from deferred queue & deliver it */
J
Jon Paul Maloy 已提交
602
		buf = __skb_dequeue(&node->bclink.deferdq);
603 604 605 606 607
		goto receive;
	}

	/* Handle out-of-sequence broadcast message */
	if (less(next_in, seqno)) {
J
Jon Paul Maloy 已提交
608
		deferred = tipc_link_defer_pkt(&node->bclink.deferdq,
609
					       buf);
610
		bclink_update_last_sent(node, seqno);
611
		buf = NULL;
612
	}
613

614
	tipc_bclink_lock(net);
615

616 617
	if (deferred)
		bcl->stats.deferred_recv++;
618 619
	else
		bcl->stats.duplicates++;
620

621
	tipc_bclink_unlock(net);
622

623
unlock:
624
	tipc_node_unlock(node);
625
	tipc_node_put(node);
626
exit:
627
	kfree_skb(buf);
P
Per Liden 已提交
628 629
}

630
u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
P
Per Liden 已提交
631
{
632
	return (n_ptr->bclink.recv_permitted &&
633
		(tipc_bclink_get_last_sent(n_ptr->net) != n_ptr->bclink.acked));
P
Per Liden 已提交
634 635 636 637
}


/**
638
 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
639
 *
640 641
 * Send packet over as many bearers as necessary to reach all nodes
 * that have joined the broadcast link.
642
 *
643 644
 * Returns 0 (packet sent successfully) under all circumstances,
 * since the broadcast link's pseudo-bearer never blocks
P
Per Liden 已提交
645
 */
646 647
static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf,
			      struct tipc_bearer *unused1,
A
Adrian Bunk 已提交
648
			      struct tipc_media_addr *unused2)
P
Per Liden 已提交
649 650
{
	int bp_index;
651
	struct tipc_msg *msg = buf_msg(buf);
652
	struct tipc_net *tn = net_generic(net, tipc_net_id);
653 654
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
	struct tipc_bclink *bclink = tn->bclink;
P
Per Liden 已提交
655

656
	/* Prepare broadcast link message for reliable transmission,
657 658 659 660
	 * if first time trying to send it;
	 * preparation is skipped for broadcast link protocol messages
	 * since they are sent in an unreliable manner and don't need it
	 */
P
Per Liden 已提交
661
	if (likely(!msg_non_seq(buf_msg(buf)))) {
662
		bcbuf_set_acks(buf, bclink->bcast_nodes.count);
663
		msg_set_non_seq(msg, 1);
664
		msg_set_mc_netid(msg, tn->net_id);
665
		tn->bcl->stats.sent_info++;
666
		if (WARN_ON(!bclink->bcast_nodes.count)) {
667 668 669
			dump_stack();
			return 0;
		}
P
Per Liden 已提交
670 671 672
	}

	/* Send buffer over bearers until all targets reached */
673
	bcbearer->remains = bclink->bcast_nodes;
P
Per Liden 已提交
674 675

	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
676 677
		struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
		struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
678 679
		struct tipc_bearer *bp[2] = {p, s};
		struct tipc_bearer *b = bp[msg_link_selector(msg)];
680
		struct sk_buff *tbuf;
P
Per Liden 已提交
681 682

		if (!p)
683
			break; /* No more bearers to try */
684 685
		if (!b)
			b = p;
686
		tipc_nmap_diff(&bcbearer->remains, &b->nodes,
687
			       &bcbearer->remains_new);
688
		if (bcbearer->remains_new.count == bcbearer->remains.count)
689
			continue; /* Nothing added by bearer pair */
P
Per Liden 已提交
690

691 692
		if (bp_index == 0) {
			/* Use original buffer for first bearer */
693
			tipc_bearer_send(net, b->identity, buf, &b->bcast_addr);
694 695
		} else {
			/* Avoid concurrent buffer access */
696
			tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
697 698
			if (!tbuf)
				break;
699 700
			tipc_bearer_send(net, b->identity, tbuf,
					 &b->bcast_addr);
701 702
			kfree_skb(tbuf); /* Bearer keeps a clone */
		}
703
		if (bcbearer->remains_new.count == 0)
704
			break; /* All targets reached */
P
Per Liden 已提交
705

706
		bcbearer->remains = bcbearer->remains_new;
P
Per Liden 已提交
707
	}
708

709
	return 0;
P
Per Liden 已提交
710 711 712
}

/**
713
 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
P
Per Liden 已提交
714
 */
715 716
void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
			u32 node, bool action)
P
Per Liden 已提交
717
{
718
	struct tipc_net *tn = net_generic(net, tipc_net_id);
719
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
720 721
	struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
	struct tipc_bcbearer_pair *bp_curr;
Y
Ying Xue 已提交
722
	struct tipc_bearer *b;
P
Per Liden 已提交
723 724 725
	int b_index;
	int pri;

726
	tipc_bclink_lock(net);
P
Per Liden 已提交
727

728 729 730 731 732
	if (action)
		tipc_nmap_add(nm_ptr, node);
	else
		tipc_nmap_remove(nm_ptr, node);

P
Per Liden 已提交
733 734 735
	/* Group bearers by priority (can assume max of two per priority) */
	memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));

Y
Ying Xue 已提交
736
	rcu_read_lock();
P
Per Liden 已提交
737
	for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
738
		b = rcu_dereference_rtnl(tn->bearer_list[b_index]);
739
		if (!b || !b->nodes.count)
P
Per Liden 已提交
740 741 742 743 744 745 746
			continue;

		if (!bp_temp[b->priority].primary)
			bp_temp[b->priority].primary = b;
		else
			bp_temp[b->priority].secondary = b;
	}
Y
Ying Xue 已提交
747
	rcu_read_unlock();
P
Per Liden 已提交
748 749 750 751 752

	/* Create array of bearer pairs for broadcasting */
	bp_curr = bcbearer->bpairs;
	memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));

P
Per Liden 已提交
753
	for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {
P
Per Liden 已提交
754 755 756 757 758 759 760

		if (!bp_temp[pri].primary)
			continue;

		bp_curr->primary = bp_temp[pri].primary;

		if (bp_temp[pri].secondary) {
761 762
			if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
					    &bp_temp[pri].secondary->nodes)) {
P
Per Liden 已提交
763 764 765 766 767 768 769 770 771 772
				bp_curr->secondary = bp_temp[pri].secondary;
			} else {
				bp_curr++;
				bp_curr->primary = bp_temp[pri].secondary;
			}
		}

		bp_curr++;
	}

773
	tipc_bclink_unlock(net);
P
Per Liden 已提交
774 775
}

776 777
static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826
{
	int i;
	struct nlattr *nest;

	struct nla_map {
		__u32 key;
		__u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
	};

	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i <  ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, nest);

	return 0;
msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

827
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
828 829 830 831 832
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
833 834
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
835 836 837 838

	if (!bcl)
		return 0;

839
	tipc_bclink_lock(net);
840

841
	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	/* The broadcast link is always up */
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		goto attr_msg_full;

	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
		goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
		goto attr_msg_full;
858
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt))
859
		goto attr_msg_full;
860
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt))
861 862 863 864 865
		goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
866
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
867 868 869 870 871 872 873
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
	if (err)
		goto attr_msg_full;

874
	tipc_bclink_unlock(net);
875 876 877 878 879 880 881 882 883 884
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
885
	tipc_bclink_unlock(net);
886 887 888 889
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
P
Per Liden 已提交
890

891
int tipc_bclink_reset_stats(struct net *net)
P
Per Liden 已提交
892
{
893 894 895
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

P
Per Liden 已提交
896 897 898
	if (!bcl)
		return -ENOPROTOOPT;

899
	tipc_bclink_lock(net);
P
Per Liden 已提交
900
	memset(&bcl->stats, 0, sizeof(bcl->stats));
901
	tipc_bclink_unlock(net);
902
	return 0;
P
Per Liden 已提交
903 904
}

905
/* tipc_bclink_set_queue_limits - set the broadcast link window size,
 * clamping to BCLINK_WIN_MIN and rejecting values above TIPC_MAX_LINK_WIN
 */
int tipc_bclink_set_queue_limits(struct net *net, u32 limit)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return -ENOPROTOOPT;
	if (limit < BCLINK_WIN_MIN)
		limit = BCLINK_WIN_MIN;
	if (limit > TIPC_MAX_LINK_WIN)
		return -EINVAL;
	tipc_bclink_lock(net);
	tipc_link_set_queue_limits(bcl, limit);
	tipc_bclink_unlock(net);
	return 0;
}

922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942
/* tipc_nl_bc_link_set - apply broadcast link properties received over
 * netlink; only the window property is supported
 */
int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[])
{
	struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
	u32 win;
	int err;

	if (!attrs[TIPC_NLA_LINK_PROP])
		return -EINVAL;

	err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
	if (err)
		return err;

	if (!props[TIPC_NLA_PROP_WIN])
		return -EOPNOTSUPP;

	win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
	return tipc_bclink_set_queue_limits(net, win);
}

943
int tipc_bclink_init(struct net *net)
P
Per Liden 已提交
944
{
945
	struct tipc_net *tn = net_generic(net, tipc_net_id);
946 947 948
	struct tipc_bcbearer *bcbearer;
	struct tipc_bclink *bclink;
	struct tipc_link *bcl;
949

950 951 952 953 954 955 956 957 958 959 960
	bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
	if (!bcbearer)
		return -ENOMEM;

	bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
	if (!bclink) {
		kfree(bcbearer);
		return -ENOMEM;
	}

	bcl = &bclink->link;
P
Per Liden 已提交
961
	bcbearer->bearer.media = &bcbearer->media;
962
	bcbearer->media.send_msg = tipc_bcbearer_send;
963
	sprintf(bcbearer->media.name, "tipc-broadcast");
P
Per Liden 已提交
964

965
	spin_lock_init(&bclink->lock);
J
Jon Paul Maloy 已提交
966 967 968
	__skb_queue_head_init(&bcl->transmq);
	__skb_queue_head_init(&bcl->backlogq);
	__skb_queue_head_init(&bcl->deferdq);
969
	skb_queue_head_init(&bcl->wakeupq);
970
	bcl->snd_nxt = 1;
I
Ingo Molnar 已提交
971
	spin_lock_init(&bclink->node.lock);
972 973
	__skb_queue_head_init(&bclink->arrvq);
	skb_queue_head_init(&bclink->inputq);
P
Per Liden 已提交
974
	bcl->owner = &bclink->node;
975
	bcl->owner->net = net;
976
	bcl->mtu = MAX_PKT_DEFAULT_MCAST;
977
	tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
978
	bcl->bearer_id = MAX_BEARERS;
979
	rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
980 981
	bcl->pmsg = (struct tipc_msg *)&bcl->proto_msg;
	msg_set_prevnode(bcl->pmsg, tn->own_addr);
982
	strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
983 984 985
	tn->bcbearer = bcbearer;
	tn->bclink = bclink;
	tn->bcl = bcl;
986
	return 0;
P
Per Liden 已提交
987 988
}

989
void tipc_bclink_stop(struct net *net)
P
Per Liden 已提交
990
{
991 992
	struct tipc_net *tn = net_generic(net, tipc_net_id);

993 994 995
	tipc_bclink_lock(net);
	tipc_link_purge_queues(tn->bcl);
	tipc_bclink_unlock(net);
996

997
	RCU_INIT_POINTER(tn->bearer_list[BCBEARER], NULL);
998
	synchronize_net();
999 1000
	kfree(tn->bcbearer);
	kfree(tn->bclink);
P
Per Liden 已提交
1001 1002
}

1003 1004 1005
/**
 * tipc_nmap_add - add a node to a node map
 */
1006
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) == 0) {
		nm_ptr->count++;
		nm_ptr->map[w] |= mask;
	}
}

/**
 * tipc_nmap_remove - remove a node from a node map
 */
1021
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) != 0) {
		nm_ptr->map[w] &= ~mask;
		nm_ptr->count--;
	}
}

/**
 * tipc_nmap_diff - find differences between node maps
 * @nm_a: input node map A
 * @nm_b: input node map B
 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
 */
1039 1040 1041
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff)
1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059
{
	int stop = ARRAY_SIZE(nm_a->map);
	int w;
	int b;
	u32 map;

	memset(nm_diff, 0, sizeof(*nm_diff));
	for (w = 0; w < stop; w++) {
		map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
		nm_diff->map[w] = map;
		if (map != 0) {
			for (b = 0 ; b < WSIZE; b++) {
				if (map & (1 << b))
					nm_diff->count++;
			}
		}
	}
}