bcast.c 26.3 KB
Newer Older
P
Per Liden 已提交
1 2
/*
 * net/tipc/bcast.c: TIPC broadcast code
3
 *
4
 * Copyright (c) 2004-2006, 2014-2015, Ericsson AB
P
Per Liden 已提交
5
 * Copyright (c) 2004, Intel Corporation.
6
 * Copyright (c) 2005, 2010-2011, Wind River Systems
P
Per Liden 已提交
7 8
 * All rights reserved.
 *
P
Per Liden 已提交
9
 * Redistribution and use in source and binary forms, with or without
P
Per Liden 已提交
10 11
 * modification, are permitted provided that the following conditions are met:
 *
P
Per Liden 已提交
12 13 14 15 16 17 18 19
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
P
Per Liden 已提交
20
 *
P
Per Liden 已提交
21 22 23 24 25 26 27 28 29 30 31 32 33 34
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
P
Per Liden 已提交
35 36 37
 * POSSIBILITY OF SUCH DAMAGE.
 */

38 39
#include "socket.h"
#include "msg.h"
P
Per Liden 已提交
40
#include "bcast.h"
41
#include "name_distr.h"
42
#include "core.h"
P
Per Liden 已提交
43

44 45
#define	MAX_PKT_DEFAULT_MCAST	1500	/* bcast link max packet size (fixed) */
#define	BCLINK_WIN_DEFAULT	20	/* bcast link window size (default) */
P
Per Liden 已提交
46

47
const char tipc_bclink_name[] = "broadcast-link";
P
Per Liden 已提交
48

49 50 51
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff);
52 53
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
P
Per Liden 已提交
54

55
static void tipc_bclink_lock(struct net *net)
56
{
57 58 59
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	spin_lock_bh(&tn->bclink->lock);
60 61
}

62
static void tipc_bclink_unlock(struct net *net)
63
{
64
	struct tipc_net *tn = net_generic(net, tipc_net_id);
65 66
	struct tipc_node *node = NULL;

67 68
	if (likely(!tn->bclink->flags)) {
		spin_unlock_bh(&tn->bclink->lock);
69 70 71
		return;
	}

72 73 74
	if (tn->bclink->flags & TIPC_BCLINK_RESET) {
		tn->bclink->flags &= ~TIPC_BCLINK_RESET;
		node = tipc_bclink_retransmit_to(net);
75
	}
76
	spin_unlock_bh(&tn->bclink->lock);
77 78 79 80 81

	if (node)
		tipc_link_reset_all(node);
}

82 83 84 85 86 87 88
void tipc_bclink_input(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_sk_mcast_rcv(net, &tn->bclink->arrvq, &tn->bclink->inputq);
}

89 90 91 92 93
/* tipc_bclink_get_mtu - return the fixed broadcast link packet size */
uint tipc_bclink_get_mtu(void)
{
	uint mtu = MAX_PKT_DEFAULT_MCAST;

	return mtu;
}

94
void tipc_bclink_set_flags(struct net *net, unsigned int flags)
95
{
96 97 98
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tn->bclink->flags |= flags;
99 100
}

S
Sam Ravnborg 已提交
101
static u32 bcbuf_acks(struct sk_buff *buf)
P
Per Liden 已提交
102
{
103
	return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
P
Per Liden 已提交
104 105
}

S
Sam Ravnborg 已提交
106
/* bcbuf_set_acks - record the outstanding-ack count for this buffer
 * in the skb control block's handle field
 */
static void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
{
	unsigned long stashed = acks;

	TIPC_SKB_CB(buf)->handle = (void *)stashed;
}

S
Sam Ravnborg 已提交
111
static void bcbuf_decr_acks(struct sk_buff *buf)
P
Per Liden 已提交
112 113 114 115
{
	bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}

116
/* tipc_bclink_add_node - add a node to the broadcast link destination map */
void tipc_bclink_add_node(struct net *net, u32 addr)
{
	struct tipc_net *tipc_net = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_nmap_add(&tipc_net->bclink->bcast_nodes, addr);
	tipc_bclink_unlock(net);
}

125
/* tipc_bclink_remove_node - drop a node from the broadcast link
 * destination map
 */
void tipc_bclink_remove_node(struct net *net, u32 addr)
{
	struct tipc_net *tipc_net = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_nmap_remove(&tipc_net->bclink->bcast_nodes, addr);
	tipc_bclink_unlock(net);
}
P
Per Liden 已提交
133

134
static void bclink_set_last_sent(struct net *net)
135
{
136 137
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
J
Jon Paul Maloy 已提交
138
	struct sk_buff *skb = skb_peek(&bcl->backlogq);
139

J
Jon Paul Maloy 已提交
140 141
	if (skb)
		bcl->fsm_msg_cnt = mod(buf_seqno(skb) - 1);
142 143 144 145
	else
		bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1);
}

146
u32 tipc_bclink_get_last_sent(struct net *net)
147
{
148 149 150
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return tn->bcl->fsm_msg_cnt;
151 152
}

153
/* bclink_update_last_sent - advance a peer's "last sent" sequence number,
 * never moving it backwards
 */
static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
{
	if (less_eq(node->bclink.last_sent, seqno))
		node->bclink.last_sent = seqno;
}


160
/**
161 162
 * tipc_bclink_retransmit_to - get most recent node to request retransmission
 *
163
 * Called with bclink_lock locked
164
 */
165
struct tipc_node *tipc_bclink_retransmit_to(struct net *net)
166
{
167 168 169
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return tn->bclink->retransmit_to;
170 171
}

172
/**
P
Per Liden 已提交
173 174 175
 * bclink_retransmit_pkt - retransmit broadcast packets
 * @after: sequence number of last packet to *not* retransmit
 * @to: sequence number of last packet to retransmit
176
 *
177
 * Called with bclink_lock locked
P
Per Liden 已提交
178
 */
179
static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
P
Per Liden 已提交
180
{
181
	struct sk_buff *skb;
182
	struct tipc_link *bcl = tn->bcl;
P
Per Liden 已提交
183

J
Jon Paul Maloy 已提交
184
	skb_queue_walk(&bcl->transmq, skb) {
185 186
		if (more(buf_seqno(skb), after)) {
			tipc_link_retransmit(bcl, skb, mod(to - after));
187
			break;
188
		}
189
	}
P
Per Liden 已提交
190 191
}

192 193 194 195 196
/**
 * tipc_bclink_wakeup_users - wake up pending users
 *
 * Called with no locks taken
 */
197
void tipc_bclink_wakeup_users(struct net *net)
198
{
199
	struct tipc_net *tn = net_generic(net, tipc_net_id);
200

201
	tipc_sk_rcv(net, &tn->bclink->link.wakeupq);
202 203
}

204
/**
 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
 * @n_ptr: node that sent acknowledgement info
 * @acked: broadcast sequence # that has been acknowledged
 *
 * Releases fully-acked buffers from the broadcast transmit queue and, if
 * that frees window space, pushes backlogged packets and flags pending
 * users for wakeup.
 *
 * Node is locked, bclink_lock unlocked.
 */
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
{
	struct sk_buff *skb, *tmp;
	unsigned int released = 0;
	struct net *net = n_ptr->net;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	/* Ignore acks from a node not participating in the broadcast link */
	if (unlikely(!n_ptr->bclink.recv_permitted))
		return;

	tipc_bclink_lock(net);

	/* Bail out if tx queue is empty (no clean up is required) */
	skb = skb_peek(&tn->bcl->transmq);
	if (!skb)
		goto exit;

	/* Determine which messages need to be acknowledged */
	if (acked == INVALID_LINK_SEQ) {
		/*
		 * Contact with specified node has been lost, so need to
		 * acknowledge sent messages only (if other nodes still exist)
		 * or both sent and unsent messages (otherwise)
		 */
		if (tn->bclink->bcast_nodes.count)
			acked = tn->bcl->fsm_msg_cnt;
		else
			acked = tn->bcl->next_out_no;
	} else {
		/*
		 * Bail out if specified sequence number does not correspond
		 * to a message that has been sent and not yet acknowledged
		 */
		if (less(acked, buf_seqno(skb)) ||
		    less(tn->bcl->fsm_msg_cnt, acked) ||
		    less_eq(acked, n_ptr->bclink.acked))
			goto exit;
	}

	/* Skip over packets that node has previously acknowledged */
	skb_queue_walk(&tn->bcl->transmq, skb) {
		if (more(buf_seqno(skb), n_ptr->bclink.acked))
			break;
	}

	/* Update packets that node is now acknowledging */
	skb_queue_walk_from_safe(&tn->bcl->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		bcbuf_decr_acks(skb);
		bclink_set_last_sent(net);
		/* Free a buffer once every destination node has acked it */
		if (bcbuf_acks(skb) == 0) {
			__skb_unlink(skb, &tn->bcl->transmq);
			kfree_skb(skb);
			released = 1;
		}
	}
	n_ptr->bclink.acked = acked;

	/* Try resolving broadcast link congestion, if necessary */
	if (unlikely(skb_peek(&tn->bcl->backlogq))) {
		tipc_link_push_packets(tn->bcl);
		bclink_set_last_sent(net);
	}
	/* Defer the actual wakeup to the caller via an action flag */
	if (unlikely(released && !skb_queue_empty(&tn->bcl->wakeupq)))
		n_ptr->action_flags |= TIPC_WAKEUP_BCAST_USERS;
exit:
	tipc_bclink_unlock(net);
}

281
/**
 * tipc_bclink_update_link_state - update broadcast link state
 * @n_ptr: peer node whose broadcast reception state is being updated
 * @last_sent: last broadcast sequence number that peer reports as sent
 *
 * Tracks how far this node lags behind @n_ptr's broadcasts and, once a
 * loss is confirmed, unicasts a NACK naming the missing sequence range.
 *
 * RCU and node lock set
 */
void tipc_bclink_update_link_state(struct tipc_node *n_ptr,
				   u32 last_sent)
{
	struct sk_buff *buf;
	struct net *net = n_ptr->net;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	/* Ignore "stale" link state info */
	if (less_eq(last_sent, n_ptr->bclink.last_in))
		return;

	/* Update link synchronization state; quit if in sync */
	bclink_update_last_sent(n_ptr, last_sent);

	if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
		return;

	/* Update out-of-sync state; quit if loss is still unconfirmed */
	if ((++n_ptr->bclink.oos_state) == 1) {
		if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
			return;
		n_ptr->bclink.oos_state++;
	}

	/* Don't NACK if one has been recently sent (or seen) */
	/* NOTE(review): odd oos_state values appear to act as a "NACK
	 * in flight" marker (see bclink_peek_nack setting it to 2) —
	 * confirm against protocol docs.
	 */
	if (n_ptr->bclink.oos_state & 0x1)
		return;

	/* Send NACK */
	buf = tipc_buf_acquire(INT_H_SIZE);
	if (buf) {
		struct tipc_msg *msg = buf_msg(buf);
		struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferdq);
		/* NACK up to just before the first deferred packet, or up
		 * to the peer's last reported packet if nothing is deferred
		 */
		u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;

		tipc_msg_init(tn->own_addr, msg, BCAST_PROTOCOL, STATE_MSG,
			      INT_H_SIZE, n_ptr->addr);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tn->net_id);
		msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_to(msg, to);

		tipc_bclink_lock(net);
		tipc_bearer_send(net, MAX_BEARERS, buf, NULL);
		tn->bcl->stats.sent_nacks++;
		tipc_bclink_unlock(net);
		kfree_skb(buf);

		n_ptr->bclink.oos_state++;
	}
}

339
/**
340
 * bclink_peek_nack - monitor retransmission requests sent by other nodes
P
Per Liden 已提交
341
 *
342 343
 * Delay any upcoming NACK by this node if another node has already
 * requested the first message this node is going to ask for.
P
Per Liden 已提交
344
 */
345
static void bclink_peek_nack(struct net *net, struct tipc_msg *msg)
P
Per Liden 已提交
346
{
347
	struct tipc_node *n_ptr = tipc_node_find(net, msg_destnode(msg));
P
Per Liden 已提交
348

349
	if (unlikely(!n_ptr))
P
Per Liden 已提交
350
		return;
351

352
	tipc_node_lock(n_ptr);
353

354
	if (n_ptr->bclink.recv_permitted &&
355 356 357 358
	    (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) &&
	    (n_ptr->bclink.last_in == msg_bcgap_after(msg)))
		n_ptr->bclink.oos_state = 2;

359
	tipc_node_unlock(n_ptr);
P
Per Liden 已提交
360 361
}

362
/* tipc_bclink_xmit - deliver buffer chain to all nodes in cluster
 *                    and to identified node local sockets
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
 */
int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct tipc_bclink *bclink = tn->bclink;
	int rc = 0;
	int bc = 0;		/* set once the chain was queued for broadcast */
	struct sk_buff *skb;
	struct sk_buff_head arrvq;
	struct sk_buff_head inputq;

	/* Prepare clone of message for local node */
	skb = tipc_msg_reassemble(list);
	if (unlikely(!skb)) {
		__skb_queue_purge(list);
		return -EHOSTUNREACH;
	}
	/* Broadcast to all nodes */
	if (likely(bclink)) {
		tipc_bclink_lock(net);
		if (likely(bclink->bcast_nodes.count)) {
			rc = __tipc_link_xmit(net, bcl, list);
			if (likely(!rc)) {
				u32 len = skb_queue_len(&bcl->transmq);

				bclink_set_last_sent(net);
				bcl->stats.queue_sz_counts++;
				bcl->stats.accu_queue_sz += len;
			}
			bc = 1;
		}
		tipc_bclink_unlock(net);
	}

	/* Chain never handed to the link layer: drop it here */
	if (unlikely(!bc))
		__skb_queue_purge(list);

	/* On error the local clone is not delivered either */
	if (unlikely(rc)) {
		kfree_skb(skb);
		return rc;
	}
	/* Deliver message clone */
	__skb_queue_head_init(&arrvq);
	skb_queue_head_init(&inputq);
	__skb_queue_tail(&arrvq, skb);
	tipc_sk_mcast_rcv(net, &arrvq, &inputq);
	return rc;
}

418
/**
 * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
 *
 * Called with both sending node's lock and bclink_lock taken.
 */
static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
{
	struct tipc_net *tn = net_generic(node->net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	bclink_update_last_sent(node, seqno);
	node->bclink.last_in = seqno;
	node->bclink.oos_state = 0;
	bcl->stats.recv_info++;

	/*
	 * Unicast an ACK periodically, ensuring that
	 * all nodes in the cluster don't ACK at the same time
	 */
	if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
		tipc_link_proto_xmit(node->active_links[node->addr & 1],
				     STATE_MSG, 0, 0, 0, 0, 0);
		bcl->stats.sent_acks++;
	}
}

443
/**
 * tipc_bclink_rcv - receive a broadcast packet, and deliver upwards
 * @net: the applicable net namespace
 * @buf: received packet (consumed in all paths)
 *
 * Dispatches broadcast protocol (NACK) messages, delivers in-sequence
 * data to the socket layer, and defers out-of-sequence packets.
 *
 * RCU is locked, no other locks set
 */
void tipc_bclink_rcv(struct net *net, struct sk_buff *buf)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_node *node;
	u32 next_in;
	u32 seqno;
	int deferred = 0;
	int pos = 0;
	struct sk_buff *iskb;
	struct sk_buff_head *arrvq, *inputq;

	/* Screen out unwanted broadcast messages */
	if (msg_mc_netid(msg) != tn->net_id)
		goto exit;

	node = tipc_node_find(net, msg_prevnode(msg));
	if (unlikely(!node))
		goto exit;

	tipc_node_lock(node);
	if (unlikely(!node->bclink.recv_permitted))
		goto unlock;

	/* Handle broadcast protocol message */
	if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
		if (msg_type(msg) != STATE_MSG)
			goto unlock;
		if (msg_destnode(msg) == tn->own_addr) {
			/* NACK addressed to us: retransmit requested range */
			tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
			tipc_node_unlock(node);
			tipc_bclink_lock(net);
			bcl->stats.recv_nacks++;
			tn->bclink->retransmit_to = node;
			bclink_retransmit_pkt(tn, msg_bcgap_after(msg),
					      msg_bcgap_to(msg));
			tipc_bclink_unlock(net);
		} else {
			/* NACK for another node: may delay our own NACK */
			tipc_node_unlock(node);
			bclink_peek_nack(net, msg);
		}
		goto exit;
	}

	/* Handle in-sequence broadcast message */
	seqno = msg_seqno(msg);
	next_in = mod(node->bclink.last_in + 1);
	arrvq = &tn->bclink->arrvq;
	inputq = &tn->bclink->inputq;

	if (likely(seqno == next_in)) {
receive:
		/* Deliver message to destination */
		if (likely(msg_isdata(msg))) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			spin_lock_bh(&inputq->lock);
			__skb_queue_tail(arrvq, buf);
			spin_unlock_bh(&inputq->lock);
			node->action_flags |= TIPC_BCAST_MSG_EVT;
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else if (msg_user(msg) == MSG_BUNDLER) {
			/* Unpack each bundled message onto the arrival queue */
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			bcl->stats.recv_bundles++;
			bcl->stats.recv_bundled += msg_msgcnt(msg);
			pos = 0;
			while (tipc_msg_extract(buf, &iskb, &pos)) {
				spin_lock_bh(&inputq->lock);
				__skb_queue_tail(arrvq, iskb);
				spin_unlock_bh(&inputq->lock);
			}
			node->action_flags |= TIPC_BCAST_MSG_EVT;
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else if (msg_user(msg) == MSG_FRAGMENTER) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			tipc_buf_append(&node->bclink.reasm_buf, &buf);
			/* Reassembly error: both buf and reasm state gone */
			if (unlikely(!buf && !node->bclink.reasm_buf)) {
				tipc_bclink_unlock(net);
				goto unlock;
			}
			bcl->stats.recv_fragments++;
			if (buf) {
				/* Message complete: loop back to deliver it */
				bcl->stats.recv_fragmented++;
				msg = buf_msg(buf);
				tipc_bclink_unlock(net);
				goto receive;
			}
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else {
			/* Other users: accept for sequencing, then discard */
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
			kfree_skb(buf);
		}
		buf = NULL;

		/* Determine new synchronization state */
		tipc_node_lock(node);
		if (unlikely(!tipc_node_is_up(node)))
			goto unlock;

		if (node->bclink.last_in == node->bclink.last_sent)
			goto unlock;

		if (skb_queue_empty(&node->bclink.deferdq)) {
			node->bclink.oos_state = 1;
			goto unlock;
		}

		msg = buf_msg(skb_peek(&node->bclink.deferdq));
		seqno = msg_seqno(msg);
		next_in = mod(next_in + 1);
		if (seqno != next_in)
			goto unlock;

		/* Take in-sequence message from deferred queue & deliver it */
		buf = __skb_dequeue(&node->bclink.deferdq);
		goto receive;
	}

	/* Handle out-of-sequence broadcast message */
	if (less(next_in, seqno)) {
		deferred = tipc_link_defer_pkt(&node->bclink.deferdq,
					       buf);
		bclink_update_last_sent(node, seqno);
		buf = NULL;
	}

	tipc_bclink_lock(net);

	if (deferred)
		bcl->stats.deferred_recv++;
	else
		bcl->stats.duplicates++;

	tipc_bclink_unlock(net);

unlock:
	tipc_node_unlock(node);
exit:
	kfree_skb(buf);
}

598
u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
P
Per Liden 已提交
599
{
600
	return (n_ptr->bclink.recv_permitted &&
601
		(tipc_bclink_get_last_sent(n_ptr->net) != n_ptr->bclink.acked));
P
Per Liden 已提交
602 603 604 605
}


/**
 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
 *
 * Send packet over as many bearers as necessary to reach all nodes
 * that have joined the broadcast link.
 *
 * Returns 0 (packet sent successfully) under all circumstances,
 * since the broadcast link's pseudo-bearer never blocks
 */
static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf,
			      struct tipc_bearer *unused1,
			      struct tipc_media_addr *unused2)
{
	int bp_index;
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
	struct tipc_bclink *bclink = tn->bclink;

	/* Prepare broadcast link message for reliable transmission,
	 * if first time trying to send it;
	 * preparation is skipped for broadcast link protocol messages
	 * since they are sent in an unreliable manner and don't need it
	 */
	if (likely(!msg_non_seq(buf_msg(buf)))) {
		bcbuf_set_acks(buf, bclink->bcast_nodes.count);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tn->net_id);
		tn->bcl->stats.sent_info++;
		/* Sending with no destinations is a logic error upstream */
		if (WARN_ON(!bclink->bcast_nodes.count)) {
			dump_stack();
			return 0;
		}
	}

	/* Send buffer over bearers until all targets reached */
	/* remains tracks destination nodes not yet covered by any bearer */
	bcbearer->remains = bclink->bcast_nodes;

	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
		struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
		struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
		struct tipc_bearer *bp[2] = {p, s};
		/* Alternate primary/secondary per the message's selector */
		struct tipc_bearer *b = bp[msg_link_selector(msg)];
		struct sk_buff *tbuf;

		if (!p)
			break; /* No more bearers to try */
		if (!b)
			b = p;
		tipc_nmap_diff(&bcbearer->remains, &b->nodes,
			       &bcbearer->remains_new);
		if (bcbearer->remains_new.count == bcbearer->remains.count)
			continue; /* Nothing added by bearer pair */

		if (bp_index == 0) {
			/* Use original buffer for first bearer */
			tipc_bearer_send(net, b->identity, buf, &b->bcast_addr);
		} else {
			/* Avoid concurrent buffer access */
			tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
			if (!tbuf)
				break;
			tipc_bearer_send(net, b->identity, tbuf,
					 &b->bcast_addr);
			kfree_skb(tbuf); /* Bearer keeps a clone */
		}
		if (bcbearer->remains_new.count == 0)
			break; /* All targets reached */

		bcbearer->remains = bcbearer->remains_new;
	}

	return 0;
}

/**
 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
 * @net: the applicable net namespace
 * @nm_ptr: node map to update before regrouping the bearers
 * @node: node to add to or remove from @nm_ptr
 * @action: true to add @node, false to remove it
 */
void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
			u32 node, bool action)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
	struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
	struct tipc_bcbearer_pair *bp_curr;
	struct tipc_bearer *b;
	int b_index;
	int pri;

	tipc_bclink_lock(net);

	if (action)
		tipc_nmap_add(nm_ptr, node);
	else
		tipc_nmap_remove(nm_ptr, node);

	/* Group bearers by priority (can assume max of two per priority) */
	memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));

	rcu_read_lock();
	for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
		b = rcu_dereference_rtnl(tn->bearer_list[b_index]);
		/* Skip absent bearers and ones with no reachable nodes */
		if (!b || !b->nodes.count)
			continue;

		if (!bp_temp[b->priority].primary)
			bp_temp[b->priority].primary = b;
		else
			bp_temp[b->priority].secondary = b;
	}
	rcu_read_unlock();

	/* Create array of bearer pairs for broadcasting */
	bp_curr = bcbearer->bpairs;
	memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));

	/* Highest priority first */
	for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {

		if (!bp_temp[pri].primary)
			continue;

		bp_curr->primary = bp_temp[pri].primary;

		if (bp_temp[pri].secondary) {
			/* Pair the two bearers only when they reach the same
			 * node set; otherwise give each its own slot
			 */
			if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
					    &bp_temp[pri].secondary->nodes)) {
				bp_curr->secondary = bp_temp[pri].secondary;
			} else {
				bp_curr++;
				bp_curr->primary = bp_temp[pri].secondary;
			}
		}

		bp_curr++;
	}

	tipc_bclink_unlock(net);
}

744 745
static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794
{
	int i;
	struct nlattr *nest;

	struct nla_map {
		__u32 key;
		__u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
	};

	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i <  ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, nest);

	return 0;
msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

795
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
796 797 798 799 800
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
801 802
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
803 804 805 806

	if (!bcl)
		return 0;

807
	tipc_bclink_lock(net);
808

809
	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	/* The broadcast link is always up */
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		goto attr_msg_full;

	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
		goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->next_in_no))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->next_out_no))
		goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->queue_limit[0]))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
	if (err)
		goto attr_msg_full;

842
	tipc_bclink_unlock(net);
843 844 845 846 847 848 849 850 851 852
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
853
	tipc_bclink_unlock(net);
854 855 856 857
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
P
Per Liden 已提交
858

859
int tipc_bclink_reset_stats(struct net *net)
P
Per Liden 已提交
860
{
861 862 863
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

P
Per Liden 已提交
864 865 866
	if (!bcl)
		return -ENOPROTOOPT;

867
	tipc_bclink_lock(net);
P
Per Liden 已提交
868
	memset(&bcl->stats, 0, sizeof(bcl->stats));
869
	tipc_bclink_unlock(net);
870
	return 0;
P
Per Liden 已提交
871 872
}

873
/* tipc_bclink_set_queue_limits - set the broadcast link window size
 *
 * Returns -ENOPROTOOPT when no broadcast link exists, -EINVAL when
 * @limit falls outside [TIPC_MIN_LINK_WIN, TIPC_MAX_LINK_WIN], else 0.
 */
int tipc_bclink_set_queue_limits(struct net *net, u32 limit)
{
	struct tipc_net *tipc_net = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tipc_net->bcl;

	if (!bcl)
		return -ENOPROTOOPT;
	if (limit < TIPC_MIN_LINK_WIN || limit > TIPC_MAX_LINK_WIN)
		return -EINVAL;

	tipc_bclink_lock(net);
	tipc_link_set_queue_limits(bcl, limit);
	tipc_bclink_unlock(net);
	return 0;
}

889
/* tipc_bclink_init - create and publish this namespace's broadcast link,
 * its pseudo-bearer and pseudo-node
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int tipc_bclink_init(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer;
	struct tipc_bclink *bclink;
	struct tipc_link *bcl;

	bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
	if (!bcbearer)
		return -ENOMEM;

	bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
	if (!bclink) {
		kfree(bcbearer);
		return -ENOMEM;
	}

	/* The broadcast link is embedded in the bclink structure */
	bcl = &bclink->link;
	bcbearer->bearer.media = &bcbearer->media;
	/* Outgoing broadcasts are funneled through the pseudo-bearer */
	bcbearer->media.send_msg = tipc_bcbearer_send;
	sprintf(bcbearer->media.name, "tipc-broadcast");

	spin_lock_init(&bclink->lock);
	__skb_queue_head_init(&bcl->transmq);
	__skb_queue_head_init(&bcl->backlogq);
	__skb_queue_head_init(&bcl->deferdq);
	skb_queue_head_init(&bcl->wakeupq);
	bcl->next_out_no = 1;
	spin_lock_init(&bclink->node.lock);
	__skb_queue_head_init(&bclink->arrvq);
	skb_queue_head_init(&bclink->inputq);
	/* The link is owned by the bclink's embedded pseudo-node */
	bcl->owner = &bclink->node;
	bcl->owner->net = net;
	bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
	tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
	/* The pseudo-bearer lives in the extra bearer_list slot */
	bcl->bearer_id = MAX_BEARERS;
	rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
	bcl->state = WORKING_WORKING;
	bcl->pmsg = (struct tipc_msg *)&bcl->proto_msg;
	msg_set_prevnode(bcl->pmsg, tn->own_addr);
	strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
	tn->bcbearer = bcbearer;
	tn->bclink = bclink;
	tn->bcl = bcl;
	return 0;
}

936
/* tipc_bclink_stop - tear down the broadcast link for this namespace
 *
 * Unpublishes the pseudo-bearer via RCU and waits for readers before
 * freeing; the statement order here is load-bearing.
 */
void tipc_bclink_stop(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_link_purge_queues(tn->bcl);
	tipc_bclink_unlock(net);

	RCU_INIT_POINTER(tn->bearer_list[BCBEARER], NULL);
	/* Wait for in-flight RCU readers before freeing the structures */
	synchronize_net();
	kfree(tn->bcbearer);
	kfree(tn->bclink);
}

950 951 952
/**
 * tipc_nmap_add - add a node to a node map
 */
953
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
954 955 956 957 958 959 960 961 962 963 964 965 966 967
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) == 0) {
		nm_ptr->count++;
		nm_ptr->map[w] |= mask;
	}
}

/**
 * tipc_nmap_remove - remove a node from a node map
 */
968
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) != 0) {
		nm_ptr->map[w] &= ~mask;
		nm_ptr->count--;
	}
}

/**
 * tipc_nmap_diff - find differences between node maps
 * @nm_a: input node map A
 * @nm_b: input node map B
 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
 */
986 987 988
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff)
989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006
{
	int stop = ARRAY_SIZE(nm_a->map);
	int w;
	int b;
	u32 map;

	memset(nm_diff, 0, sizeof(*nm_diff));
	for (w = 0; w < stop; w++) {
		map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
		nm_diff->map[w] = map;
		if (map != 0) {
			for (b = 0 ; b < WSIZE; b++) {
				if (map & (1 << b))
					nm_diff->count++;
			}
		}
	}
}