bcast.c 26.4 KB
Newer Older
P
Per Liden 已提交
1 2
/*
 * net/tipc/bcast.c: TIPC broadcast code
3
 *
4
 * Copyright (c) 2004-2006, 2014-2015, Ericsson AB
P
Per Liden 已提交
5
 * Copyright (c) 2004, Intel Corporation.
6
 * Copyright (c) 2005, 2010-2011, Wind River Systems
P
Per Liden 已提交
7 8
 * All rights reserved.
 *
P
Per Liden 已提交
9
 * Redistribution and use in source and binary forms, with or without
P
Per Liden 已提交
10 11
 * modification, are permitted provided that the following conditions are met:
 *
P
Per Liden 已提交
12 13 14 15 16 17 18 19
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
P
Per Liden 已提交
20
 *
P
Per Liden 已提交
21 22 23 24 25 26 27 28 29 30 31 32 33 34
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
P
Per Liden 已提交
35 36 37
 * POSSIBILITY OF SUCH DAMAGE.
 */

38 39
#include "socket.h"
#include "msg.h"
P
Per Liden 已提交
40
#include "bcast.h"
41
#include "name_distr.h"
42
#include "core.h"
P
Per Liden 已提交
43

44 45
#define	MAX_PKT_DEFAULT_MCAST	1500	/* bcast link max packet size (fixed) */
#define	BCLINK_WIN_DEFAULT	20	/* bcast link window size (default) */
P
Per Liden 已提交
46

47
/* Name of the broadcast link, as exposed to users and in log messages */
const char tipc_bclink_name[] = "broadcast-link";

/* Forward declarations for the node-map helpers defined at end of file */
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff);
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
P
Per Liden 已提交
54

55
/* tipc_bclink_lock - serialize access to this netns' broadcast link state */
static void tipc_bclink_lock(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	spin_lock_bh(&tn->bclink->lock);
}

62
/* tipc_bclink_unlock - release the broadcast link lock
 *
 * If a TIPC_BCLINK_RESET event was flagged while the lock was held,
 * reset all links to the node awaiting retransmission.  The reset is
 * performed only after dropping bclink->lock, since tipc_link_reset_all()
 * takes node locks of its own.
 */
static void tipc_bclink_unlock(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node = NULL;

	/* Fast path: no pending events to act upon */
	if (likely(!tn->bclink->flags)) {
		spin_unlock_bh(&tn->bclink->lock);
		return;
	}

	if (tn->bclink->flags & TIPC_BCLINK_RESET) {
		tn->bclink->flags &= ~TIPC_BCLINK_RESET;
		node = tipc_bclink_retransmit_to(net);
	}
	spin_unlock_bh(&tn->bclink->lock);

	if (node)
		tipc_link_reset_all(node);
}

82 83 84 85 86 87 88
/* tipc_bclink_input - deliver queued broadcast messages to local sockets */
void tipc_bclink_input(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_sk_mcast_rcv(net, &tn->bclink->arrvq, &tn->bclink->inputq);
}

89 90 91 92 93
/* tipc_bclink_get_mtu - broadcast link max packet size (fixed, see
 * MAX_PKT_DEFAULT_MCAST)
 */
uint  tipc_bclink_get_mtu(void)
{
	return MAX_PKT_DEFAULT_MCAST;
}

94
/* tipc_bclink_set_flags - flag broadcast link events for later handling
 *
 * Flags are acted upon by the next tipc_bclink_unlock() call.
 * NOTE(review): the OR is not atomic - presumably always called with
 * bclink lock held; verify at call sites.
 */
void tipc_bclink_set_flags(struct net *net, unsigned int flags)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tn->bclink->flags |= flags;
}

S
Sam Ravnborg 已提交
101
static u32 bcbuf_acks(struct sk_buff *buf)
P
Per Liden 已提交
102
{
103
	return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
P
Per Liden 已提交
104 105
}

S
Sam Ravnborg 已提交
106
/* bcbuf_set_acks - stash the outstanding-acknowledgement count in the
 * skb control block's handle field
 */
static void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
{
	unsigned long stored = acks;

	TIPC_SKB_CB(buf)->handle = (void *)stored;
}

S
Sam Ravnborg 已提交
111
static void bcbuf_decr_acks(struct sk_buff *buf)
P
Per Liden 已提交
112 113 114 115
{
	bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}

116
/* tipc_bclink_add_node - register a newly discovered node as a broadcast
 * destination
 */
void tipc_bclink_add_node(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_nmap_add(&tn->bclink->bcast_nodes, addr);
	tipc_bclink_unlock(net);
}

125
/* tipc_bclink_remove_node - drop a lost node from the set of broadcast
 * destinations
 */
void tipc_bclink_remove_node(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_nmap_remove(&tn->bclink->bcast_nodes, addr);
	tipc_bclink_unlock(net);
}
P
Per Liden 已提交
133

134
/* bclink_set_last_sent - recompute the highest sequence number actually
 * transmitted on the broadcast link
 *
 * If next_out is set, packets up to (but not including) it have been sent;
 * otherwise everything up to next_out_no - 1 has been sent.
 */
static void bclink_set_last_sent(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (bcl->next_out)
		bcl->fsm_msg_cnt = mod(buf_seqno(bcl->next_out) - 1);
	else
		bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1);
}

145
/* tipc_bclink_get_last_sent - highest broadcast sequence number sent so far
 * (maintained in bcl->fsm_msg_cnt by bclink_set_last_sent())
 */
u32 tipc_bclink_get_last_sent(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return tn->bcl->fsm_msg_cnt;
}

152
/* bclink_update_last_sent - raise the node's view of the peer's last-sent
 * broadcast sequence number; never moves it backwards
 */
static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
{
	if (less_eq(node->bclink.last_sent, seqno))
		node->bclink.last_sent = seqno;
}


159
/**
 * tipc_bclink_retransmit_to - get most recent node to request retransmission
 *
 * Called with bclink_lock locked
 */
struct tipc_node *tipc_bclink_retransmit_to(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return tn->bclink->retransmit_to;
}

171
/**
 * bclink_retransmit_pkt - retransmit broadcast packets
 * @after: sequence number of last packet to *not* retransmit
 * @to: sequence number of last packet to retransmit
 *
 * Called with bclink_lock locked
 */
static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
{
	struct sk_buff *skb;
	struct tipc_link *bcl = tn->bcl;

	/* Find the first unacknowledged packet past @after; the link layer
	 * retransmits mod(to - after) packets starting from it.
	 */
	skb_queue_walk(&bcl->outqueue, skb) {
		if (more(buf_seqno(skb), after)) {
			tipc_link_retransmit(bcl, skb, mod(to - after));
			break;
		}
	}
}

191 192 193 194 195
/**
 * tipc_bclink_wakeup_users - wake up pending users
 *
 * Called with no locks taken
 */
void tipc_bclink_wakeup_users(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_sk_rcv(net, &tn->bclink->link.wakeupq);
}

203
/**
 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
 * @n_ptr: node that sent acknowledgement info
 * @acked: broadcast sequence # that has been acknowledged
 *
 * Node is locked, bclink_lock unlocked.
 *
 * Walks the broadcast link's transmit queue and releases every buffer that
 * has now been acknowledged by all destination nodes; may also trigger
 * transmission of queued packets and wakeup of blocked senders.
 */
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
{
	struct sk_buff *skb, *tmp;
	struct sk_buff *next;
	unsigned int released = 0;
	struct net *net = n_ptr->net;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	/* Ignore nodes we are not exchanging broadcast traffic with */
	if (unlikely(!n_ptr->bclink.recv_permitted))
		return;

	tipc_bclink_lock(net);

	/* Bail out if tx queue is empty (no clean up is required) */
	skb = skb_peek(&tn->bcl->outqueue);
	if (!skb)
		goto exit;

	/* Determine which messages need to be acknowledged */
	if (acked == INVALID_LINK_SEQ) {
		/*
		 * Contact with specified node has been lost, so need to
		 * acknowledge sent messages only (if other nodes still exist)
		 * or both sent and unsent messages (otherwise)
		 */
		if (tn->bclink->bcast_nodes.count)
			acked = tn->bcl->fsm_msg_cnt;
		else
			acked = tn->bcl->next_out_no;
	} else {
		/*
		 * Bail out if specified sequence number does not correspond
		 * to a message that has been sent and not yet acknowledged
		 */
		if (less(acked, buf_seqno(skb)) ||
		    less(tn->bcl->fsm_msg_cnt, acked) ||
		    less_eq(acked, n_ptr->bclink.acked))
			goto exit;
	}

	/* Skip over packets that node has previously acknowledged */
	skb_queue_walk(&tn->bcl->outqueue, skb) {
		if (more(buf_seqno(skb), n_ptr->bclink.acked))
			break;
	}

	/* Update packets that node is now acknowledging */
	skb_queue_walk_from_safe(&tn->bcl->outqueue, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;

		next = tipc_skb_queue_next(&tn->bcl->outqueue, skb);
		if (skb != tn->bcl->next_out) {
			bcbuf_decr_acks(skb);
		} else {
			/* First never-sent packet: zero its ack count and
			 * advance the send boundary past it
			 */
			bcbuf_set_acks(skb, 0);
			tn->bcl->next_out = next;
			bclink_set_last_sent(net);
		}

		/* Free the buffer once every destination has acked it */
		if (bcbuf_acks(skb) == 0) {
			__skb_unlink(skb, &tn->bcl->outqueue);
			kfree_skb(skb);
			released = 1;
		}
	}
	n_ptr->bclink.acked = acked;

	/* Try resolving broadcast link congestion, if necessary */
	if (unlikely(tn->bcl->next_out)) {
		tipc_link_push_packets(tn->bcl);
		bclink_set_last_sent(net);
	}
	if (unlikely(released && !skb_queue_empty(&tn->bcl->wakeupq)))
		n_ptr->action_flags |= TIPC_WAKEUP_BCAST_USERS;
exit:
	tipc_bclink_unlock(net);
}

289
/**
 * tipc_bclink_update_link_state - update broadcast link state
 *
 * RCU and node lock set
 *
 * Records the peer's reported last-sent broadcast sequence number and, if
 * this node has detected packet loss, sends a NACK requesting
 * retransmission of the missing range.
 */
void tipc_bclink_update_link_state(struct tipc_node *n_ptr,
				   u32 last_sent)
{
	struct sk_buff *buf;
	struct net *net = n_ptr->net;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	/* Ignore "stale" link state info */
	if (less_eq(last_sent, n_ptr->bclink.last_in))
		return;

	/* Update link synchronization state; quit if in sync */
	bclink_update_last_sent(n_ptr, last_sent);

	if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
		return;

	/* Update out-of-sync state; quit if loss is still unconfirmed */
	if ((++n_ptr->bclink.oos_state) == 1) {
		if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
			return;
		n_ptr->bclink.oos_state++;
	}

	/* Don't NACK if one has been recently sent (or seen) */
	if (n_ptr->bclink.oos_state & 0x1)
		return;

	/* Send NACK */
	buf = tipc_buf_acquire(INT_H_SIZE);
	if (buf) {
		struct tipc_msg *msg = buf_msg(buf);
		struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferred_queue);
		/* NACK up to the first deferred packet, or everything the
		 * peer claims to have sent if nothing is deferred
		 */
		u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;

		tipc_msg_init(tn->own_addr, msg, BCAST_PROTOCOL, STATE_MSG,
			      INT_H_SIZE, n_ptr->addr);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tn->net_id);
		msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_to(msg, to);

		tipc_bclink_lock(net);
		tipc_bearer_send(net, MAX_BEARERS, buf, NULL);
		tn->bcl->stats.sent_nacks++;
		tipc_bclink_unlock(net);
		kfree_skb(buf);

		/* Odd oos_state suppresses further NACKs for a while */
		n_ptr->bclink.oos_state++;
	}
}

347
/**
 * bclink_peek_nack - monitor retransmission requests sent by other nodes
 *
 * Delay any upcoming NACK by this node if another node has already
 * requested the first message this node is going to ask for.
 */
static void bclink_peek_nack(struct net *net, struct tipc_msg *msg)
{
	struct tipc_node *n_ptr = tipc_node_find(net, msg_destnode(msg));

	if (unlikely(!n_ptr))
		return;

	tipc_node_lock(n_ptr);

	/* Setting oos_state to 2 (even) defers our own NACK, since the
	 * other node's request covers the same gap
	 */
	if (n_ptr->bclink.recv_permitted &&
	    (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) &&
	    (n_ptr->bclink.last_in == msg_bcgap_after(msg)))
		n_ptr->bclink.oos_state = 2;

	tipc_node_unlock(n_ptr);
}

370
/* tipc_bclink_xmit - deliver buffer chain to all nodes in cluster
 *                    and to identified node local sockets
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
 */
int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct tipc_bclink *bclink = tn->bclink;
	int rc = 0;
	int bc = 0;	/* set once the chain is handed to the bcast link */
	struct sk_buff *skb;
	struct sk_buff_head arrvq;
	struct sk_buff_head inputq;

	/* Prepare clone of message for local node */
	skb = tipc_msg_reassemble(list);
	if (unlikely(!skb)) {
		__skb_queue_purge(list);
		return -EHOSTUNREACH;
	}

	/* Broadcast to all nodes */
	if (likely(bclink)) {
		tipc_bclink_lock(net);
		if (likely(bclink->bcast_nodes.count)) {
			rc = __tipc_link_xmit(net, bcl, list);
			if (likely(!rc)) {
				u32 len = skb_queue_len(&bcl->outqueue);

				bclink_set_last_sent(net);
				bcl->stats.queue_sz_counts++;
				bcl->stats.accu_queue_sz += len;
			}
			bc = 1;
		}
		tipc_bclink_unlock(net);
	}

	/* Chain was never consumed by the link layer: drop it here */
	if (unlikely(!bc))
		__skb_queue_purge(list);

	if (unlikely(rc)) {
		kfree_skb(skb);
		return rc;
	}
	/* Deliver message clone */
	__skb_queue_head_init(&arrvq);
	skb_queue_head_init(&inputq);
	__skb_queue_tail(&arrvq, skb);
	tipc_sk_mcast_rcv(net, &arrvq, &inputq);
	return rc;
}

427
/**
 * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
 *
 * Called with both sending node's lock and bclink_lock taken.
 */
static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
{
	struct tipc_net *tn = net_generic(node->net, tipc_net_id);

	bclink_update_last_sent(node, seqno);
	node->bclink.last_in = seqno;
	node->bclink.oos_state = 0;
	tn->bcl->stats.recv_info++;

	/*
	 * Unicast an ACK periodically, ensuring that
	 * all nodes in the cluster don't ACK at the same time
	 */
	if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
		tipc_link_proto_xmit(node->active_links[node->addr & 1],
				     STATE_MSG, 0, 0, 0, 0, 0);
		tn->bcl->stats.sent_acks++;
	}
}

452
/**
 * tipc_bclink_rcv - receive a broadcast packet, and deliver upwards
 *
 * RCU is locked, no other locks set
 *
 * Dispatches on message type: broadcast protocol (NACK) messages trigger
 * retransmission; in-sequence data/bundle/fragment messages are delivered
 * or reassembled; out-of-sequence messages are deferred for later.
 */
void tipc_bclink_rcv(struct net *net, struct sk_buff *buf)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_node *node;
	u32 next_in;
	u32 seqno;
	int deferred = 0;
	int pos = 0;
	struct sk_buff *iskb;
	struct sk_buff_head *arrvq, *inputq;

	/* Screen out unwanted broadcast messages */
	if (msg_mc_netid(msg) != tn->net_id)
		goto exit;

	node = tipc_node_find(net, msg_prevnode(msg));
	if (unlikely(!node))
		goto exit;

	tipc_node_lock(node);
	if (unlikely(!node->bclink.recv_permitted))
		goto unlock;

	/* Handle broadcast protocol message */
	if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
		if (msg_type(msg) != STATE_MSG)
			goto unlock;
		if (msg_destnode(msg) == tn->own_addr) {
			/* NACK addressed to us: retransmit requested range */
			tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
			tipc_node_unlock(node);
			tipc_bclink_lock(net);
			bcl->stats.recv_nacks++;
			tn->bclink->retransmit_to = node;
			bclink_retransmit_pkt(tn, msg_bcgap_after(msg),
					      msg_bcgap_to(msg));
			tipc_bclink_unlock(net);
		} else {
			/* NACK for another node: may defer our own NACK */
			tipc_node_unlock(node);
			bclink_peek_nack(net, msg);
		}
		goto exit;
	}

	/* Handle in-sequence broadcast message */
	seqno = msg_seqno(msg);
	next_in = mod(node->bclink.last_in + 1);
	arrvq = &tn->bclink->arrvq;
	inputq = &tn->bclink->inputq;

	if (likely(seqno == next_in)) {
receive:
		/* Deliver message to destination */
		if (likely(msg_isdata(msg))) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			spin_lock_bh(&inputq->lock);
			__skb_queue_tail(arrvq, buf);
			spin_unlock_bh(&inputq->lock);
			node->action_flags |= TIPC_BCAST_MSG_EVT;
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else if (msg_user(msg) == MSG_BUNDLER) {
			/* Unpack every bundled message into the arrival queue */
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			bcl->stats.recv_bundles++;
			bcl->stats.recv_bundled += msg_msgcnt(msg);
			pos = 0;
			while (tipc_msg_extract(buf, &iskb, &pos)) {
				spin_lock_bh(&inputq->lock);
				__skb_queue_tail(arrvq, iskb);
				spin_unlock_bh(&inputq->lock);
			}
			node->action_flags |= TIPC_BCAST_MSG_EVT;
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else if (msg_user(msg) == MSG_FRAGMENTER) {
			/* Accumulate fragments; buf becomes the full message
			 * once reassembly completes
			 */
			tipc_buf_append(&node->bclink.reasm_buf, &buf);
			if (unlikely(!buf && !node->bclink.reasm_buf))
				goto unlock;
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			bcl->stats.recv_fragments++;
			if (buf) {
				bcl->stats.recv_fragmented++;
				msg = buf_msg(buf);
				tipc_bclink_unlock(net);
				goto receive;
			}
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else {
			/* Other message types are accepted but discarded */
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
			kfree_skb(buf);
		}
		buf = NULL;

		/* Determine new synchronization state */
		tipc_node_lock(node);
		if (unlikely(!tipc_node_is_up(node)))
			goto unlock;

		if (node->bclink.last_in == node->bclink.last_sent)
			goto unlock;

		if (skb_queue_empty(&node->bclink.deferred_queue)) {
			node->bclink.oos_state = 1;
			goto unlock;
		}

		msg = buf_msg(skb_peek(&node->bclink.deferred_queue));
		seqno = msg_seqno(msg);
		next_in = mod(next_in + 1);
		if (seqno != next_in)
			goto unlock;

		/* Take in-sequence message from deferred queue & deliver it */
		buf = __skb_dequeue(&node->bclink.deferred_queue);
		goto receive;
	}

	/* Handle out-of-sequence broadcast message */
	if (less(next_in, seqno)) {
		deferred = tipc_link_defer_pkt(&node->bclink.deferred_queue,
					       buf);
		bclink_update_last_sent(node, seqno);
		buf = NULL;
	}

	tipc_bclink_lock(net);

	if (deferred)
		bcl->stats.deferred_recv++;
	else
		bcl->stats.duplicates++;

	tipc_bclink_unlock(net);

unlock:
	tipc_node_unlock(node);
exit:
	kfree_skb(buf);
}

605
/* tipc_bclink_acks_missing - non-zero if the node still owes us
 * acknowledgements for sent broadcast packets
 */
u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
{
	return (n_ptr->bclink.recv_permitted &&
		(tipc_bclink_get_last_sent(n_ptr->net) != n_ptr->bclink.acked));
}


/**
 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
 *
 * Send packet over as many bearers as necessary to reach all nodes
 * that have joined the broadcast link.
 *
 * Returns 0 (packet sent successfully) under all circumstances,
 * since the broadcast link's pseudo-bearer never blocks
 */
static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf,
			      struct tipc_bearer *unused1,
			      struct tipc_media_addr *unused2)
{
	int bp_index;
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
	struct tipc_bclink *bclink = tn->bclink;

	/* Prepare broadcast link message for reliable transmission,
	 * if first time trying to send it;
	 * preparation is skipped for broadcast link protocol messages
	 * since they are sent in an unreliable manner and don't need it
	 */
	if (likely(!msg_non_seq(buf_msg(buf)))) {
		bcbuf_set_acks(buf, bclink->bcast_nodes.count);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tn->net_id);
		tn->bcl->stats.sent_info++;

		if (WARN_ON(!bclink->bcast_nodes.count)) {
			dump_stack();
			return 0;
		}
	}

	/* Send buffer over bearers until all targets reached */
	bcbearer->remains = bclink->bcast_nodes;

	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
		struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
		struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
		struct tipc_bearer *bp[2] = {p, s};
		/* Alternate between primary and secondary per link selector */
		struct tipc_bearer *b = bp[msg_link_selector(msg)];
		struct sk_buff *tbuf;

		if (!p)
			break; /* No more bearers to try */
		if (!b)
			b = p;
		tipc_nmap_diff(&bcbearer->remains, &b->nodes,
			       &bcbearer->remains_new);
		if (bcbearer->remains_new.count == bcbearer->remains.count)
			continue; /* Nothing added by bearer pair */

		if (bp_index == 0) {
			/* Use original buffer for first bearer */
			tipc_bearer_send(net, b->identity, buf, &b->bcast_addr);
		} else {
			/* Avoid concurrent buffer access */
			tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
			if (!tbuf)
				break;
			tipc_bearer_send(net, b->identity, tbuf,
					 &b->bcast_addr);
			kfree_skb(tbuf); /* Bearer keeps a clone */
		}
		if (bcbearer->remains_new.count == 0)
			break; /* All targets reached */

		bcbearer->remains = bcbearer->remains_new;
	}

	return 0;
}

/**
 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
 *
 * Also adds/removes @node from @nm_ptr according to @action before
 * rebuilding the pair table, all under bclink lock.
 */
void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
			u32 node, bool action)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
	struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
	struct tipc_bcbearer_pair *bp_curr;
	struct tipc_bearer *b;
	int b_index;
	int pri;

	tipc_bclink_lock(net);

	if (action)
		tipc_nmap_add(nm_ptr, node);
	else
		tipc_nmap_remove(nm_ptr, node);

	/* Group bearers by priority (can assume max of two per priority) */
	memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));

	rcu_read_lock();
	for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
		b = rcu_dereference_rtnl(tn->bearer_list[b_index]);
		if (!b || !b->nodes.count)
			continue;

		if (!bp_temp[b->priority].primary)
			bp_temp[b->priority].primary = b;
		else
			bp_temp[b->priority].secondary = b;
	}
	rcu_read_unlock();

	/* Create array of bearer pairs for broadcasting */
	bp_curr = bcbearer->bpairs;
	memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));

	for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {

		if (!bp_temp[pri].primary)
			continue;

		bp_curr->primary = bp_temp[pri].primary;

		if (bp_temp[pri].secondary) {
			/* Pair the bearers only if they reach the same nodes;
			 * otherwise the secondary gets its own table entry
			 */
			if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
					    &bp_temp[pri].secondary->nodes)) {
				bp_curr->secondary = bp_temp[pri].secondary;
			} else {
				bp_curr++;
				bp_curr->primary = bp_temp[pri].secondary;
			}
		}

		bp_curr++;
	}

	tipc_bclink_unlock(net);
}

752 753
/* __tipc_nl_add_bc_link_stat - nest broadcast-link statistics into a
 * netlink message; returns 0 or -EMSGSIZE if the skb runs out of room
 */
static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
{
	int i;
	struct nlattr *nest;

	/* Table mapping netlink attribute ids to stat counter values */
	struct nla_map {
		__u32 key;
		__u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
	};

	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i <  ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, nest);

	return 0;
msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

803
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
804 805 806 807 808
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
809 810
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
811 812 813 814

	if (!bcl)
		return 0;

815
	tipc_bclink_lock(net);
816

817
	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	/* The broadcast link is always up */
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		goto attr_msg_full;

	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
		goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->next_in_no))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->next_out_no))
		goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->queue_limit[0]))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
	if (err)
		goto attr_msg_full;

850
	tipc_bclink_unlock(net);
851 852 853 854 855 856 857 858 859 860
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
861
	tipc_bclink_unlock(net);
862 863 864 865
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
P
Per Liden 已提交
866

867
/* tipc_bclink_reset_stats - zero all broadcast link counters
 * Returns -ENOPROTOOPT if the broadcast link has not been created.
 */
int tipc_bclink_reset_stats(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return -ENOPROTOOPT;

	tipc_bclink_lock(net);
	memset(&bcl->stats, 0, sizeof(bcl->stats));
	tipc_bclink_unlock(net);
	return 0;
}

881
/* tipc_bclink_set_queue_limits - set the broadcast link window size
 * @limit: new window, must lie in [TIPC_MIN_LINK_WIN, TIPC_MAX_LINK_WIN]
 * Returns 0, -ENOPROTOOPT if no broadcast link, or -EINVAL on bad limit.
 */
int tipc_bclink_set_queue_limits(struct net *net, u32 limit)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return -ENOPROTOOPT;
	if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
		return -EINVAL;

	tipc_bclink_lock(net);
	tipc_link_set_queue_limits(bcl, limit);
	tipc_bclink_unlock(net);
	return 0;
}

897
/* tipc_bclink_init - create this namespace's broadcast link, its pseudo
 * bearer and pseudo node, and register them in the tipc_net instance
 * Returns 0 on success or -ENOMEM.
 */
int tipc_bclink_init(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer;
	struct tipc_bclink *bclink;
	struct tipc_link *bcl;

	bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
	if (!bcbearer)
		return -ENOMEM;

	bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
	if (!bclink) {
		kfree(bcbearer);
		return -ENOMEM;
	}

	bcl = &bclink->link;
	/* Wire the pseudo-bearer's send hook to the broadcast sender */
	bcbearer->bearer.media = &bcbearer->media;
	bcbearer->media.send_msg = tipc_bcbearer_send;
	sprintf(bcbearer->media.name, "tipc-broadcast");

	spin_lock_init(&bclink->lock);
	__skb_queue_head_init(&bcl->outqueue);
	__skb_queue_head_init(&bcl->deferred_queue);
	skb_queue_head_init(&bcl->wakeupq);
	bcl->next_out_no = 1;
	spin_lock_init(&bclink->node.lock);
	__skb_queue_head_init(&bclink->arrvq);
	skb_queue_head_init(&bclink->inputq);
	bcl->owner = &bclink->node;
	bcl->owner->net = net;
	bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
	tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
	/* The pseudo-bearer occupies the slot just past the real bearers */
	bcl->bearer_id = MAX_BEARERS;
	rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
	bcl->state = WORKING_WORKING;
	bcl->pmsg = (struct tipc_msg *)&bcl->proto_msg;
	msg_set_prevnode(bcl->pmsg, tn->own_addr);
	strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
	tn->bcbearer = bcbearer;
	tn->bclink = bclink;
	tn->bcl = bcl;
	return 0;
}

943
/* tipc_bclink_stop - tear down this namespace's broadcast link
 *
 * Purges queued packets, unpublishes the pseudo-bearer and, after an RCU
 * grace period, frees the bearer and link structures.
 */
void tipc_bclink_stop(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_link_purge_queues(tn->bcl);
	tipc_bclink_unlock(net);

	/* BCBEARER indexes the same slot written as MAX_BEARERS in init */
	RCU_INIT_POINTER(tn->bearer_list[BCBEARER], NULL);
	synchronize_net();	/* wait out RCU readers of the bearer */
	kfree(tn->bcbearer);
	kfree(tn->bclink);
}

957 958 959
/**
 * tipc_nmap_add - add a node to a node map
 */
960
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
961 962 963 964 965 966 967 968 969 970 971 972 973 974
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) == 0) {
		nm_ptr->count++;
		nm_ptr->map[w] |= mask;
	}
}

/**
 * tipc_nmap_remove - remove a node from a node map
 */
975
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) != 0) {
		nm_ptr->map[w] &= ~mask;
		nm_ptr->count--;
	}
}

/**
 * tipc_nmap_diff - find differences between node maps
 * @nm_a: input node map A
 * @nm_b: input node map B
 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
 */
993 994 995
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff)
996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013
{
	int stop = ARRAY_SIZE(nm_a->map);
	int w;
	int b;
	u32 map;

	memset(nm_diff, 0, sizeof(*nm_diff));
	for (w = 0; w < stop; w++) {
		map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
		nm_diff->map[w] = map;
		if (map != 0) {
			for (b = 0 ; b < WSIZE; b++) {
				if (map & (1 << b))
					nm_diff->count++;
			}
		}
	}
}