/*
 * net/tipc/bcast.c: TIPC broadcast code
 *
 * Copyright (c) 2004-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

38 39
#include "socket.h"
#include "msg.h"
P
Per Liden 已提交
40
#include "bcast.h"
41
#include "name_distr.h"
42
#include "core.h"
P
Per Liden 已提交
43

44 45
#define	MAX_PKT_DEFAULT_MCAST	1500	/* bcast link max packet size (fixed) */
#define	BCLINK_WIN_DEFAULT	20	/* bcast link window size (default) */
P
Per Liden 已提交
46

47
const char tipc_bclink_name[] = "broadcast-link";
P
Per Liden 已提交
48

49 50 51
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff);
52 53
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
P
Per Liden 已提交
54

55
static void tipc_bclink_lock(struct net *net)
56
{
57 58 59
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	spin_lock_bh(&tn->bclink->lock);
60 61
}

62
static void tipc_bclink_unlock(struct net *net)
63
{
64
	struct tipc_net *tn = net_generic(net, tipc_net_id);
65

66
	spin_unlock_bh(&tn->bclink->lock);
67 68
}

69 70 71 72 73 74 75
void tipc_bclink_input(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_sk_mcast_rcv(net, &tn->bclink->arrvq, &tn->bclink->inputq);
}

76 77 78 79 80
uint  tipc_bclink_get_mtu(void)
{
	return MAX_PKT_DEFAULT_MCAST;
}

S
Sam Ravnborg 已提交
81
static u32 bcbuf_acks(struct sk_buff *buf)
P
Per Liden 已提交
82
{
83
	return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
P
Per Liden 已提交
84 85
}

S
Sam Ravnborg 已提交
86
static void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
P
Per Liden 已提交
87
{
88
	TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
P
Per Liden 已提交
89 90
}

S
Sam Ravnborg 已提交
91
static void bcbuf_decr_acks(struct sk_buff *buf)
P
Per Liden 已提交
92 93 94 95
{
	bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}

96
void tipc_bclink_add_node(struct net *net, u32 addr)
97
{
98 99 100 101 102
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_nmap_add(&tn->bclink->bcast_nodes, addr);
	tipc_bclink_unlock(net);
103 104
}

105
void tipc_bclink_remove_node(struct net *net, u32 addr)
106
{
107 108 109 110
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_nmap_remove(&tn->bclink->bcast_nodes, addr);
111 112 113 114 115

	/* Last node? => reset backlog queue */
	if (!tn->bclink->bcast_nodes.count)
		tipc_link_purge_backlog(&tn->bclink->link);

116
	tipc_bclink_unlock(net);
117
}
P
Per Liden 已提交
118

119
static void bclink_set_last_sent(struct net *net)
120
{
121 122 123
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

124
	bcl->silent_intv_cnt = mod(bcl->snd_nxt - 1);
125 126
}

127
u32 tipc_bclink_get_last_sent(struct net *net)
128
{
129 130
	struct tipc_net *tn = net_generic(net, tipc_net_id);

131
	return tn->bcl->silent_intv_cnt;
132 133
}

134
static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
P
Per Liden 已提交
135
{
136 137
	node->bclink.last_sent = less_eq(node->bclink.last_sent, seqno) ?
						seqno : node->bclink.last_sent;
P
Per Liden 已提交
138 139
}

140
/**
141 142
 * tipc_bclink_retransmit_to - get most recent node to request retransmission
 *
143
 * Called with bclink_lock locked
144
 */
145
struct tipc_node *tipc_bclink_retransmit_to(struct net *net)
146
{
147 148 149
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return tn->bclink->retransmit_to;
150 151
}

152
/**
P
Per Liden 已提交
153 154 155
 * bclink_retransmit_pkt - retransmit broadcast packets
 * @after: sequence number of last packet to *not* retransmit
 * @to: sequence number of last packet to retransmit
156
 *
157
 * Called with bclink_lock locked
P
Per Liden 已提交
158
 */
159
static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
P
Per Liden 已提交
160
{
161
	struct sk_buff *skb;
162
	struct tipc_link *bcl = tn->bcl;
P
Per Liden 已提交
163

J
Jon Paul Maloy 已提交
164
	skb_queue_walk(&bcl->transmq, skb) {
165 166
		if (more(buf_seqno(skb), after)) {
			tipc_link_retransmit(bcl, skb, mod(to - after));
167
			break;
168
		}
169
	}
P
Per Liden 已提交
170 171
}

172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195
/**
 * bclink_prepare_wakeup - prepare users for wakeup after congestion
 * @bcl: broadcast link
 * @resultq: queue for users which can be woken up
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to specified queue for wakeup
 */
static void bclink_prepare_wakeup(struct tipc_link *bcl, struct sk_buff_head *resultq)
{
	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
	int imp, lim;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&bcl->wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		lim = bcl->window + bcl->backlog[imp].limit;
		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
		if ((pnd[imp] + bcl->backlog[imp].len) >= lim)
			continue;
		skb_unlink(skb, &bcl->wakeupq);
		skb_queue_tail(resultq, skb);
	}
}

196 197 198 199 200
/**
 * tipc_bclink_wakeup_users - wake up pending users
 *
 * Called with no locks taken
 */
201
void tipc_bclink_wakeup_users(struct net *net)
202
{
203
	struct tipc_net *tn = net_generic(net, tipc_net_id);
204 205
	struct tipc_link *bcl = tn->bcl;
	struct sk_buff_head resultq;
206

207 208 209
	skb_queue_head_init(&resultq);
	bclink_prepare_wakeup(bcl, &resultq);
	tipc_sk_rcv(net, &resultq);
210 211
}

212
/**
213
 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
P
Per Liden 已提交
214 215
 * @n_ptr: node that sent acknowledgement info
 * @acked: broadcast sequence # that has been acknowledged
216
 *
217
 * Node is locked, bclink_lock unlocked.
P
Per Liden 已提交
218
 */
219
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
P
Per Liden 已提交
220
{
221
	struct sk_buff *skb, *tmp;
P
Per Liden 已提交
222
	unsigned int released = 0;
223 224
	struct net *net = n_ptr->net;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
P
Per Liden 已提交
225

226 227 228
	if (unlikely(!n_ptr->bclink.recv_permitted))
		return;

229
	tipc_bclink_lock(net);
230

231
	/* Bail out if tx queue is empty (no clean up is required) */
J
Jon Paul Maloy 已提交
232
	skb = skb_peek(&tn->bcl->transmq);
233
	if (!skb)
234 235 236 237 238 239 240 241 242
		goto exit;

	/* Determine which messages need to be acknowledged */
	if (acked == INVALID_LINK_SEQ) {
		/*
		 * Contact with specified node has been lost, so need to
		 * acknowledge sent messages only (if other nodes still exist)
		 * or both sent and unsent messages (otherwise)
		 */
243
		if (tn->bclink->bcast_nodes.count)
244
			acked = tn->bcl->silent_intv_cnt;
245
		else
246
			acked = tn->bcl->snd_nxt;
247 248 249 250 251
	} else {
		/*
		 * Bail out if specified sequence number does not correspond
		 * to a message that has been sent and not yet acknowledged
		 */
252
		if (less(acked, buf_seqno(skb)) ||
253
		    less(tn->bcl->silent_intv_cnt, acked) ||
254 255 256 257 258
		    less_eq(acked, n_ptr->bclink.acked))
			goto exit;
	}

	/* Skip over packets that node has previously acknowledged */
J
Jon Paul Maloy 已提交
259
	skb_queue_walk(&tn->bcl->transmq, skb) {
260 261 262
		if (more(buf_seqno(skb), n_ptr->bclink.acked))
			break;
	}
P
Per Liden 已提交
263 264

	/* Update packets that node is now acknowledging */
J
Jon Paul Maloy 已提交
265
	skb_queue_walk_from_safe(&tn->bcl->transmq, skb, tmp) {
266 267
		if (more(buf_seqno(skb), acked))
			break;
J
Jon Paul Maloy 已提交
268 269
		bcbuf_decr_acks(skb);
		bclink_set_last_sent(net);
270
		if (bcbuf_acks(skb) == 0) {
J
Jon Paul Maloy 已提交
271
			__skb_unlink(skb, &tn->bcl->transmq);
272
			kfree_skb(skb);
P
Per Liden 已提交
273 274 275 276 277 278
			released = 1;
		}
	}
	n_ptr->bclink.acked = acked;

	/* Try resolving broadcast link congestion, if necessary */
J
Jon Paul Maloy 已提交
279
	if (unlikely(skb_peek(&tn->bcl->backlogq))) {
280 281
		tipc_link_push_packets(tn->bcl);
		bclink_set_last_sent(net);
282
	}
283
	if (unlikely(released && !skb_queue_empty(&tn->bcl->wakeupq)))
284
		n_ptr->action_flags |= TIPC_WAKEUP_BCAST_USERS;
285
exit:
286
	tipc_bclink_unlock(net);
P
Per Liden 已提交
287 288
}

289
/**
290
 * tipc_bclink_update_link_state - update broadcast link state
291
 *
Y
Ying Xue 已提交
292
 * RCU and node lock set
P
Per Liden 已提交
293
 */
294
void tipc_bclink_update_link_state(struct tipc_node *n_ptr,
295
				   u32 last_sent)
P
Per Liden 已提交
296
{
297
	struct sk_buff *buf;
298
	struct net *net = n_ptr->net;
299
	struct tipc_net *tn = net_generic(net, tipc_net_id);
P
Per Liden 已提交
300

301 302 303
	/* Ignore "stale" link state info */
	if (less_eq(last_sent, n_ptr->bclink.last_in))
		return;
P
Per Liden 已提交
304

305 306 307 308 309 310 311 312 313 314 315 316 317 318 319
	/* Update link synchronization state; quit if in sync */
	bclink_update_last_sent(n_ptr, last_sent);

	if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
		return;

	/* Update out-of-sync state; quit if loss is still unconfirmed */
	if ((++n_ptr->bclink.oos_state) == 1) {
		if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
			return;
		n_ptr->bclink.oos_state++;
	}

	/* Don't NACK if one has been recently sent (or seen) */
	if (n_ptr->bclink.oos_state & 0x1)
P
Per Liden 已提交
320 321
		return;

322
	/* Send NACK */
323
	buf = tipc_buf_acquire(INT_H_SIZE);
P
Per Liden 已提交
324
	if (buf) {
325
		struct tipc_msg *msg = buf_msg(buf);
J
Jon Paul Maloy 已提交
326
		struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferdq);
327
		u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;
328

329
		tipc_msg_init(tn->own_addr, msg, BCAST_PROTOCOL, STATE_MSG,
330
			      INT_H_SIZE, n_ptr->addr);
331
		msg_set_non_seq(msg, 1);
332
		msg_set_mc_netid(msg, tn->net_id);
333 334
		msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
335
		msg_set_bcgap_to(msg, to);
P
Per Liden 已提交
336

337
		tipc_bclink_lock(net);
338
		tipc_bearer_send(net, MAX_BEARERS, buf, NULL);
339 340
		tn->bcl->stats.sent_nacks++;
		tipc_bclink_unlock(net);
341
		kfree_skb(buf);
P
Per Liden 已提交
342

343
		n_ptr->bclink.oos_state++;
P
Per Liden 已提交
344 345 346
	}
}

347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369
void tipc_bclink_sync_state(struct tipc_node *n, struct tipc_msg *hdr)
{
	u16 last = msg_last_bcast(hdr);
	int mtyp = msg_type(hdr);

	if (unlikely(msg_user(hdr) != LINK_PROTOCOL))
		return;
	if (mtyp == STATE_MSG) {
		tipc_bclink_update_link_state(n, last);
		return;
	}
	/* Compatibility: older nodes don't know BCAST_PROTOCOL synchronization,
	 * and transfer synch info in LINK_PROTOCOL messages.
	 */
	if (tipc_node_is_up(n))
		return;
	if ((mtyp != RESET_MSG) && (mtyp != ACTIVATE_MSG))
		return;
	n->bclink.last_sent = last;
	n->bclink.last_in = last;
	n->bclink.oos_state = 0;
}

370
/**
371
 * bclink_peek_nack - monitor retransmission requests sent by other nodes
P
Per Liden 已提交
372
 *
373 374
 * Delay any upcoming NACK by this node if another node has already
 * requested the first message this node is going to ask for.
P
Per Liden 已提交
375
 */
376
static void bclink_peek_nack(struct net *net, struct tipc_msg *msg)
P
Per Liden 已提交
377
{
378
	struct tipc_node *n_ptr = tipc_node_find(net, msg_destnode(msg));
P
Per Liden 已提交
379

380
	if (unlikely(!n_ptr))
P
Per Liden 已提交
381
		return;
382

383
	tipc_node_lock(n_ptr);
384
	if (n_ptr->bclink.recv_permitted &&
385 386 387
	    (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) &&
	    (n_ptr->bclink.last_in == msg_bcgap_after(msg)))
		n_ptr->bclink.oos_state = 2;
388
	tipc_node_unlock(n_ptr);
389
	tipc_node_put(n_ptr);
P
Per Liden 已提交
390 391
}

392
/* tipc_bclink_xmit - deliver buffer chain to all nodes in cluster
393
 *                    and to identified node local sockets
394
 * @net: the applicable net namespace
395
 * @list: chain of buffers containing message
396 397 398
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
 */
399
int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list)
400
{
401 402 403
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct tipc_bclink *bclink = tn->bclink;
404 405
	int rc = 0;
	int bc = 0;
406
	struct sk_buff *skb;
407 408
	struct sk_buff_head arrvq;
	struct sk_buff_head inputq;
409 410

	/* Prepare clone of message for local node */
411
	skb = tipc_msg_reassemble(list);
412
	if (unlikely(!skb))
413
		return -EHOSTUNREACH;
414

415
	/* Broadcast to all nodes */
416
	if (likely(bclink)) {
417
		tipc_bclink_lock(net);
418
		if (likely(bclink->bcast_nodes.count)) {
419
			rc = __tipc_link_xmit(net, bcl, list);
420
			if (likely(!rc)) {
J
Jon Paul Maloy 已提交
421
				u32 len = skb_queue_len(&bcl->transmq);
422

423
				bclink_set_last_sent(net);
424
				bcl->stats.queue_sz_counts++;
425
				bcl->stats.accu_queue_sz += len;
426 427 428
			}
			bc = 1;
		}
429
		tipc_bclink_unlock(net);
430 431 432
	}

	if (unlikely(!bc))
433
		__skb_queue_purge(list);
434

435
	if (unlikely(rc)) {
436
		kfree_skb(skb);
437 438 439 440 441 442 443
		return rc;
	}
	/* Deliver message clone */
	__skb_queue_head_init(&arrvq);
	skb_queue_head_init(&inputq);
	__skb_queue_tail(&arrvq, skb);
	tipc_sk_mcast_rcv(net, &arrvq, &inputq);
444 445 446
	return rc;
}

447
/**
448 449
 * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
 *
450
 * Called with both sending node's lock and bclink_lock taken.
451 452 453
 */
static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
{
454 455
	struct tipc_net *tn = net_generic(node->net, tipc_net_id);

456 457 458
	bclink_update_last_sent(node, seqno);
	node->bclink.last_in = seqno;
	node->bclink.oos_state = 0;
459
	tn->bcl->stats.recv_info++;
460 461 462 463 464

	/*
	 * Unicast an ACK periodically, ensuring that
	 * all nodes in the cluster don't ACK at the same time
	 */
465
	if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
466
		tipc_link_proto_xmit(node_active_link(node, node->addr),
467
				     STATE_MSG, 0, 0, 0, 0);
468
		tn->bcl->stats.sent_acks++;
469 470 471
	}
}

472
/**
473
 * tipc_bclink_rcv - receive a broadcast packet, and deliver upwards
474
 *
Y
Ying Xue 已提交
475
 * RCU is locked, no other locks set
P
Per Liden 已提交
476
 */
477
void tipc_bclink_rcv(struct net *net, struct sk_buff *buf)
478
{
479
	struct tipc_net *tn = net_generic(net, tipc_net_id);
480
	struct tipc_link *bcl = tn->bcl;
P
Per Liden 已提交
481
	struct tipc_msg *msg = buf_msg(buf);
482
	struct tipc_node *node;
P
Per Liden 已提交
483 484
	u32 next_in;
	u32 seqno;
485
	int deferred = 0;
486 487
	int pos = 0;
	struct sk_buff *iskb;
488
	struct sk_buff_head *arrvq, *inputq;
P
Per Liden 已提交
489

490
	/* Screen out unwanted broadcast messages */
491
	if (msg_mc_netid(msg) != tn->net_id)
492 493
		goto exit;

494
	node = tipc_node_find(net, msg_prevnode(msg));
495 496 497 498
	if (unlikely(!node))
		goto exit;

	tipc_node_lock(node);
499
	if (unlikely(!node->bclink.recv_permitted))
500
		goto unlock;
P
Per Liden 已提交
501

502
	/* Handle broadcast protocol message */
P
Per Liden 已提交
503
	if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
504 505
		if (msg_type(msg) != STATE_MSG)
			goto unlock;
506
		if (msg_destnode(msg) == tn->own_addr) {
507
			tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
508
			tipc_bclink_lock(net);
P
Per Liden 已提交
509
			bcl->stats.recv_nacks++;
510 511
			tn->bclink->retransmit_to = node;
			bclink_retransmit_pkt(tn, msg_bcgap_after(msg),
P
Per Liden 已提交
512
					      msg_bcgap_to(msg));
513
			tipc_bclink_unlock(net);
514
			tipc_node_unlock(node);
P
Per Liden 已提交
515
		} else {
516
			tipc_node_unlock(node);
517
			bclink_peek_nack(net, msg);
P
Per Liden 已提交
518
		}
519
		tipc_node_put(node);
520
		goto exit;
P
Per Liden 已提交
521 522
	}

523
	/* Handle in-sequence broadcast message */
P
Per Liden 已提交
524
	seqno = msg_seqno(msg);
525
	next_in = mod(node->bclink.last_in + 1);
526 527
	arrvq = &tn->bclink->arrvq;
	inputq = &tn->bclink->inputq;
P
Per Liden 已提交
528 529

	if (likely(seqno == next_in)) {
530
receive:
531
		/* Deliver message to destination */
P
Per Liden 已提交
532
		if (likely(msg_isdata(msg))) {
533
			tipc_bclink_lock(net);
534
			bclink_accept_pkt(node, seqno);
535 536 537 538
			spin_lock_bh(&inputq->lock);
			__skb_queue_tail(arrvq, buf);
			spin_unlock_bh(&inputq->lock);
			node->action_flags |= TIPC_BCAST_MSG_EVT;
539
			tipc_bclink_unlock(net);
540
			tipc_node_unlock(node);
P
Per Liden 已提交
541
		} else if (msg_user(msg) == MSG_BUNDLER) {
542
			tipc_bclink_lock(net);
543
			bclink_accept_pkt(node, seqno);
P
Per Liden 已提交
544 545
			bcl->stats.recv_bundles++;
			bcl->stats.recv_bundled += msg_msgcnt(msg);
546 547 548 549 550 551 552
			pos = 0;
			while (tipc_msg_extract(buf, &iskb, &pos)) {
				spin_lock_bh(&inputq->lock);
				__skb_queue_tail(arrvq, iskb);
				spin_unlock_bh(&inputq->lock);
			}
			node->action_flags |= TIPC_BCAST_MSG_EVT;
553
			tipc_bclink_unlock(net);
554
			tipc_node_unlock(node);
P
Per Liden 已提交
555
		} else if (msg_user(msg) == MSG_FRAGMENTER) {
556
			tipc_bclink_lock(net);
557
			bclink_accept_pkt(node, seqno);
558 559 560 561 562
			tipc_buf_append(&node->bclink.reasm_buf, &buf);
			if (unlikely(!buf && !node->bclink.reasm_buf)) {
				tipc_bclink_unlock(net);
				goto unlock;
			}
P
Per Liden 已提交
563
			bcl->stats.recv_fragments++;
564
			if (buf) {
P
Per Liden 已提交
565
				bcl->stats.recv_fragmented++;
566
				msg = buf_msg(buf);
567
				tipc_bclink_unlock(net);
E
Erik Hugne 已提交
568 569
				goto receive;
			}
570
			tipc_bclink_unlock(net);
571
			tipc_node_unlock(node);
P
Per Liden 已提交
572
		} else {
573
			tipc_bclink_lock(net);
574
			bclink_accept_pkt(node, seqno);
575
			tipc_bclink_unlock(net);
576
			tipc_node_unlock(node);
577
			kfree_skb(buf);
P
Per Liden 已提交
578
		}
579
		buf = NULL;
580 581

		/* Determine new synchronization state */
582
		tipc_node_lock(node);
583 584 585
		if (unlikely(!tipc_node_is_up(node)))
			goto unlock;

586
		if (node->bclink.last_in == node->bclink.last_sent)
587 588
			goto unlock;

J
Jon Paul Maloy 已提交
589
		if (skb_queue_empty(&node->bclink.deferdq)) {
590 591 592 593
			node->bclink.oos_state = 1;
			goto unlock;
		}

J
Jon Paul Maloy 已提交
594
		msg = buf_msg(skb_peek(&node->bclink.deferdq));
595 596 597 598 599 600
		seqno = msg_seqno(msg);
		next_in = mod(next_in + 1);
		if (seqno != next_in)
			goto unlock;

		/* Take in-sequence message from deferred queue & deliver it */
J
Jon Paul Maloy 已提交
601
		buf = __skb_dequeue(&node->bclink.deferdq);
602 603 604 605 606
		goto receive;
	}

	/* Handle out-of-sequence broadcast message */
	if (less(next_in, seqno)) {
J
Jon Paul Maloy 已提交
607
		deferred = tipc_link_defer_pkt(&node->bclink.deferdq,
608
					       buf);
609
		bclink_update_last_sent(node, seqno);
610
		buf = NULL;
611
	}
612

613
	tipc_bclink_lock(net);
614

615 616
	if (deferred)
		bcl->stats.deferred_recv++;
617 618
	else
		bcl->stats.duplicates++;
619

620
	tipc_bclink_unlock(net);
621

622
unlock:
623
	tipc_node_unlock(node);
624
	tipc_node_put(node);
625
exit:
626
	kfree_skb(buf);
P
Per Liden 已提交
627 628
}

629
u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
P
Per Liden 已提交
630
{
631
	return (n_ptr->bclink.recv_permitted &&
632
		(tipc_bclink_get_last_sent(n_ptr->net) != n_ptr->bclink.acked));
P
Per Liden 已提交
633 634 635 636
}


/**
637
 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
638
 *
639 640
 * Send packet over as many bearers as necessary to reach all nodes
 * that have joined the broadcast link.
641
 *
642 643
 * Returns 0 (packet sent successfully) under all circumstances,
 * since the broadcast link's pseudo-bearer never blocks
P
Per Liden 已提交
644
 */
645 646
static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf,
			      struct tipc_bearer *unused1,
A
Adrian Bunk 已提交
647
			      struct tipc_media_addr *unused2)
P
Per Liden 已提交
648 649
{
	int bp_index;
650
	struct tipc_msg *msg = buf_msg(buf);
651
	struct tipc_net *tn = net_generic(net, tipc_net_id);
652 653
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
	struct tipc_bclink *bclink = tn->bclink;
P
Per Liden 已提交
654

655
	/* Prepare broadcast link message for reliable transmission,
656 657 658 659
	 * if first time trying to send it;
	 * preparation is skipped for broadcast link protocol messages
	 * since they are sent in an unreliable manner and don't need it
	 */
P
Per Liden 已提交
660
	if (likely(!msg_non_seq(buf_msg(buf)))) {
661
		bcbuf_set_acks(buf, bclink->bcast_nodes.count);
662
		msg_set_non_seq(msg, 1);
663
		msg_set_mc_netid(msg, tn->net_id);
664
		tn->bcl->stats.sent_info++;
665
		if (WARN_ON(!bclink->bcast_nodes.count)) {
666 667 668
			dump_stack();
			return 0;
		}
P
Per Liden 已提交
669 670 671
	}

	/* Send buffer over bearers until all targets reached */
672
	bcbearer->remains = bclink->bcast_nodes;
P
Per Liden 已提交
673 674

	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
675 676
		struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
		struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
677 678
		struct tipc_bearer *bp[2] = {p, s};
		struct tipc_bearer *b = bp[msg_link_selector(msg)];
679
		struct sk_buff *tbuf;
P
Per Liden 已提交
680 681

		if (!p)
682
			break; /* No more bearers to try */
683 684
		if (!b)
			b = p;
685
		tipc_nmap_diff(&bcbearer->remains, &b->nodes,
686
			       &bcbearer->remains_new);
687
		if (bcbearer->remains_new.count == bcbearer->remains.count)
688
			continue; /* Nothing added by bearer pair */
P
Per Liden 已提交
689

690 691
		if (bp_index == 0) {
			/* Use original buffer for first bearer */
692
			tipc_bearer_send(net, b->identity, buf, &b->bcast_addr);
693 694
		} else {
			/* Avoid concurrent buffer access */
695
			tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
696 697
			if (!tbuf)
				break;
698 699
			tipc_bearer_send(net, b->identity, tbuf,
					 &b->bcast_addr);
700 701
			kfree_skb(tbuf); /* Bearer keeps a clone */
		}
702
		if (bcbearer->remains_new.count == 0)
703
			break; /* All targets reached */
P
Per Liden 已提交
704

705
		bcbearer->remains = bcbearer->remains_new;
P
Per Liden 已提交
706
	}
707

708
	return 0;
P
Per Liden 已提交
709 710 711
}

/**
712
 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
P
Per Liden 已提交
713
 */
714 715
void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
			u32 node, bool action)
P
Per Liden 已提交
716
{
717
	struct tipc_net *tn = net_generic(net, tipc_net_id);
718
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
719 720
	struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
	struct tipc_bcbearer_pair *bp_curr;
Y
Ying Xue 已提交
721
	struct tipc_bearer *b;
P
Per Liden 已提交
722 723 724
	int b_index;
	int pri;

725
	tipc_bclink_lock(net);
P
Per Liden 已提交
726

727 728 729 730 731
	if (action)
		tipc_nmap_add(nm_ptr, node);
	else
		tipc_nmap_remove(nm_ptr, node);

P
Per Liden 已提交
732 733 734
	/* Group bearers by priority (can assume max of two per priority) */
	memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));

Y
Ying Xue 已提交
735
	rcu_read_lock();
P
Per Liden 已提交
736
	for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
737
		b = rcu_dereference_rtnl(tn->bearer_list[b_index]);
738
		if (!b || !b->nodes.count)
P
Per Liden 已提交
739 740 741 742 743 744 745
			continue;

		if (!bp_temp[b->priority].primary)
			bp_temp[b->priority].primary = b;
		else
			bp_temp[b->priority].secondary = b;
	}
Y
Ying Xue 已提交
746
	rcu_read_unlock();
P
Per Liden 已提交
747 748 749 750 751

	/* Create array of bearer pairs for broadcasting */
	bp_curr = bcbearer->bpairs;
	memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));

P
Per Liden 已提交
752
	for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {
P
Per Liden 已提交
753 754 755 756 757 758 759

		if (!bp_temp[pri].primary)
			continue;

		bp_curr->primary = bp_temp[pri].primary;

		if (bp_temp[pri].secondary) {
760 761
			if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
					    &bp_temp[pri].secondary->nodes)) {
P
Per Liden 已提交
762 763 764 765 766 767 768 769 770 771
				bp_curr->secondary = bp_temp[pri].secondary;
			} else {
				bp_curr++;
				bp_curr->primary = bp_temp[pri].secondary;
			}
		}

		bp_curr++;
	}

772
	tipc_bclink_unlock(net);
P
Per Liden 已提交
773 774
}

775 776
static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825
{
	int i;
	struct nlattr *nest;

	struct nla_map {
		__u32 key;
		__u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
	};

	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i <  ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, nest);

	return 0;
msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

826
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
827 828 829 830 831
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
832 833
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
834 835 836 837

	if (!bcl)
		return 0;

838
	tipc_bclink_lock(net);
839

840
	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	/* The broadcast link is always up */
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		goto attr_msg_full;

	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
		goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
		goto attr_msg_full;
857
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt))
858
		goto attr_msg_full;
859
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt))
860 861 862 863 864
		goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
865
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
866 867 868 869 870 871 872
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
	if (err)
		goto attr_msg_full;

873
	tipc_bclink_unlock(net);
874 875 876 877 878 879 880 881 882 883
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
884
	tipc_bclink_unlock(net);
885 886 887 888
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
P
Per Liden 已提交
889

890
int tipc_bclink_reset_stats(struct net *net)
P
Per Liden 已提交
891
{
892 893 894
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

P
Per Liden 已提交
895 896 897
	if (!bcl)
		return -ENOPROTOOPT;

898
	tipc_bclink_lock(net);
P
Per Liden 已提交
899
	memset(&bcl->stats, 0, sizeof(bcl->stats));
900
	tipc_bclink_unlock(net);
901
	return 0;
P
Per Liden 已提交
902 903
}

904
int tipc_bclink_set_queue_limits(struct net *net, u32 limit)
P
Per Liden 已提交
905
{
906 907 908
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

P
Per Liden 已提交
909 910 911 912 913
	if (!bcl)
		return -ENOPROTOOPT;
	if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
		return -EINVAL;

914
	tipc_bclink_lock(net);
915
	tipc_link_set_queue_limits(bcl, limit);
916
	tipc_bclink_unlock(net);
917
	return 0;
P
Per Liden 已提交
918 919
}

920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940
int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[])
{
	int err;
	u32 win;
	struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

	if (!attrs[TIPC_NLA_LINK_PROP])
		return -EINVAL;

	err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
	if (err)
		return err;

	if (!props[TIPC_NLA_PROP_WIN])
		return -EOPNOTSUPP;

	win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);

	return tipc_bclink_set_queue_limits(net, win);
}

941
int tipc_bclink_init(struct net *net)
P
Per Liden 已提交
942
{
943
	struct tipc_net *tn = net_generic(net, tipc_net_id);
944 945 946
	struct tipc_bcbearer *bcbearer;
	struct tipc_bclink *bclink;
	struct tipc_link *bcl;
947

948 949 950 951 952 953 954 955 956 957 958
	bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
	if (!bcbearer)
		return -ENOMEM;

	bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
	if (!bclink) {
		kfree(bcbearer);
		return -ENOMEM;
	}

	bcl = &bclink->link;
P
Per Liden 已提交
959
	bcbearer->bearer.media = &bcbearer->media;
960
	bcbearer->media.send_msg = tipc_bcbearer_send;
961
	sprintf(bcbearer->media.name, "tipc-broadcast");
P
Per Liden 已提交
962

963
	spin_lock_init(&bclink->lock);
J
Jon Paul Maloy 已提交
964 965 966
	__skb_queue_head_init(&bcl->transmq);
	__skb_queue_head_init(&bcl->backlogq);
	__skb_queue_head_init(&bcl->deferdq);
967
	skb_queue_head_init(&bcl->wakeupq);
968
	bcl->snd_nxt = 1;
I
Ingo Molnar 已提交
969
	spin_lock_init(&bclink->node.lock);
970 971
	__skb_queue_head_init(&bclink->arrvq);
	skb_queue_head_init(&bclink->inputq);
P
Per Liden 已提交
972
	bcl->owner = &bclink->node;
973
	bcl->owner->net = net;
974
	bcl->mtu = MAX_PKT_DEFAULT_MCAST;
975
	tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
976
	bcl->bearer_id = MAX_BEARERS;
977
	rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
978 979
	bcl->pmsg = (struct tipc_msg *)&bcl->proto_msg;
	msg_set_prevnode(bcl->pmsg, tn->own_addr);
980
	strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
981 982 983
	tn->bcbearer = bcbearer;
	tn->bclink = bclink;
	tn->bcl = bcl;
984
	return 0;
P
Per Liden 已提交
985 986
}

987
void tipc_bclink_stop(struct net *net)
P
Per Liden 已提交
988
{
989 990
	struct tipc_net *tn = net_generic(net, tipc_net_id);

991 992 993
	tipc_bclink_lock(net);
	tipc_link_purge_queues(tn->bcl);
	tipc_bclink_unlock(net);
994

995
	RCU_INIT_POINTER(tn->bearer_list[BCBEARER], NULL);
996
	synchronize_net();
997 998
	kfree(tn->bcbearer);
	kfree(tn->bclink);
P
Per Liden 已提交
999 1000
}

1001 1002 1003
/**
 * tipc_nmap_add - add a node to a node map
 */
1004
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) == 0) {
		nm_ptr->count++;
		nm_ptr->map[w] |= mask;
	}
}

/**
 * tipc_nmap_remove - remove a node from a node map
 */
1019
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) != 0) {
		nm_ptr->map[w] &= ~mask;
		nm_ptr->count--;
	}
}

/**
 * tipc_nmap_diff - find differences between node maps
 * @nm_a: input node map A
 * @nm_b: input node map B
 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
 */
1037 1038 1039
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff)
1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057
{
	int stop = ARRAY_SIZE(nm_a->map);
	int w;
	int b;
	u32 map;

	memset(nm_diff, 0, sizeof(*nm_diff));
	for (w = 0; w < stop; w++) {
		map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
		nm_diff->map[w] = map;
		if (map != 0) {
			for (b = 0 ; b < WSIZE; b++) {
				if (map & (1 << b))
					nm_diff->count++;
			}
		}
	}
}