/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2015, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
38
#include "subscr.h"
P
Per Liden 已提交
39
#include "link.h"
40
#include "bcast.h"
41
#include "socket.h"
P
Per Liden 已提交
42 43
#include "name_distr.h"
#include "discover.h"
44
#include "netlink.h"
P
Per Liden 已提交
45

46 47
#include <linux/pkt_sched.h>

/*
 * Error message prefixes
 */
static const char *link_co_err = "Link changeover error, ";
static const char *link_rst_msg = "Resetting link ";
static const char *link_unk_evt = "Unknown link event ";

static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
	[TIPC_NLA_LINK_UNSPEC]		= { .type = NLA_UNSPEC },
	[TIPC_NLA_LINK_NAME] = {
		.type = NLA_STRING,
		.len = TIPC_MAX_LINK_NAME
	},
	[TIPC_NLA_LINK_MTU]		= { .type = NLA_U32 },
	[TIPC_NLA_LINK_BROADCAST]	= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_UP]		= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_ACTIVE]		= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_PROP]		= { .type = NLA_NESTED },
	[TIPC_NLA_LINK_STATS]		= { .type = NLA_NESTED },
	[TIPC_NLA_LINK_RX]		= { .type = NLA_U32 },
	[TIPC_NLA_LINK_TX]		= { .type = NLA_U32 }
};

/* Properties valid for media, bearer and link */
static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
	[TIPC_NLA_PROP_UNSPEC]		= { .type = NLA_UNSPEC },
	[TIPC_NLA_PROP_PRIO]		= { .type = NLA_U32 },
	[TIPC_NLA_PROP_TOL]		= { .type = NLA_U32 },
	[TIPC_NLA_PROP_WIN]		= { .type = NLA_U32 }
};

/*
 * Interval between NACKs when packets arrive out of order
 */
#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
/*
 * Out-of-range value for link session numbers
 */
#define WILDCARD_SESSION 0x10000

/* State value stored in 'failover_pkts'
 */
#define FIRST_FAILOVER 0xffffu

/* Link FSM states and events:
 */
enum {
	TIPC_LINK_WORKING,
	TIPC_LINK_PROBING,
	TIPC_LINK_RESETTING,
	TIPC_LINK_ESTABLISHING
};

enum {
	PEER_RESET_EVT    = RESET_MSG,
	ACTIVATE_EVT      = ACTIVATE_MSG,
	TRAFFIC_EVT,      /* Any other valid msg from peer */
	SILENCE_EVT       /* Peer was silent during last timer interval */
};
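
/* Note: the first two event values above deliberately alias the on-wire
 * message types RESET_MSG and ACTIVATE_MSG, so the type of a received
 * protocol message can be fed straight into tipc_link_fsm_evt() without
 * translation (see tipc_link_proto_rcv() further down).
 */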

/* Link FSM state checking routines
 */
static int link_working(struct tipc_link *l)
{
	return l->state == TIPC_LINK_WORKING;
}

static int link_probing(struct tipc_link *l)
{
	return l->state == TIPC_LINK_PROBING;
}

static int link_resetting(struct tipc_link *l)
{
	return l->state == TIPC_LINK_RESETTING;
}

static int link_establishing(struct tipc_link *l)
{
	return l->state == TIPC_LINK_ESTABLISHING;
}

static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq);
static void link_reset_statistics(struct tipc_link *l_ptr);
static void link_print(struct tipc_link *l_ptr, const char *str);
static void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
					   struct sk_buff_head *xmitq);
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
static bool tipc_link_failover_rcv(struct tipc_link *l, struct sk_buff **skb);

/*
 *  Simple link routines
 */
static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}

static struct tipc_link *tipc_parallel_link(struct tipc_link *l)
{
	struct tipc_node *n = l->owner;

	if (node_active_link(n, 0) != l)
		return node_active_link(n, 0);
	return node_active_link(n, 1);
}

/*
 *  Simple non-static link routines (i.e. referenced outside this file)
 */
int tipc_link_is_up(struct tipc_link *l_ptr)
{
	if (!l_ptr)
		return 0;
	return link_working(l_ptr) || link_probing(l_ptr);
}

int tipc_link_is_active(struct tipc_link *l)
{
	struct tipc_node *n = l->owner;

	return (node_active_link(n, 0) == l) || (node_active_link(n, 1) == l);
}

/**
 * tipc_link_create - create a new link
 * @n_ptr: pointer to associated node
 * @b_ptr: pointer to associated bearer
 * @media_addr: media address to use when sending messages over link
 *
 * Returns pointer to link.
 */
struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
				   struct tipc_bearer *b_ptr,
				   const struct tipc_media_addr *media_addr,
				   struct sk_buff_head *inputq,
				   struct sk_buff_head *namedq)
{
	struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
	struct tipc_link *l_ptr;
	struct tipc_msg *msg;
	char *if_name;
	char addr_string[16];
	u32 peer = n_ptr->addr;

	if (n_ptr->link_cnt >= MAX_BEARERS) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Cannot establish %uth link to %s. Max %u allowed.\n",
		       n_ptr->link_cnt, addr_string, MAX_BEARERS);
		return NULL;
	}

	if (n_ptr->links[b_ptr->identity].link) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish second link on <%s> to %s\n",
		       b_ptr->name, addr_string);
		return NULL;
	}

	l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
	if (!l_ptr) {
		pr_warn("Link creation failed, no memory\n");
		return NULL;
	}
	l_ptr->addr = peer;
	if_name = strchr(b_ptr->name, ':') + 1;
	sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(tn->own_addr), tipc_cluster(tn->own_addr),
		tipc_node(tn->own_addr),
		if_name,
		tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
		/* note: peer i/f name is updated by reset/activate message */
	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
	l_ptr->owner = n_ptr;
	l_ptr->peer_session = WILDCARD_SESSION;
	l_ptr->bearer_id = b_ptr->identity;
	l_ptr->tolerance = b_ptr->tolerance;
	l_ptr->state = TIPC_LINK_RESETTING;

	l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
	msg = l_ptr->pmsg;
	tipc_msg_init(tn->own_addr, msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE,
		      l_ptr->addr);
	msg_set_size(msg, sizeof(l_ptr->proto_msg));
	msg_set_session(msg, (tn->random & 0xffff));
	msg_set_bearer_id(msg, b_ptr->identity);
	strcpy((char *)msg_data(msg), if_name);
	l_ptr->net_plane = b_ptr->net_plane;
	l_ptr->advertised_mtu = b_ptr->mtu;
	l_ptr->mtu = l_ptr->advertised_mtu;
	l_ptr->priority = b_ptr->priority;
	tipc_link_set_queue_limits(l_ptr, b_ptr->window);
	l_ptr->snd_nxt = 1;
	__skb_queue_head_init(&l_ptr->transmq);
	__skb_queue_head_init(&l_ptr->backlogq);
	__skb_queue_head_init(&l_ptr->deferdq);
	skb_queue_head_init(&l_ptr->wakeupq);
	l_ptr->inputq = inputq;
	l_ptr->namedq = namedq;
	skb_queue_head_init(l_ptr->inputq);
	link_reset_statistics(l_ptr);
	tipc_node_attach_link(n_ptr, l_ptr);
	return l_ptr;
}

/**
 * tipc_link_delete - Delete a link
 * @l: link to be deleted
 */
void tipc_link_delete(struct tipc_link *l)
{
	tipc_link_reset(l);
	tipc_link_reset_fragments(l);
	tipc_node_detach_link(l->owner, l);
}

void tipc_link_delete_list(struct net *net, unsigned int bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *link;
	struct tipc_node *node;

	rcu_read_lock();
	list_for_each_entry_rcu(node, &tn->node_list, list) {
		tipc_node_lock(node);
		link = node->links[bearer_id].link;
		if (link)
			tipc_link_delete(link);
		tipc_node_unlock(node);
	}
	rcu_read_unlock();
}

/* tipc_link_build_bcast_sync_msg() - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 */
static void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
					   struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	struct sk_buff_head list;
	u16 last_sent;

	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
			      0, l->addr, link_own_addr(l), 0, 0, 0);
	if (!skb)
		return;
	last_sent = tipc_bclink_get_last_sent(l->owner->net);
	msg_set_last_bcast(buf_msg(skb), last_sent);
	__skb_queue_head_init(&list);
	__skb_queue_tail(&list, skb);
	tipc_link_xmit(l, &list, xmitq);
}

/**
 * tipc_link_fsm_evt - link finite state machine
 * @l: pointer to link
 * @evt: state machine event to be processed
 * @xmitq: queue to prepend created protocol message, if any
 */
static int tipc_link_fsm_evt(struct tipc_link *l, int evt,
			     struct sk_buff_head *xmitq)
{
	int mtyp = 0, rc = 0;
	struct tipc_link *pl;
	enum {
		LINK_RESET     = 1,
		LINK_ACTIVATE  = (1 << 1),
		SND_PROBE      = (1 << 2),
		SND_STATE      = (1 << 3),
		SND_RESET      = (1 << 4),
		SND_ACTIVATE   = (1 << 5),
		SND_BCAST_SYNC = (1 << 6)
	} actions = 0;

	if (l->exec_mode == TIPC_LINK_BLOCKED)
		return rc;

	switch (l->state) {
	case TIPC_LINK_WORKING:
		switch (evt) {
		case TRAFFIC_EVT:
		case ACTIVATE_EVT:
			break;
		case SILENCE_EVT:
			l->state = TIPC_LINK_PROBING;
			actions |= SND_PROBE;
			break;
		case PEER_RESET_EVT:
			actions |= LINK_RESET | SND_ACTIVATE;
			break;
		default:
			pr_debug("%s%u WORKING\n", link_unk_evt, evt);
		}
		break;
	case TIPC_LINK_PROBING:
		switch (evt) {
		case TRAFFIC_EVT:
		case ACTIVATE_EVT:
			l->state = TIPC_LINK_WORKING;
			break;
		case PEER_RESET_EVT:
			actions |= LINK_RESET | SND_ACTIVATE;
			break;
		case SILENCE_EVT:
			if (l->silent_intv_cnt <= l->abort_limit) {
				actions |= SND_PROBE;
				break;
			}
			actions |= LINK_RESET | SND_RESET;
			break;
		default:
			pr_err("%s%u PROBING\n", link_unk_evt, evt);
		}
		break;
	case TIPC_LINK_RESETTING:
		switch (evt) {
		case TRAFFIC_EVT:
			break;
		case ACTIVATE_EVT:
			pl = node_active_link(l->owner, 0);
			if (pl && link_probing(pl))
				break;
			actions |= LINK_ACTIVATE;
380 381
			if (!l->owner->working_links)
				actions |= SND_BCAST_SYNC;
			break;
		case PEER_RESET_EVT:
			l->state = TIPC_LINK_ESTABLISHING;
			actions |= SND_ACTIVATE;
			break;
		case SILENCE_EVT:
			actions |= SND_RESET;
			break;
		default:
			pr_err("%s%u in RESETTING\n", link_unk_evt, evt);
		}
		break;
	case TIPC_LINK_ESTABLISHING:
		switch (evt) {
		case TRAFFIC_EVT:
		case ACTIVATE_EVT:
			pl = node_active_link(l->owner, 0);
			if (pl && link_probing(pl))
				break;
			actions |= LINK_ACTIVATE;
402 403
			if (!l->owner->working_links)
				actions |= SND_BCAST_SYNC;
			break;
		case PEER_RESET_EVT:
			break;
		case SILENCE_EVT:
			actions |= SND_ACTIVATE;
			break;
		default:
			pr_err("%s%u ESTABLISHING\n", link_unk_evt, evt);
		}
		break;
	default:
		pr_err("Unknown link state %u/%u\n", l->state, evt);
	}

	/* Perform actions as decided by FSM */
	if (actions & LINK_RESET) {
		l->exec_mode = TIPC_LINK_BLOCKED;
		rc |= TIPC_LINK_DOWN_EVT;
	}
	if (actions & LINK_ACTIVATE) {
		l->exec_mode = TIPC_LINK_OPEN;
		rc |= TIPC_LINK_UP_EVT;
	}
	if (actions & (SND_STATE | SND_PROBE))
		mtyp = STATE_MSG;
	if (actions & SND_RESET)
		mtyp = RESET_MSG;
	if (actions & SND_ACTIVATE)
		mtyp = ACTIVATE_MSG;
	if (actions & (SND_PROBE | SND_STATE | SND_RESET | SND_ACTIVATE))
		tipc_link_build_proto_msg(l, mtyp, actions & SND_PROBE,
					  0, 0, 0, xmitq);
436 437
	if (actions & SND_BCAST_SYNC)
		tipc_link_build_bcast_sync_msg(l, xmitq);
	return rc;
}
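
/* Illustrative sketch (not actual kernel code) of the calling convention
 * the FSM assumes: the caller supplies a queue for any protocol message
 * the FSM builds, and reacts to the returned event flags:
 *
 *	struct sk_buff_head xmitq;
 *	int rc;
 *
 *	__skb_queue_head_init(&xmitq);
 *	rc = tipc_link_fsm_evt(l, SILENCE_EVT, &xmitq);
 *	if (rc & TIPC_LINK_DOWN_EVT)
 *		tipc_link_reset(l);
 *	(any skb left in xmitq is then sent via tipc_bearer_send())
 */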

/* link_profile_stats - update statistical profiling of traffic
 */
static void link_profile_stats(struct tipc_link *l)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	int length;

	/* Update counters used in statistical profiling of send traffic */
	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
	l->stats.queue_sz_counts++;

	skb = skb_peek(&l->transmq);
	if (!skb)
		return;
	msg = buf_msg(skb);
	length = msg_size(msg);

	if (msg_user(msg) == MSG_FRAGMENTER) {
		if (msg_type(msg) != FIRST_FRAGMENT)
			return;
		length = msg_size(msg_get_wrapped(msg));
	}
	l->stats.msg_lengths_total += length;
	l->stats.msg_length_counts++;
	if (length <= 64)
		l->stats.msg_length_profile[0]++;
	else if (length <= 256)
		l->stats.msg_length_profile[1]++;
	else if (length <= 1024)
		l->stats.msg_length_profile[2]++;
	else if (length <= 4096)
		l->stats.msg_length_profile[3]++;
	else if (length <= 16384)
		l->stats.msg_length_profile[4]++;
	else if (length <= 32768)
		l->stats.msg_length_profile[5]++;
	else
		l->stats.msg_length_profile[6]++;
}

/* tipc_link_timeout - perform periodic task as instructed from node timeout
 */
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int rc = 0;

	link_profile_stats(l);
	if (l->silent_intv_cnt)
		rc = tipc_link_fsm_evt(l, SILENCE_EVT, xmitq);
	else if (link_working(l) && tipc_bclink_acks_missing(l->owner))
		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
	l->silent_intv_cnt++;
	return rc;
}
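
/* Silence accounting: the receive path clears l->silent_intv_cnt whenever
 * a packet arrives (see tipc_link_rcv() and tipc_link_proto_rcv()), while
 * the timeout above increments it once per interval. A non-zero count at
 * timer expiry therefore means the peer was silent for at least one full
 * interval, which is what drives the SILENCE_EVT probe/abort logic.
 */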

/**
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @link: congested link
 * @list: message that was attempted sent
 * Create pseudo msg to send back to user when congestion abates
 * Does not consume buffer list
 */
static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
{
	struct tipc_msg *msg = buf_msg(skb_peek(list));
	int imp = msg_importance(msg);
	u32 oport = msg_origport(msg);
	u32 addr = link_own_addr(link);
	struct sk_buff *skb;

	/* This really cannot happen...  */
	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
		pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
		return -ENOBUFS;
	}
	/* Non-blocking sender: */
	if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
		return -ELINKCONG;

	/* Create and schedule wakeup pseudo message */
	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
			      addr, addr, oport, 0, 0);
	if (!skb)
		return -ENOBUFS;
	TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
	TIPC_SKB_CB(skb)->chain_imp = imp;
	skb_queue_tail(&link->wakeupq, skb);
	link->stats.link_congs++;
	return -ELINKCONG;
}
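
/* The -ELINKCONG contract: the caller keeps ownership of 'list' and may
 * retry later; the SOCK_WAKEUP pseudo message queued above is forwarded
 * to the sending socket once link_prepare_wakeup() below finds room in
 * the backlog for its importance level again.
 */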

/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @link: congested link
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to node wait queue for wakeup
 */
void link_prepare_wakeup(struct tipc_link *l)
{
	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
	int imp, lim;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		lim = l->window + l->backlog[imp].limit;
		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
		if ((pnd[imp] + l->backlog[imp].len) >= lim)
			break;
		skb_unlink(skb, &l->wakeupq);
		skb_queue_tail(l->inputq, skb);
		l->owner->inputq = l->inputq;
		l->owner->action_flags |= TIPC_MSG_EVT;
	}
}

/**
 * tipc_link_reset_fragments - purge link's inbound message fragments queue
 * @l_ptr: pointer to link
 */
void tipc_link_reset_fragments(struct tipc_link *l_ptr)
{
	kfree_skb(l_ptr->reasm_buf);
	l_ptr->reasm_buf = NULL;
}

void tipc_link_purge_backlog(struct tipc_link *l)
{
	__skb_queue_purge(&l->backlogq);
	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
}

/**
 * tipc_link_purge_queues - purge all pkt queues associated with link
 * @l_ptr: pointer to link
 */
void tipc_link_purge_queues(struct tipc_link *l_ptr)
{
	__skb_queue_purge(&l_ptr->deferdq);
	__skb_queue_purge(&l_ptr->transmq);
	tipc_link_purge_backlog(l_ptr);
	tipc_link_reset_fragments(l_ptr);
}

void tipc_link_reset(struct tipc_link *l_ptr)
{
	u32 prev_state = l_ptr->state;
	int was_active_link = tipc_link_is_active(l_ptr);
	struct tipc_node *owner = l_ptr->owner;
	struct tipc_link *pl = tipc_parallel_link(l_ptr);

	msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));

	/* Link is down, accept any session */
	l_ptr->peer_session = WILDCARD_SESSION;

	/* Prepare for renewed mtu size negotiation */
	l_ptr->mtu = l_ptr->advertised_mtu;

	l_ptr->state = TIPC_LINK_RESETTING;

	if ((prev_state == TIPC_LINK_RESETTING) ||
	    (prev_state == TIPC_LINK_ESTABLISHING))
		return;

	tipc_node_link_down(l_ptr->owner, l_ptr->bearer_id);
	tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr);

	if (was_active_link && tipc_node_is_up(l_ptr->owner) && (pl != l_ptr)) {
		l_ptr->exec_mode = TIPC_LINK_BLOCKED;
		l_ptr->failover_checkpt = l_ptr->rcv_nxt;
		pl->failover_pkts = FIRST_FAILOVER;
		pl->failover_checkpt = l_ptr->rcv_nxt;
		pl->failover_skb = l_ptr->reasm_buf;
	} else {
		kfree_skb(l_ptr->reasm_buf);
	}
	/* Clean up all queues, except inputq: */
	__skb_queue_purge(&l_ptr->transmq);
	__skb_queue_purge(&l_ptr->deferdq);
	if (!owner->inputq)
		owner->inputq = l_ptr->inputq;
	skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq);
	if (!skb_queue_empty(owner->inputq))
		owner->action_flags |= TIPC_MSG_EVT;
	tipc_link_purge_backlog(l_ptr);
	l_ptr->reasm_buf = NULL;
	l_ptr->rcv_unacked = 0;
	l_ptr->snd_nxt = 1;
	l_ptr->rcv_nxt = 1;
	l_ptr->silent_intv_cnt = 0;
	l_ptr->stats.recv_info = 0;
	l_ptr->stale_count = 0;
	link_reset_statistics(l_ptr);
}

void tipc_link_activate(struct tipc_link *link)
{
	struct tipc_node *node = link->owner;

	link->rcv_nxt = 1;
	link->stats.recv_info = 1;
	link->silent_intv_cnt = 0;
	link->state = TIPC_LINK_WORKING;
	link->exec_mode = TIPC_LINK_OPEN;
	tipc_node_link_up(node, link->bearer_id);
	tipc_bearer_add_dest(node->net, link->bearer_id, link->addr);
}

/**
 * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
 * @link: link to use
 * @list: chain of buffers containing message
 *
 * Consumes the buffer chain, except when returning an error code,
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int __tipc_link_xmit(struct net *net, struct tipc_link *link,
		     struct sk_buff_head *list)
{
	struct tipc_msg *msg = buf_msg(skb_peek(list));
	unsigned int maxwin = link->window;
	unsigned int i, imp = msg_importance(msg);
	uint mtu = link->mtu;
	u16 ack = mod(link->rcv_nxt - 1);
	u16 seqno = link->snd_nxt;
	u16 bc_last_in = link->owner->bclink.last_in;
	struct tipc_media_addr *addr = &link->media_addr;
	struct sk_buff_head *transmq = &link->transmq;
	struct sk_buff_head *backlogq = &link->backlogq;
	struct sk_buff *skb, *bskb;

	/* Match msg importance against this and all higher backlog limits: */
	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
		if (unlikely(link->backlog[i].len >= link->backlog[i].limit))
			return link_schedule_user(link, list);
	}
	if (unlikely(msg_size(msg) > mtu))
		return -EMSGSIZE;

	/* Prepare each packet for sending, and add to relevant queue: */
	while (skb_queue_len(list)) {
		skb = skb_peek(list);
		msg = buf_msg(skb);
		msg_set_seqno(msg, seqno);
		msg_set_ack(msg, ack);
		msg_set_bcast_ack(msg, bc_last_in);

		if (likely(skb_queue_len(transmq) < maxwin)) {
			__skb_dequeue(list);
			__skb_queue_tail(transmq, skb);
			tipc_bearer_send(net, link->bearer_id, skb, addr);
			link->rcv_unacked = 0;
			seqno++;
			continue;
		}
		if (tipc_msg_bundle(skb_peek_tail(backlogq), msg, mtu)) {
			kfree_skb(__skb_dequeue(list));
			link->stats.sent_bundled++;
			continue;
		}
		if (tipc_msg_make_bundle(&bskb, msg, mtu, link->addr)) {
			kfree_skb(__skb_dequeue(list));
			__skb_queue_tail(backlogq, bskb);
			link->backlog[msg_importance(buf_msg(bskb))].len++;
			link->stats.sent_bundled++;
			link->stats.sent_bundles++;
			continue;
		}
		link->backlog[imp].len += skb_queue_len(list);
		skb_queue_splice_tail_init(list, backlogq);
	}
	link->snd_nxt = seqno;
	return 0;
}
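
/* tipc_link_xmit() below is the queue-based variant of __tipc_link_xmit():
 * instead of calling tipc_bearer_send() directly it leaves clones of the
 * transmitted packets on a caller-supplied 'xmitq', letting the caller
 * decide when and over which bearer to actually send them.
 */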

/**
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @link: link to use
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain, except when returning -ELINKCONG,
 * since the caller then may want to make more send attempts.
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
		   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	unsigned int maxwin = l->window;
	unsigned int i, imp = msg_importance(hdr);
	unsigned int mtu = l->mtu;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno = l->snd_nxt;
	u16 bc_last_in = l->owner->bclink.last_in;
	struct sk_buff_head *transmq = &l->transmq;
	struct sk_buff_head *backlogq = &l->backlogq;
	struct sk_buff *skb, *_skb, *bskb;

	/* Match msg importance against this and all higher backlog limits: */
	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
		if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
			return link_schedule_user(l, list);
	}
	if (unlikely(msg_size(hdr) > mtu))
		return -EMSGSIZE;

	/* Prepare each packet for sending, and add to relevant queue: */
	while (skb_queue_len(list)) {
		skb = skb_peek(list);
		hdr = buf_msg(skb);
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_last_in);

		if (likely(skb_queue_len(transmq) < maxwin)) {
			_skb = skb_clone(skb, GFP_ATOMIC);
			if (!_skb)
				return -ENOBUFS;
			__skb_dequeue(list);
			__skb_queue_tail(transmq, skb);
			__skb_queue_tail(xmitq, _skb);
			l->rcv_unacked = 0;
			seqno++;
			continue;
		}
		if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
			kfree_skb(__skb_dequeue(list));
			l->stats.sent_bundled++;
			continue;
		}
		if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
			kfree_skb(__skb_dequeue(list));
			__skb_queue_tail(backlogq, bskb);
			l->backlog[msg_importance(buf_msg(bskb))].len++;
			l->stats.sent_bundled++;
			l->stats.sent_bundles++;
			continue;
		}
		l->backlog[imp].len += skb_queue_len(list);
		skb_queue_splice_tail_init(list, backlogq);
	}
	l->snd_nxt = seqno;
	return 0;
}
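
/* Note on the skb_clone() call above: the original buffer stays on
 * 'transmq' until it is acked, so retransmission remains possible, while
 * the clone placed on 'xmitq' is what the caller hands to the bearer.
 */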

static void skb2list(struct sk_buff *skb, struct sk_buff_head *list)
{
	skb_queue_head_init(list);
	__skb_queue_tail(list, skb);
}

static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
{
	struct sk_buff_head head;

	skb2list(skb, &head);
	return __tipc_link_xmit(link->owner->net, link, &head);
}

/*
 * tipc_link_sync_rcv - synchronize broadcast link endpoints.
 * Receive the sequence number where we should start receiving and
 * acking broadcast packets from a newly added peer node, and open
 * up for reception of such packets.
 *
 * Called with node locked
 */
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
	n->bclink.recv_permitted = true;
	kfree_skb(buf);
}

/*
 * tipc_link_push_packets - push unsent packets to bearer
 *
 * Push out the unsent messages of a link where congestion
 * has abated. Called with node locked.
 */
void tipc_link_push_packets(struct tipc_link *link)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	u16 seqno = link->snd_nxt;
	u16 ack = mod(link->rcv_nxt - 1);

	while (skb_queue_len(&link->transmq) < link->window) {
		skb = __skb_dequeue(&link->backlogq);
		if (!skb)
			break;
		msg = buf_msg(skb);
		link->backlog[msg_importance(msg)].len--;
		msg_set_ack(msg, ack);
		msg_set_seqno(msg, seqno);
		seqno = mod(seqno + 1);
		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
		link->rcv_unacked = 0;
		__skb_queue_tail(&link->transmq, skb);
		tipc_bearer_send(link->owner->net, link->bearer_id,
				 skb, &link->media_addr);
	}
	link->snd_nxt = seqno;
}

void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *_skb;
	struct tipc_msg *hdr;
	u16 seqno = l->snd_nxt;
	u16 ack = l->rcv_nxt - 1;

	while (skb_queue_len(&l->transmq) < l->window) {
		skb = skb_peek(&l->backlogq);
		if (!skb)
			break;
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb)
			break;
		__skb_dequeue(&l->backlogq);
		hdr = buf_msg(skb);
		l->backlog[msg_importance(hdr)].len--;
		__skb_queue_tail(&l->transmq, skb);
		__skb_queue_tail(xmitq, _skb);
		msg_set_ack(hdr, ack);
		msg_set_seqno(hdr, seqno);
		msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
		l->rcv_unacked = 0;
		seqno++;
	}
	l->snd_nxt = seqno;
}

void tipc_link_reset_all(struct tipc_node *node)
{
	char addr_string[16];
	u32 i;

	tipc_node_lock(node);

	pr_warn("Resetting all links to %s\n",
		tipc_addr_string_fill(addr_string, node->addr));

	for (i = 0; i < MAX_BEARERS; i++) {
		if (node->links[i].link) {
			link_print(node->links[i].link, "Resetting link\n");
			tipc_link_reset(node->links[i].link);
		}
	}

	tipc_node_unlock(node);
}

static void link_retransmit_failure(struct tipc_link *l_ptr,
				    struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct net *net = l_ptr->owner->net;

	pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);

	if (l_ptr->addr) {
		/* Handle failure on standard link */
		link_print(l_ptr, "Resetting link ");
		pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
			msg_user(msg), msg_type(msg), msg_size(msg),
			msg_errcode(msg));
		pr_info("sqno %u, prev: %x, src: %x\n",
			msg_seqno(msg), msg_prevnode(msg), msg_orignode(msg));
		tipc_link_reset(l_ptr);
	} else {
		/* Handle failure on broadcast link */
		struct tipc_node *n_ptr;
		char addr_string[16];

		pr_info("Msg seq number: %u,  ", msg_seqno(msg));
		pr_cont("Outstanding acks: %lu\n",
			(unsigned long) TIPC_SKB_CB(buf)->handle);

		n_ptr = tipc_bclink_retransmit_to(net);

		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_info("Broadcast link info for %s\n", addr_string);
		pr_info("Reception permitted: %d,  Acked: %u\n",
			n_ptr->bclink.recv_permitted,
			n_ptr->bclink.acked);
		pr_info("Last in: %u,  Oos state: %u,  Last sent: %u\n",
			n_ptr->bclink.last_in,
			n_ptr->bclink.oos_state,
			n_ptr->bclink.last_sent);

		n_ptr->action_flags |= TIPC_BCAST_RESET;
		l_ptr->stale_count = 0;
	}
}

void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
			  u32 retransmits)
{
	struct tipc_msg *msg;

	if (!skb)
		return;

	msg = buf_msg(skb);

	/* Detect repeated retransmit failures */
	if (l_ptr->last_retransm == msg_seqno(msg)) {
		if (++l_ptr->stale_count > 100) {
			link_retransmit_failure(l_ptr, skb);
			return;
		}
	} else {
		l_ptr->last_retransm = msg_seqno(msg);
		l_ptr->stale_count = 1;
	}

	skb_queue_walk_from(&l_ptr->transmq, skb) {
		if (!retransmits)
			break;
		msg = buf_msg(skb);
		msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
				 &l_ptr->media_addr);
		retransmits--;
		l_ptr->stats.retransmitted++;
	}
}

static int tipc_link_retransm(struct tipc_link *l, int retransm,
			      struct sk_buff_head *xmitq)
{
	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
	struct tipc_msg *hdr;

	if (!skb)
		return 0;

	/* Detect repeated retransmit failures on same packet */
	if (likely(l->last_retransm != buf_seqno(skb))) {
		l->last_retransm = buf_seqno(skb);
		l->stale_count = 1;
	} else if (++l->stale_count > 100) {
		link_retransmit_failure(l, skb);
		return TIPC_LINK_DOWN_EVT;
	}
	skb_queue_walk(&l->transmq, skb) {
		if (!retransm)
			return 0;
		hdr = buf_msg(skb);
		_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
		if (!_skb)
			return 0;
		hdr = buf_msg(_skb);
		msg_set_ack(hdr, l->rcv_nxt - 1);
		msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
		_skb->priority = TC_PRIO_CONTROL;
		__skb_queue_tail(xmitq, _skb);
		retransm--;
		l->stats.retransmitted++;
	}
	return 0;
}
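
/* Unlike the clone-based transmit path, retransmission uses __pskb_copy()
 * so the copied header can be given a fresh ack value without touching
 * the buffer still sitting on the transmission queue. More than 100
 * consecutive retransmits of the same packet are treated as link failure.
 */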

/* link_synch(): check if all packets arrived before the synch
 *               point have been consumed
 * Returns true if the parallel links are synched, otherwise false
 */
static bool link_synch(struct tipc_link *l)
{
	unsigned int post_synch;
	struct tipc_link *pl;

	pl  = tipc_parallel_link(l);
	if (pl == l)
		goto synched;

	/* Was last pre-synch packet added to input queue ? */
	if (less_eq(pl->rcv_nxt, l->synch_point))
		return false;

	/* Is it still in the input queue ? */
	post_synch = mod(pl->rcv_nxt - l->synch_point) - 1;
	if (skb_queue_len(pl->inputq) > post_synch)
		return false;
synched:
	l->exec_mode = TIPC_LINK_OPEN;
	return true;
}
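
/* In other words, the synch point is considered passed only when every
 * pre-synch packet has not just arrived on the parallel link (rcv_nxt
 * check) but also been consumed from its shared input queue (queue
 * length check).
 */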

/* tipc_data_input - deliver data and name distr msgs to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */
static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
{
	struct tipc_node *node = link->owner;
	struct tipc_msg *msg = buf_msg(skb);
	u32 dport = msg_destport(msg);

	switch (msg_user(msg)) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
	case CONN_MANAGER:
		if (tipc_skb_queue_tail(link->inputq, skb, dport)) {
			node->inputq = link->inputq;
			node->action_flags |= TIPC_MSG_EVT;
		}
		return true;
	case NAME_DISTRIBUTOR:
		node->bclink.recv_permitted = true;
		node->namedq = link->namedq;
		skb_queue_tail(link->namedq, skb);
		if (skb_queue_len(link->namedq) == 1)
			node->action_flags |= TIPC_NAMED_MSG_EVT;
		return true;
	case MSG_BUNDLER:
	case TUNNEL_PROTOCOL:
	case MSG_FRAGMENTER:
	case BCAST_PROTOCOL:
		return false;
	default:
		pr_warn("Dropping received illegal msg type\n");
		kfree_skb(skb);
		return false;
	};
}

/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 * Node lock must be held
 */
static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
{
	struct tipc_node *node = link->owner;
	struct tipc_msg *msg = buf_msg(skb);
	struct sk_buff *iskb;
	int pos = 0;

	switch (msg_user(msg)) {
	case TUNNEL_PROTOCOL:
		if (msg_dup(msg)) {
			link->exec_mode = TIPC_LINK_TUNNEL;
			link->synch_point = msg_seqno(msg_get_wrapped(msg));
			kfree_skb(skb);
			break;
		}
		if (!tipc_link_failover_rcv(link, &skb))
			break;
		if (msg_user(buf_msg(skb)) != MSG_BUNDLER) {
			tipc_data_input(link, skb);
			break;
		}
	case MSG_BUNDLER:
		link->stats.recv_bundles++;
		link->stats.recv_bundled += msg_msgcnt(msg);

		while (tipc_msg_extract(skb, &iskb, &pos))
			tipc_data_input(link, iskb);
		break;
	case MSG_FRAGMENTER:
		link->stats.recv_fragments++;
		if (tipc_buf_append(&link->reasm_buf, &skb)) {
			link->stats.recv_fragmented++;
			tipc_data_input(link, skb);
		} else if (!link->reasm_buf) {
			tipc_link_reset(link);
		}
		break;
	case BCAST_PROTOCOL:
		tipc_link_sync_rcv(node, skb);
		break;
	default:
		break;
	};
}

static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
{
	bool released = false;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		__skb_unlink(skb, &l->transmq);
		kfree_skb(skb);
		released = true;
	}
	return released;
}

/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
 * @link: the link that should handle the message
 * @skb: TIPC packet
 * @xmitq: queue to place packets to be sent after this call
 */
int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
		  struct sk_buff_head *xmitq)
{
	struct sk_buff_head *arrvq = &l->deferdq;
	struct sk_buff *tmp;
	struct tipc_msg *hdr;
	u16 seqno, rcv_nxt;
	int rc = 0;

	if (unlikely(!__tipc_skb_queue_sorted(arrvq, skb))) {
		if (!(skb_queue_len(arrvq) % TIPC_NACK_INTV))
			tipc_link_build_proto_msg(l, STATE_MSG, 0,
						  0, 0, 0, xmitq);
		return rc;
	}

	skb_queue_walk_safe(arrvq, skb, tmp) {
		hdr = buf_msg(skb);

		/* Verify and update link state */
		if (unlikely(msg_user(hdr) == LINK_PROTOCOL)) {
			__skb_dequeue(arrvq);
			rc |= tipc_link_proto_rcv(l, skb, xmitq);
			continue;
		}

		if (unlikely(!link_working(l))) {
			rc |= tipc_link_fsm_evt(l, TRAFFIC_EVT, xmitq);
			if (!link_working(l)) {
				kfree_skb(__skb_dequeue(arrvq));
				return rc;
			}
		}

		l->silent_intv_cnt = 0;

		/* Forward queues and wake up waiting users */
		if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
			tipc_link_advance_backlog(l, xmitq);
			if (unlikely(!skb_queue_empty(&l->wakeupq)))
				link_prepare_wakeup(l);
		}

		/* Defer reception if there is a gap in the sequence */
		seqno = msg_seqno(hdr);
		rcv_nxt = l->rcv_nxt;
		if (unlikely(less(rcv_nxt, seqno))) {
			l->stats.deferred_recv++;
			return rc;
		}

		__skb_dequeue(arrvq);

		/* Drop if packet already received */
		if (unlikely(more(rcv_nxt, seqno))) {
			l->stats.duplicates++;
			kfree_skb(skb);
			return rc;
		}

		/* Synchronize with parallel link if applicable */
		if (unlikely(l->exec_mode == TIPC_LINK_TUNNEL))
			if (!msg_dup(hdr) && !link_synch(l)) {
				kfree_skb(skb);
				return rc;
			}

		/* Packet can be delivered */
		l->rcv_nxt++;
		l->stats.recv_info++;
		if (unlikely(!tipc_data_input(l, skb)))
			tipc_link_input(l, skb);

		/* Ack at regular intervals */
		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
			l->rcv_unacked = 0;
			l->stats.sent_acks++;
			tipc_link_build_proto_msg(l, STATE_MSG,
						  0, 0, 0, 0, xmitq);
		}
	}
	return rc;
}
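
/* Receive path summary: packets are first sorted into the deferred queue,
 * then consumed in strict sequence order. Each accepted packet acks the
 * peer's transmit queue, advances our backlog, and is delivered through
 * tipc_data_input()/tipc_link_input(). A state message acking the peer is
 * emitted every TIPC_MIN_LINK_WIN packets to keep its window open.
 */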

/**
 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
 *
 * Returns increase in queue length (i.e. 0 or 1)
 */
u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
{
	struct sk_buff *skb1;
	u16 seq_no = buf_seqno(skb);

	/* Empty queue ? */
	if (skb_queue_empty(list)) {
		__skb_queue_tail(list, skb);
		return 1;
	}

	/* Last ? */
	if (less(buf_seqno(skb_peek_tail(list)), seq_no)) {
		__skb_queue_tail(list, skb);
		return 1;
	}

	/* Locate insertion point in queue, then insert; discard if duplicate */
	skb_queue_walk(list, skb1) {
		u16 curr_seqno = buf_seqno(skb1);

		if (seq_no == curr_seqno) {
			kfree_skb(skb);
			return 0;
		}

		if (less(seq_no, curr_seqno))
			break;
	}

	__skb_queue_before(list, skb1, skb);
	return 1;
}

/*
 * Send protocol message to the other endpoint.
 */
void tipc_link_proto_xmit(struct tipc_link *l, u32 msg_typ, int probe_msg,
			  u32 gap, u32 tolerance, u32 priority)
{
	struct sk_buff *skb = NULL;
	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);
	tipc_link_build_proto_msg(l, msg_typ, probe_msg, gap,
				  tolerance, priority, &xmitq);
	skb = __skb_dequeue(&xmitq);
	if (!skb)
		return;
	tipc_bearer_send(l->owner->net, l->bearer_id, skb, &l->media_addr);
	l->rcv_unacked = 0;
	kfree_skb(skb);
}

/* tipc_link_build_proto_msg: prepare link protocol message for transmission
 */
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq)
{
	struct sk_buff *skb = NULL;
	struct tipc_msg *hdr = l->pmsg;
	u16 snd_nxt = l->snd_nxt;
	u16 rcv_nxt = l->rcv_nxt;
	u16 rcv_last = rcv_nxt - 1;
	int node_up = l->owner->bclink.recv_permitted;

	/* Don't send protocol message during reset or link failover */
	if (l->exec_mode == TIPC_LINK_BLOCKED)
		return;

	msg_set_type(hdr, mtyp);
	msg_set_net_plane(hdr, l->net_plane);
	msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
	msg_set_last_bcast(hdr, tipc_bclink_get_last_sent(l->owner->net));
	msg_set_link_tolerance(hdr, tolerance);
	msg_set_linkprio(hdr, priority);
	msg_set_redundant_link(hdr, node_up);
	msg_set_seq_gap(hdr, 0);

	/* Compatibility: created msg must not be in sequence with pkt flow */
	msg_set_seqno(hdr, snd_nxt + U16_MAX / 2);

	if (mtyp == STATE_MSG) {
		if (!tipc_link_is_up(l))
			return;
		msg_set_next_sent(hdr, snd_nxt);

		/* Override rcvgap if there are packets in deferred queue */
		if (!skb_queue_empty(&l->deferdq))
			rcvgap = buf_seqno(skb_peek(&l->deferdq)) - rcv_nxt;
		if (rcvgap) {
			msg_set_seq_gap(hdr, rcvgap);
			l->stats.sent_nacks++;
		}
		msg_set_ack(hdr, rcv_last);
		msg_set_probe(hdr, probe);
		if (probe)
			l->stats.sent_probes++;
		l->stats.sent_states++;
	} else {
		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_max_pkt(hdr, l->advertised_mtu);
		msg_set_ack(hdr, l->failover_checkpt - 1);
		msg_set_next_sent(hdr, 1);
	}
	skb = tipc_buf_acquire(msg_size(hdr));
	if (!skb)
		return;
	skb_copy_to_linear_data(skb, hdr, msg_size(hdr));
	skb->priority = TC_PRIO_CONTROL;
	__skb_queue_head(xmitq, skb);
}

/* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
 * a different bearer. Owner node is locked.
 */
static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
				  struct tipc_msg *tunnel_hdr,
				  struct tipc_msg *msg,
				  u32 selector)
{
	struct tipc_link *tunnel;
	struct sk_buff *skb;
	u32 length = msg_size(msg);

	tunnel = node_active_link(l_ptr->owner, selector & 1);
	if (!tipc_link_is_up(tunnel)) {
		pr_warn("%stunnel link no longer available\n", link_co_err);
		return;
	}
	msg_set_size(tunnel_hdr, length + INT_H_SIZE);
	skb = tipc_buf_acquire(length + INT_H_SIZE);
	if (!skb) {
		pr_warn("%sunable to send tunnel msg\n", link_co_err);
		return;
	}
	skb_copy_to_linear_data(skb, tunnel_hdr, INT_H_SIZE);
	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, msg, length);
	__tipc_link_xmit_skb(tunnel, skb);
}


/* tipc_link_failover_send_queue(): A link has gone down, but a second
 * link is still active. We can do failover. Tunnel the failing link's
 * whole send queue via the remaining link. This way, we don't lose
 * any packets, and sequence order is preserved for subsequent traffic
 * sent over the remaining link. Owner node is locked.
 */
void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
{
	int msgcount;
	struct tipc_link *tunnel = node_active_link(l_ptr->owner, 0);
	struct tipc_msg tunnel_hdr;
	struct sk_buff *skb;
	int split_bundles;

	if (!tunnel)
		return;

	tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, TUNNEL_PROTOCOL,
		      FAILOVER_MSG, INT_H_SIZE, l_ptr->addr);

	skb_queue_walk(&l_ptr->backlogq, skb) {
		msg_set_seqno(buf_msg(skb), l_ptr->snd_nxt);
		l_ptr->snd_nxt = mod(l_ptr->snd_nxt + 1);
	}
	skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq);
	tipc_link_purge_backlog(l_ptr);
	msgcount = skb_queue_len(&l_ptr->transmq);
	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
	msg_set_msgcnt(&tunnel_hdr, msgcount);

	if (skb_queue_empty(&l_ptr->transmq)) {
		skb = tipc_buf_acquire(INT_H_SIZE);
		if (skb) {
			skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE);
			msg_set_size(&tunnel_hdr, INT_H_SIZE);
			__tipc_link_xmit_skb(tunnel, skb);
		} else {
			pr_warn("%sunable to send changeover msg\n",
				link_co_err);
		}
		return;
	}

	split_bundles = (node_active_link(l_ptr->owner, 0) !=
			 node_active_link(l_ptr->owner, 1));

	skb_queue_walk(&l_ptr->transmq, skb) {
		struct tipc_msg *msg = buf_msg(skb);

		if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
			struct tipc_msg *m = msg_get_wrapped(msg);
			unchar *pos = (unchar *)m;

			msgcount = msg_msgcnt(msg);
			while (msgcount--) {
				msg_set_seqno(m, msg_seqno(msg));
				tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, m,
						      msg_link_selector(m));
				pos += align(msg_size(m));
				m = (struct tipc_msg *)pos;
			}
		} else {
			tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
					      msg_link_selector(msg));
		}
	}
}

/* tipc_link_dup_queue_xmit(): A second link has become active. Tunnel a
 * duplicate of the first link's send queue via the new link. This way, we
 * are guaranteed that currently queued packets from a socket are delivered
 * before future traffic from the same socket, even if this is using the
 * new link. The last arriving copy of each duplicate packet is dropped at
 * the receiving end by the regular protocol check, so packet cardinality
 * and sequence order is preserved per sender/receiver socket pair.
 * Owner node is locked.
 */
void tipc_link_dup_queue_xmit(struct tipc_link *link,
			      struct tipc_link *tnl)
{
	struct sk_buff *skb;
	struct tipc_msg tnl_hdr;
	struct sk_buff_head *queue = &link->transmq;
	int mcnt;
	u16 seqno;

	tipc_msg_init(link_own_addr(link), &tnl_hdr, TUNNEL_PROTOCOL,
		      SYNCH_MSG, INT_H_SIZE, link->addr);
	mcnt = skb_queue_len(&link->transmq) + skb_queue_len(&link->backlogq);
	msg_set_msgcnt(&tnl_hdr, mcnt);
	msg_set_bearer_id(&tnl_hdr, link->peer_bearer_id);

tunnel_queue:
	skb_queue_walk(queue, skb) {
		struct sk_buff *outskb;
		struct tipc_msg *msg = buf_msg(skb);
		u32 len = msg_size(msg);

		msg_set_ack(msg, mod(link->rcv_nxt - 1));
		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
		msg_set_size(&tnl_hdr, len + INT_H_SIZE);
		outskb = tipc_buf_acquire(len + INT_H_SIZE);
		if (outskb == NULL) {
			pr_warn("%sunable to send duplicate msg\n",
				link_co_err);
			return;
		}
		skb_copy_to_linear_data(outskb, &tnl_hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(outskb, INT_H_SIZE,
					       skb->data, len);
		__tipc_link_xmit_skb(tnl, outskb);
		if (!tipc_link_is_up(link))
			return;
	}
	if (queue == &link->backlogq)
		return;
	seqno = link->snd_nxt;
	skb_queue_walk(&link->backlogq, skb) {
		msg_set_seqno(buf_msg(skb), seqno);
		seqno = mod(seqno + 1);
	}
	queue = &link->backlogq;
	goto tunnel_queue;
}

/*  tipc_link_failover_rcv(): Receive a tunnelled FAILOVER_MSG packet
 *  Owner node is locked.
 */
static bool tipc_link_failover_rcv(struct tipc_link *link,
				   struct sk_buff **skb)
{
	struct tipc_msg *msg = buf_msg(*skb);
	struct sk_buff *iskb = NULL;
	struct tipc_link *pl = NULL;
	int bearer_id = msg_bearer_id(msg);
	int pos = 0;

	if (msg_type(msg) != FAILOVER_MSG) {
		pr_warn("%sunknown tunnel pkt received\n", link_co_err);
		goto exit;
	}
	if (bearer_id >= MAX_BEARERS)
		goto exit;

	if (bearer_id == link->bearer_id)
		goto exit;

	pl = link->owner->links[bearer_id].link;
	if (pl && tipc_link_is_up(pl))
		tipc_link_reset(pl);

	if (link->failover_pkts == FIRST_FAILOVER)
		link->failover_pkts = msg_msgcnt(msg);

	/* Should we expect an inner packet? */
	if (!link->failover_pkts)
		goto exit;

	if (!tipc_msg_extract(*skb, &iskb, &pos)) {
		pr_warn("%sno inner failover pkt\n", link_co_err);
		*skb = NULL;
		goto exit;
	}
	link->failover_pkts--;
	*skb = NULL;

	/* Was this packet already delivered? */
	if (less(buf_seqno(iskb), link->failover_checkpt)) {
		kfree_skb(iskb);
		iskb = NULL;
		goto exit;
	}
	if (msg_user(buf_msg(iskb)) == MSG_FRAGMENTER) {
		link->stats.recv_fragments++;
		tipc_buf_append(&link->failover_skb, &iskb);
	}
exit:
	if (!link->failover_pkts && pl)
		pl->exec_mode = TIPC_LINK_OPEN;
	kfree_skb(*skb);
	*skb = iskb;
	return *skb;
}

/* tipc_link_proto_rcv(): receive link level protocol message :
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest numerical id determines
 * network plane
 */
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u16 rcvgap = 0;
	u16 nacked_gap = msg_seq_gap(hdr);
	u16 peers_snd_nxt =  msg_next_sent(hdr);
	u16 peers_tol = msg_link_tolerance(hdr);
	u16 peers_prio = msg_linkprio(hdr);
	char *if_name;
	int rc = 0;

	if (l->exec_mode == TIPC_LINK_BLOCKED)
		goto exit;

	if (link_own_addr(l) > msg_prevnode(hdr))
		l->net_plane = msg_net_plane(hdr);

	switch (msg_type(hdr)) {
	case RESET_MSG:

		/* Ignore duplicate RESET with old session number */
		if ((less_eq(msg_session(hdr), l->peer_session)) &&
		    (l->peer_session != WILDCARD_SESSION))
			break;
		/* fall thru' */
	case ACTIVATE_MSG:

		/* Complete own link name with peer's interface name */
		if_name =  strrchr(l->name, ':') + 1;
		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
			break;
		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
			break;
		strncpy(if_name, msg_data(hdr),	TIPC_MAX_IF_NAME);

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		/* Update own priority if peer's priority is higher */
		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
			l->priority = peers_prio;

		l->peer_session = msg_session(hdr);
		l->peer_bearer_id = msg_bearer_id(hdr);
		rc = tipc_link_fsm_evt(l, msg_type(hdr), xmitq);
		if (l->mtu > msg_max_pkt(hdr))
			l->mtu = msg_max_pkt(hdr);
		break;
	case STATE_MSG:
		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		l->silent_intv_cnt = 0;
		l->stats.recv_states++;
		if (msg_probe(hdr))
			l->stats.recv_probes++;
		rc = tipc_link_fsm_evt(l, TRAFFIC_EVT, xmitq);
		if (!tipc_link_is_up(l))
			break;

		/* Has peer sent packets we haven't received yet ? */
		if (more(peers_snd_nxt, l->rcv_nxt))
			rcvgap = peers_snd_nxt - l->rcv_nxt;
		if (rcvgap || (msg_probe(hdr)))
			tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
						  0, 0, xmitq);
		tipc_link_release_pkts(l, msg_ack(hdr));

		/* If NACK, retransmit will now start at right position */
		if (nacked_gap) {
			rc |= tipc_link_retransm(l, nacked_gap, xmitq);
			l->stats.recv_nacks++;
		}
		tipc_link_advance_backlog(l, xmitq);
		if (unlikely(!skb_queue_empty(&l->wakeupq)))
			link_prepare_wakeup(l);
	}
exit:
	kfree_skb(skb);
	return rc;
}

void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
{
	int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);

	l->window = win;
	l->backlog[TIPC_LOW_IMPORTANCE].limit      = win / 2;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = win;
	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = win / 2 * 3;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
}
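
/* Worked example (illustrative window value, not taken from this file):
 * win = 50 yields backlog limits of 25 (LOW), 50 (MEDIUM), 75 (HIGH) and
 * 100 (CRITICAL) packets, while SYSTEM importance is capped at the number
 * of packets needed to carry TIPC_MAX_PUBLICATIONS name table entries of
 * ITEM_SIZE bytes each at the current link MTU.
 */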

/* tipc_link_find_owner - locate owner node of link by link's name
 * @net: the applicable net namespace
 * @link_name: pointer to link name string
 * @bearer_id: pointer to index in 'node->links' array where the link was found
 *
 * Returns pointer to node owning the link, or NULL if no matching link
 * is found.
 */
static struct tipc_node *tipc_link_find_owner(struct net *net,
					      const char *link_name,
					      unsigned int *bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;
	struct tipc_node *found_node = NULL;
	int i;

	*bearer_id = 0;
	rcu_read_lock();
	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
		tipc_node_lock(n_ptr);
		for (i = 0; i < MAX_BEARERS; i++) {
			l_ptr = n_ptr->links[i].link;
			if (l_ptr && !strcmp(l_ptr->name, link_name)) {
				*bearer_id = i;
				found_node = n_ptr;
				break;
			}
		}
		tipc_node_unlock(n_ptr);
		if (found_node)
			break;
	}
	rcu_read_unlock();

	return found_node;
}
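
/* Note: the per-node lock is held only while scanning, so callers that
 * dereference node->links[*bearer_id] afterwards (e.g. tipc_nl_link_set()
 * below) must take tipc_node_lock() again and re-check the link pointer.
 */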

/**
 * link_reset_statistics - reset link statistics
 * @l_ptr: pointer to link
 */
static void link_reset_statistics(struct tipc_link *l_ptr)
{
	memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
	l_ptr->stats.sent_info = l_ptr->snd_nxt;
	l_ptr->stats.recv_info = l_ptr->rcv_nxt;
}
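
/* The memset() above clears every counter; sent_info and recv_info are
 * then re-seeded with the current sequence numbers, which the netlink
 * dump exports as TIPC_NLA_STATS_TX_INFO/RX_INFO in __tipc_nl_add_stats().
 */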

static void link_print(struct tipc_link *l, const char *str)
{
	struct sk_buff *hskb = skb_peek(&l->transmq);
	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt;
	u16 tail = l->snd_nxt - 1;

	pr_info("%s Link <%s>:", str, l->name);

	if (link_probing(l))
		pr_cont(":P\n");
	else if (link_establishing(l))
		pr_cont(":E\n");
	else if (link_resetting(l))
		pr_cont(":R\n");
	else if (link_working(l))
		pr_cont(":W\n");
	else
		pr_cont("\n");

	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
		skb_queue_len(&l->transmq), head, tail,
		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
}

/* Parse and validate nested (link) properties valid for media, bearer and link
 */
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
	int err;

	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
			       tipc_nl_prop_policy);
	if (err)
		return err;

	if (props[TIPC_NLA_PROP_PRIO]) {
		u32 prio;

		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (prio > TIPC_MAX_LINK_PRI)
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_TOL]) {
		u32 tol;

		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_WIN]) {
		u32 win;

		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
			return -EINVAL;
	}

	return 0;
}
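
/* Minimal caller sketch (illustrative; tipc_nl_link_set() below is the
 * real consumer): validate the nested properties first, then apply each
 * one that is present.
 *
 *	struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
 *
 *	if (!tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props) &&
 *	    props[TIPC_NLA_PROP_WIN])
 *		tipc_link_set_queue_limits(link,
 *					   nla_get_u32(props[TIPC_NLA_PROP_WIN]));
 */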
int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	int res = 0;
	int bearer_id;
	char *name;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(name, tipc_bclink_name) == 0)
		return tipc_nl_bc_link_set(net, attrs);

	node = tipc_link_find_owner(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_lock(node);

	link = node->links[bearer_id].link;
	if (!link) {
		res = -EINVAL;
		goto out;
	}

	if (attrs[TIPC_NLA_LINK_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
					      props);
		if (err) {
			res = err;
			goto out;
		}

		if (props[TIPC_NLA_PROP_TOL]) {
			u32 tol;

			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
			link->tolerance = tol;
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0);
		}
		if (props[TIPC_NLA_PROP_PRIO]) {
			u32 prio;

			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
			link->priority = prio;
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio);
		}
		if (props[TIPC_NLA_PROP_WIN]) {
			u32 win;

			win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
			tipc_link_set_queue_limits(link, win);
		}
	}

out:
	tipc_node_unlock(node);

	return res;
}
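
/* Note how each property change is pushed to the peer in a STATE_MSG via
 * tipc_link_proto_xmit(): tipc_link_proto_rcv() above picks the new
 * tolerance (and, on RESET/ACTIVATE, priority) out of the message header,
 * so both endpoints converge on the same link settings.
 */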

static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, s->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, s->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}
	};
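	/* One pass over this key/value map (below) emits every statistic
	 * above as a single u32 netlink attribute, keeping the export
	 * format uniform.
	 */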

	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i <  ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}

/* Caller should hold appropriate locks to protect the link */
static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
			      struct tipc_link *link, int nlflags)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  nlflags, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
			tipc_cluster_mask(tn->own_addr)))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt))
		goto attr_msg_full;

	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
			goto attr_msg_full;
	if (tipc_link_is_active(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
			goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, link->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);
	if (err)
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
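
/* The three unwind labels above mirror the nesting depth: cancel the
 * innermost incomplete nest first (prop), then the link attributes,
 * then the whole genetlink header, so a partially built message is
 * rolled back completely and -EMSGSIZE is returned to the caller.
 */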

/* Caller should hold node lock */
static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
				    struct tipc_node *node, u32 *prev_link)
{
	u32 i;
	int err;

	for (i = *prev_link; i < MAX_BEARERS; i++) {
		*prev_link = i;

		if (!node->links[i].link)
			continue;

		err = __tipc_nl_add_link(net, msg,
					 node->links[i].link, NLM_F_MULTI);
		if (err)
			return err;
	}
	*prev_link = 0;

	return 0;
}
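
/* Resumable dump: prev_node, prev_link and the done flag are parked in
 * cb->args[] between netlink dump callbacks, so iteration resumes at the
 * last node and bearer id that was fully serialized.
 */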

int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node;
	struct tipc_nl_msg msg;
	u32 prev_node = cb->args[0];
	u32 prev_link = cb->args[1];
	int done = cb->args[2];
	int err;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (prev_node) {
		node = tipc_node_find(net, prev_node);
		if (!node) {
			/* We never set seq or call nl_dump_check_consistent()
			 * here; setting prev_seq will therefore cause the
			 * consistency check to fail in the netlink callback
			 * handler, resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			goto out;
		}
		tipc_node_put(node);

		list_for_each_entry_continue_rcu(node, &tn->node_list,
						 list) {
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	} else {
		err = tipc_nl_add_bc_link(net, &msg);
		if (err)
			goto out;

		list_for_each_entry_rcu(node, &tn->node_list, list) {
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	}
	done = 1;
out:
	rcu_read_unlock();

	cb->args[0] = prev_node;
	cb->args[1] = prev_link;
	cb->args[2] = done;

	return skb->len;
}

int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct tipc_nl_msg msg;
	char *name;
	int err;

	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	if (!info->attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;
	name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);

	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg.skb)
		return -ENOMEM;

	if (strcmp(name, tipc_bclink_name) == 0) {
		err = tipc_nl_add_bc_link(net, &msg);
		if (err) {
			nlmsg_free(msg.skb);
			return err;
		}
	} else {
		int bearer_id;
		struct tipc_node *node;
		struct tipc_link *link;

		node = tipc_link_find_owner(net, name, &bearer_id);
		if (!node) {
			/* Don't leak the reply skb on the error path */
			nlmsg_free(msg.skb);
			return -EINVAL;
		}

		tipc_node_lock(node);
		link = node->links[bearer_id].link;
		if (!link) {
			tipc_node_unlock(node);
			nlmsg_free(msg.skb);
			return -EINVAL;
		}

		err = __tipc_nl_add_link(net, &msg, link, 0);
		tipc_node_unlock(node);
		if (err) {
			nlmsg_free(msg.skb);
			return err;
		}
	}

	return genlmsg_reply(msg.skb, info);
}

int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	char *link_name;
	unsigned int bearer_id;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(link_name, tipc_bclink_name) == 0) {
		err = tipc_bclink_reset_stats(net);
		if (err)
			return err;
		return 0;
	}

	node = tipc_link_find_owner(net, link_name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_lock(node);

	link = node->links[bearer_id].link;
	if (!link) {
		tipc_node_unlock(node);
		return -EINVAL;
	}

	link_reset_statistics(link);

	tipc_node_unlock(node);

	return 0;
}