/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2015, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "subscr.h"
#include "link.h"
#include "bcast.h"
#include "socket.h"
#include "name_distr.h"
#include "discover.h"
#include "netlink.h"

#include <linux/pkt_sched.h>

/*
 * Error message prefixes
 */
static const char *link_co_err = "Link changeover error, ";
static const char *link_rst_msg = "Resetting link ";
static const char *link_unk_evt = "Unknown link event ";

static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
	[TIPC_NLA_LINK_UNSPEC]		= { .type = NLA_UNSPEC },
	[TIPC_NLA_LINK_NAME] = {
		.type = NLA_STRING,
		.len = TIPC_MAX_LINK_NAME
	},
	[TIPC_NLA_LINK_MTU]		= { .type = NLA_U32 },
	[TIPC_NLA_LINK_BROADCAST]	= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_UP]		= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_ACTIVE]		= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_PROP]		= { .type = NLA_NESTED },
	[TIPC_NLA_LINK_STATS]		= { .type = NLA_NESTED },
	[TIPC_NLA_LINK_RX]		= { .type = NLA_U32 },
	[TIPC_NLA_LINK_TX]		= { .type = NLA_U32 }
};

/* Properties valid for media, bearer and link */
static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
	[TIPC_NLA_PROP_UNSPEC]		= { .type = NLA_UNSPEC },
	[TIPC_NLA_PROP_PRIO]		= { .type = NLA_U32 },
	[TIPC_NLA_PROP_TOL]		= { .type = NLA_U32 },
	[TIPC_NLA_PROP_WIN]		= { .type = NLA_U32 }
};

/*
 * Interval between NACKs when packets arrive out of order
 */
#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
/*
 * Out-of-range value for link session numbers
 */
#define WILDCARD_SESSION 0x10000

/* State value stored in 'failover_pkts'
 */
#define FIRST_FAILOVER 0xffffu

/* Link FSM states and events:
 */
enum {
	TIPC_LINK_WORKING,
	TIPC_LINK_PROBING,
	TIPC_LINK_RESETTING,
	TIPC_LINK_ESTABLISHING
};

enum {
	PEER_RESET_EVT    = RESET_MSG,
	ACTIVATE_EVT      = ACTIVATE_MSG,
	TRAFFIC_EVT,      /* Any other valid msg from peer */
	SILENCE_EVT       /* Peer was silent during last timer interval */
};

/* Link FSM state checking routines
 */
static int link_working(struct tipc_link *l)
{
	return l->state == TIPC_LINK_WORKING;
}

static int link_probing(struct tipc_link *l)
{
	return l->state == TIPC_LINK_PROBING;
}

static int link_resetting(struct tipc_link *l)
{
	return l->state == TIPC_LINK_RESETTING;
}

static int link_establishing(struct tipc_link *l)
{
	return l->state == TIPC_LINK_ESTABLISHING;
}

static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq);
static void link_reset_statistics(struct tipc_link *l_ptr);
static void link_print(struct tipc_link *l_ptr, const char *str);
static void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
					   struct sk_buff_head *xmitq);
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
static bool tipc_link_failover_rcv(struct tipc_link *l, struct sk_buff **skb);

/*
 *  Simple link routines
 */
static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}

static struct tipc_link *tipc_parallel_link(struct tipc_link *l)
{
	struct tipc_node *n = l->owner;

	if (node_active_link(n, 0) != l)
		return node_active_link(n, 0);
	return node_active_link(n, 1);
}

/*
 *  Simple non-static link routines (i.e. referenced outside this file)
 */
int tipc_link_is_up(struct tipc_link *l_ptr)
{
	if (!l_ptr)
		return 0;
	return link_working(l_ptr) || link_probing(l_ptr);
}

int tipc_link_is_active(struct tipc_link *l)
{
	struct tipc_node *n = l->owner;

	return (node_active_link(n, 0) == l) || (node_active_link(n, 1) == l);
}

/**
 * tipc_link_create - create a new link
 * @n_ptr: pointer to associated node
 * @b_ptr: pointer to associated bearer
 * @media_addr: media address to use when sending messages over link
 *
 * Returns pointer to link.
 */
struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
				   struct tipc_bearer *b_ptr,
				   const struct tipc_media_addr *media_addr,
				   struct sk_buff_head *inputq,
				   struct sk_buff_head *namedq)
{
	struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
	struct tipc_link *l_ptr;
	struct tipc_msg *msg;
	char *if_name;
	char addr_string[16];
	u32 peer = n_ptr->addr;

	if (n_ptr->link_cnt >= MAX_BEARERS) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Cannot establish %uth link to %s. Max %u allowed.\n",
		       n_ptr->link_cnt, addr_string, MAX_BEARERS);
		return NULL;
	}

	if (n_ptr->links[b_ptr->identity].link) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish second link on <%s> to %s\n",
		       b_ptr->name, addr_string);
		return NULL;
	}

	l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
	if (!l_ptr) {
		pr_warn("Link creation failed, no memory\n");
		return NULL;
	}
	l_ptr->addr = peer;
	if_name = strchr(b_ptr->name, ':') + 1;
	sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(tn->own_addr), tipc_cluster(tn->own_addr),
		tipc_node(tn->own_addr),
		if_name,
		tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
		/* note: peer i/f name is updated by reset/activate message */
	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
	l_ptr->owner = n_ptr;
	l_ptr->peer_session = WILDCARD_SESSION;
	l_ptr->bearer_id = b_ptr->identity;
	l_ptr->tolerance = b_ptr->tolerance;
	l_ptr->state = TIPC_LINK_RESETTING;

	l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
	msg = l_ptr->pmsg;
	tipc_msg_init(tn->own_addr, msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE,
		      l_ptr->addr);
	msg_set_size(msg, sizeof(l_ptr->proto_msg));
	msg_set_session(msg, (tn->random & 0xffff));
	msg_set_bearer_id(msg, b_ptr->identity);
	strcpy((char *)msg_data(msg), if_name);
	l_ptr->net_plane = b_ptr->net_plane;
	l_ptr->advertised_mtu = b_ptr->mtu;
	l_ptr->mtu = l_ptr->advertised_mtu;
	l_ptr->priority = b_ptr->priority;
	tipc_link_set_queue_limits(l_ptr, b_ptr->window);
	l_ptr->snd_nxt = 1;
	__skb_queue_head_init(&l_ptr->transmq);
	__skb_queue_head_init(&l_ptr->backlogq);
	__skb_queue_head_init(&l_ptr->deferdq);
	skb_queue_head_init(&l_ptr->wakeupq);
	l_ptr->inputq = inputq;
	l_ptr->namedq = namedq;
	skb_queue_head_init(l_ptr->inputq);
	link_reset_statistics(l_ptr);
	tipc_node_attach_link(n_ptr, l_ptr);
	return l_ptr;
}

/**
 * tipc_link_delete - Delete a link
 * @l: link to be deleted
 */
void tipc_link_delete(struct tipc_link *l)
{
	tipc_link_reset(l);
	tipc_link_reset_fragments(l);
	tipc_node_detach_link(l->owner, l);
}

void tipc_link_delete_list(struct net *net, unsigned int bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *link;
	struct tipc_node *node;

	rcu_read_lock();
	list_for_each_entry_rcu(node, &tn->node_list, list) {
		tipc_node_lock(node);
		link = node->links[bearer_id].link;
		if (link)
			tipc_link_delete(link);
		tipc_node_unlock(node);
	}
	rcu_read_unlock();
}

/* tipc_link_build_bcast_sync_msg() - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 */
static void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
					   struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	struct sk_buff_head list;

	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
			      0, l->addr, link_own_addr(l), 0, 0, 0);
	if (!skb)
		return;
	__skb_queue_head_init(&list);
	__skb_queue_tail(&list, skb);
	tipc_link_xmit(l, &list, xmitq);
}

/**
 * tipc_link_fsm_evt - link finite state machine
 * @l: pointer to link
 * @evt: state machine event to be processed
 * @xmitq: queue to prepend created protocol message, if any
 */
static int tipc_link_fsm_evt(struct tipc_link *l, int evt,
			     struct sk_buff_head *xmitq)
{
	int mtyp = 0, rc = 0;
	struct tipc_link *pl;
	enum {
319 320 321 322 323 324 325
		LINK_RESET     = 1,
		LINK_ACTIVATE  = (1 << 1),
		SND_PROBE      = (1 << 2),
		SND_STATE      = (1 << 3),
		SND_RESET      = (1 << 4),
		SND_ACTIVATE   = (1 << 5),
		SND_BCAST_SYNC = (1 << 6)
326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376
	} actions = 0;
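	/* The state/event evaluation below only accumulates action flags;
	 * all actions are carried out in one place after the switch.
	 */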

	if (l->exec_mode == TIPC_LINK_BLOCKED)
		return rc;

	switch (l->state) {
	case TIPC_LINK_WORKING:
		switch (evt) {
		case TRAFFIC_EVT:
		case ACTIVATE_EVT:
			break;
		case SILENCE_EVT:
			l->state = TIPC_LINK_PROBING;
			actions |= SND_PROBE;
			break;
		case PEER_RESET_EVT:
			actions |= LINK_RESET | SND_ACTIVATE;
			break;
		default:
			pr_debug("%s%u WORKING\n", link_unk_evt, evt);
		}
		break;
	case TIPC_LINK_PROBING:
		switch (evt) {
		case TRAFFIC_EVT:
		case ACTIVATE_EVT:
			l->state = TIPC_LINK_WORKING;
			break;
		case PEER_RESET_EVT:
			actions |= LINK_RESET | SND_ACTIVATE;
			break;
		case SILENCE_EVT:
			if (l->silent_intv_cnt <= l->abort_limit) {
				actions |= SND_PROBE;
				break;
			}
			actions |= LINK_RESET | SND_RESET;
			break;
		default:
			pr_err("%s%u PROBING\n", link_unk_evt, evt);
		}
		break;
	case TIPC_LINK_RESETTING:
		switch (evt) {
		case TRAFFIC_EVT:
			break;
		case ACTIVATE_EVT:
			pl = node_active_link(l->owner, 0);
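			/* Hold back activation while the parallel link is
			 * still in the probing state
			 */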
			if (pl && link_probing(pl))
				break;
			actions |= LINK_ACTIVATE;
			if (!l->owner->working_links)
				actions |= SND_BCAST_SYNC;
			break;
		case PEER_RESET_EVT:
			l->state = TIPC_LINK_ESTABLISHING;
			actions |= SND_ACTIVATE;
			break;
		case SILENCE_EVT:
			actions |= SND_RESET;
			break;
		default:
			pr_err("%s%u in RESETTING\n", link_unk_evt, evt);
		}
		break;
	case TIPC_LINK_ESTABLISHING:
		switch (evt) {
		case TRAFFIC_EVT:
		case ACTIVATE_EVT:
			pl = node_active_link(l->owner, 0);
			if (pl && link_probing(pl))
				break;
			actions |= LINK_ACTIVATE;
			if (!l->owner->working_links)
				actions |= SND_BCAST_SYNC;
			break;
		case PEER_RESET_EVT:
			break;
		case SILENCE_EVT:
			actions |= SND_ACTIVATE;
			break;
		default:
			pr_err("%s%u ESTABLISHING\n", link_unk_evt, evt);
		}
		break;
	default:
		pr_err("Unknown link state %u/%u\n", l->state, evt);
	}

	/* Perform actions as decided by FSM */
	if (actions & LINK_RESET) {
		l->exec_mode = TIPC_LINK_BLOCKED;
		rc |= TIPC_LINK_DOWN_EVT;
	}
	if (actions & LINK_ACTIVATE) {
		l->exec_mode = TIPC_LINK_OPEN;
		rc |= TIPC_LINK_UP_EVT;
	}
	if (actions & (SND_STATE | SND_PROBE))
		mtyp = STATE_MSG;
	if (actions & SND_RESET)
		mtyp = RESET_MSG;
	if (actions & SND_ACTIVATE)
		mtyp = ACTIVATE_MSG;
	if (actions & (SND_PROBE | SND_STATE | SND_RESET | SND_ACTIVATE))
		tipc_link_build_proto_msg(l, mtyp, actions & SND_PROBE,
					  0, 0, 0, xmitq);
	if (actions & SND_BCAST_SYNC)
		tipc_link_build_bcast_sync_msg(l, xmitq);
	return rc;
}

/* link_profile_stats - update statistical profiling of traffic
 */
static void link_profile_stats(struct tipc_link *l)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	int length;

	/* Update counters used in statistical profiling of send traffic */
	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
	l->stats.queue_sz_counts++;

	skb = skb_peek(&l->transmq);
	if (!skb)
		return;
	msg = buf_msg(skb);
	length = msg_size(msg);

	if (msg_user(msg) == MSG_FRAGMENTER) {
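		/* For fragmented messages, sample the size of the original
		 * message carried by the first fragment only
		 */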
		if (msg_type(msg) != FIRST_FRAGMENT)
			return;
		length = msg_size(msg_get_wrapped(msg));
	}
	l->stats.msg_lengths_total += length;
	l->stats.msg_length_counts++;
	if (length <= 64)
		l->stats.msg_length_profile[0]++;
	else if (length <= 256)
		l->stats.msg_length_profile[1]++;
	else if (length <= 1024)
		l->stats.msg_length_profile[2]++;
	else if (length <= 4096)
		l->stats.msg_length_profile[3]++;
	else if (length <= 16384)
		l->stats.msg_length_profile[4]++;
	else if (length <= 32768)
		l->stats.msg_length_profile[5]++;
	else
		l->stats.msg_length_profile[6]++;
}

/* tipc_link_timeout - perform periodic task as instructed from node timeout
 */
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int rc = 0;

	link_profile_stats(l);
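	/* A nonzero silence counter means nothing was received from the
	 * peer during the previous interval; the FSM decides whether this
	 * warrants a probe or taking the link down.
	 */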
	if (l->silent_intv_cnt)
		rc = tipc_link_fsm_evt(l, SILENCE_EVT, xmitq);
	else if (link_working(l) && tipc_bclink_acks_missing(l->owner))
		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
	l->silent_intv_cnt++;
	return rc;
}

/**
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @link: congested link
 * @list: message that was attempted sent
 * Create pseudo msg to send back to user when congestion abates
 * Does not consume buffer list
 */
static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
{
	struct tipc_msg *msg = buf_msg(skb_peek(list));
	int imp = msg_importance(msg);
	u32 oport = msg_origport(msg);
	u32 addr = link_own_addr(link);
	struct sk_buff *skb;

	/* This really cannot happen...  */
	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
		pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
		return -ENOBUFS;
	}
	/* Non-blocking sender: */
	if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
		return -ELINKCONG;

	/* Create and schedule wakeup pseudo message */
	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
			      addr, addr, oport, 0, 0);
	if (!skb)
		return -ENOBUFS;
	TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
	TIPC_SKB_CB(skb)->chain_imp = imp;
	skb_queue_tail(&link->wakeupq, skb);
	link->stats.link_congs++;
	return -ELINKCONG;
}

/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @link: congested link
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to node wait queue for wakeup
 */
void link_prepare_wakeup(struct tipc_link *l)
{
	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
	int imp, lim;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		lim = l->window + l->backlog[imp].limit;
		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
		if ((pnd[imp] + l->backlog[imp].len) >= lim)
			break;
		skb_unlink(skb, &l->wakeupq);
		skb_queue_tail(l->inputq, skb);
		l->owner->inputq = l->inputq;
		l->owner->action_flags |= TIPC_MSG_EVT;
	}
}

/**
 * tipc_link_reset_fragments - purge link's inbound message fragments queue
 * @l_ptr: pointer to link
 */
void tipc_link_reset_fragments(struct tipc_link *l_ptr)
{
	kfree_skb(l_ptr->reasm_buf);
	l_ptr->reasm_buf = NULL;
}

void tipc_link_purge_backlog(struct tipc_link *l)
{
	__skb_queue_purge(&l->backlogq);
	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
}

/**
 * tipc_link_purge_queues - purge all pkt queues associated with link
 * @l_ptr: pointer to link
 */
void tipc_link_purge_queues(struct tipc_link *l_ptr)
{
	__skb_queue_purge(&l_ptr->deferdq);
	__skb_queue_purge(&l_ptr->transmq);
	tipc_link_purge_backlog(l_ptr);
	tipc_link_reset_fragments(l_ptr);
}

void tipc_link_reset(struct tipc_link *l_ptr)
{
	u32 prev_state = l_ptr->state;
	int was_active_link = tipc_link_is_active(l_ptr);
	struct tipc_node *owner = l_ptr->owner;
	struct tipc_link *pl = tipc_parallel_link(l_ptr);

	msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));

	/* Link is down, accept any session */
	l_ptr->peer_session = WILDCARD_SESSION;

	/* Prepare for renewed mtu size negotiation */
	l_ptr->mtu = l_ptr->advertised_mtu;

	l_ptr->state = TIPC_LINK_RESETTING;

	if ((prev_state == TIPC_LINK_RESETTING) ||
	    (prev_state == TIPC_LINK_ESTABLISHING))
		return;

	tipc_node_link_down(l_ptr->owner, l_ptr->bearer_id);
	tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr);

	if (was_active_link && tipc_node_is_up(l_ptr->owner) && (pl != l_ptr)) {
		l_ptr->exec_mode = TIPC_LINK_BLOCKED;
		l_ptr->failover_checkpt = l_ptr->rcv_nxt;
		pl->failover_pkts = FIRST_FAILOVER;
		pl->failover_checkpt = l_ptr->rcv_nxt;
		pl->failover_skb = l_ptr->reasm_buf;
	} else {
		kfree_skb(l_ptr->reasm_buf);
	}
	/* Clean up all queues, except inputq: */
	__skb_queue_purge(&l_ptr->transmq);
	__skb_queue_purge(&l_ptr->deferdq);
	if (!owner->inputq)
		owner->inputq = l_ptr->inputq;
	skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq);
	if (!skb_queue_empty(owner->inputq))
		owner->action_flags |= TIPC_MSG_EVT;
	tipc_link_purge_backlog(l_ptr);
	l_ptr->reasm_buf = NULL;
	l_ptr->rcv_unacked = 0;
	l_ptr->snd_nxt = 1;
	l_ptr->rcv_nxt = 1;
	l_ptr->silent_intv_cnt = 0;
	l_ptr->stats.recv_info = 0;
	l_ptr->stale_count = 0;
	link_reset_statistics(l_ptr);
}

void tipc_link_activate(struct tipc_link *link)
{
	struct tipc_node *node = link->owner;

	link->rcv_nxt = 1;
	link->stats.recv_info = 1;
	link->silent_intv_cnt = 0;
	link->state = TIPC_LINK_WORKING;
	link->exec_mode = TIPC_LINK_OPEN;
	tipc_node_link_up(node, link->bearer_id);
	tipc_bearer_add_dest(node->net, link->bearer_id, link->addr);
}

/**
 * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
 * @link: link to use
 * @list: chain of buffers containing message
 *
 * Consumes the buffer chain, except when returning an error code,
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int __tipc_link_xmit(struct net *net, struct tipc_link *link,
		     struct sk_buff_head *list)
{
	struct tipc_msg *msg = buf_msg(skb_peek(list));
	unsigned int maxwin = link->window;
	unsigned int i, imp = msg_importance(msg);
	uint mtu = link->mtu;
	u16 ack = mod(link->rcv_nxt - 1);
	u16 seqno = link->snd_nxt;
	u16 bc_last_in = link->owner->bclink.last_in;
	struct tipc_media_addr *addr = &link->media_addr;
	struct sk_buff_head *transmq = &link->transmq;
	struct sk_buff_head *backlogq = &link->backlogq;
	struct sk_buff *skb, *bskb;

	/* Match msg importance against this and all higher backlog limits: */
	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
		if (unlikely(link->backlog[i].len >= link->backlog[i].limit))
			return link_schedule_user(link, list);
	}
	if (unlikely(msg_size(msg) > mtu))
		return -EMSGSIZE;

	/* Prepare each packet for sending, and add to relevant queue: */
	while (skb_queue_len(list)) {
		skb = skb_peek(list);
		msg = buf_msg(skb);
		msg_set_seqno(msg, seqno);
		msg_set_ack(msg, ack);
		msg_set_bcast_ack(msg, bc_last_in);

		if (likely(skb_queue_len(transmq) < maxwin)) {
			__skb_dequeue(list);
			__skb_queue_tail(transmq, skb);
			tipc_bearer_send(net, link->bearer_id, skb, addr);
			link->rcv_unacked = 0;
			seqno++;
			continue;
		}
		if (tipc_msg_bundle(skb_peek_tail(backlogq), msg, mtu)) {
			kfree_skb(__skb_dequeue(list));
			link->stats.sent_bundled++;
			continue;
		}
		if (tipc_msg_make_bundle(&bskb, msg, mtu, link->addr)) {
			kfree_skb(__skb_dequeue(list));
			__skb_queue_tail(backlogq, bskb);
			link->backlog[msg_importance(buf_msg(bskb))].len++;
			link->stats.sent_bundled++;
			link->stats.sent_bundles++;
			continue;
		}
		link->backlog[imp].len += skb_queue_len(list);
		skb_queue_splice_tail_init(list, backlogq);
	}
	link->snd_nxt = seqno;
	return 0;
}

/**
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @link: link to use
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain, except when returning -ELINKCONG,
 * since the caller then may want to make more send attempts.
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
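 *
 * A typical caller (sketch, not taken from this file) first builds the
 * buffer chain in 'list', then calls
 *	rc = tipc_link_xmit(l, &list, &xmitq);
 * and transmits whatever was placed on 'xmitq' over the bearer; on
 * -ELINKCONG it waits for a SOCK_WAKEUP message before retrying.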
 */
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
		   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	unsigned int maxwin = l->window;
	unsigned int i, imp = msg_importance(hdr);
	unsigned int mtu = l->mtu;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno = l->snd_nxt;
	u16 bc_last_in = l->owner->bclink.last_in;
	struct sk_buff_head *transmq = &l->transmq;
	struct sk_buff_head *backlogq = &l->backlogq;
	struct sk_buff *skb, *_skb, *bskb;

	/* Match msg importance against this and all higher backlog limits: */
	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
		if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
			return link_schedule_user(l, list);
	}
	if (unlikely(msg_size(hdr) > mtu))
		return -EMSGSIZE;

	/* Prepare each packet for sending, and add to relevant queue: */
	while (skb_queue_len(list)) {
		skb = skb_peek(list);
		hdr = buf_msg(skb);
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_last_in);

		if (likely(skb_queue_len(transmq) < maxwin)) {
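			/* Send window open: keep the original in transmq for
			 * possible retransmission and hand a clone to the
			 * caller via xmitq
			 */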
			_skb = skb_clone(skb, GFP_ATOMIC);
			if (!_skb)
				return -ENOBUFS;
			__skb_dequeue(list);
			__skb_queue_tail(transmq, skb);
			__skb_queue_tail(xmitq, _skb);
			l->rcv_unacked = 0;
			seqno++;
			continue;
		}
		if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
			kfree_skb(__skb_dequeue(list));
			l->stats.sent_bundled++;
			continue;
		}
		if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
			kfree_skb(__skb_dequeue(list));
			__skb_queue_tail(backlogq, bskb);
			l->backlog[msg_importance(buf_msg(bskb))].len++;
			l->stats.sent_bundled++;
			l->stats.sent_bundles++;
			continue;
		}
		l->backlog[imp].len += skb_queue_len(list);
		skb_queue_splice_tail_init(list, backlogq);
	}
	l->snd_nxt = seqno;
	return 0;
}

static void skb2list(struct sk_buff *skb, struct sk_buff_head *list)
{
	skb_queue_head_init(list);
	__skb_queue_tail(list, skb);
}

static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
{
	struct sk_buff_head head;

	skb2list(skb, &head);
	return __tipc_link_xmit(link->owner->net, link, &head);
}

/*
 * tipc_link_sync_rcv - synchronize broadcast link endpoints.
 * Receive the sequence number where we should start receiving and
 * acking broadcast packets from a newly added peer node, and open
 * up for reception of such packets.
 *
 * Called with node locked
 */
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
	n->bclink.recv_permitted = true;
	kfree_skb(buf);
}

/*
 * tipc_link_push_packets - push unsent packets to bearer
 *
 * Push out the unsent messages of a link where congestion
 * has abated. Node is locked.
 *
 * Called with node locked
 */
void tipc_link_push_packets(struct tipc_link *link)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	u16 seqno = link->snd_nxt;
	u16 ack = mod(link->rcv_nxt - 1);

	while (skb_queue_len(&link->transmq) < link->window) {
		skb = __skb_dequeue(&link->backlogq);
		if (!skb)
			break;
		msg = buf_msg(skb);
		link->backlog[msg_importance(msg)].len--;
		msg_set_ack(msg, ack);
		msg_set_seqno(msg, seqno);
		seqno = mod(seqno + 1);
		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
		link->rcv_unacked = 0;
		__skb_queue_tail(&link->transmq, skb);
		tipc_bearer_send(link->owner->net, link->bearer_id,
				 skb, &link->media_addr);
	}
	link->snd_nxt = seqno;
}

void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *_skb;
	struct tipc_msg *hdr;
	u16 seqno = l->snd_nxt;
	u16 ack = l->rcv_nxt - 1;

	while (skb_queue_len(&l->transmq) < l->window) {
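		/* Move backlog packets into the open send window: the
		 * original stays on transmq, a clone goes on xmitq for
		 * transmission by the caller
		 */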
		skb = skb_peek(&l->backlogq);
		if (!skb)
			break;
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb)
			break;
		__skb_dequeue(&l->backlogq);
		hdr = buf_msg(skb);
		l->backlog[msg_importance(hdr)].len--;
		__skb_queue_tail(&l->transmq, skb);
		__skb_queue_tail(xmitq, _skb);
		msg_set_ack(hdr, ack);
		msg_set_seqno(hdr, seqno);
		msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
		l->rcv_unacked = 0;
		seqno++;
	}
	l->snd_nxt = seqno;
}

void tipc_link_reset_all(struct tipc_node *node)
{
	char addr_string[16];
	u32 i;

	tipc_node_lock(node);

	pr_warn("Resetting all links to %s\n",
		tipc_addr_string_fill(addr_string, node->addr));

	for (i = 0; i < MAX_BEARERS; i++) {
		if (node->links[i].link) {
			link_print(node->links[i].link, "Resetting link\n");
			tipc_link_reset(node->links[i].link);
		}
	}

	tipc_node_unlock(node);
}

static void link_retransmit_failure(struct tipc_link *l_ptr,
				    struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct net *net = l_ptr->owner->net;

	pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);

	if (l_ptr->addr) {
		/* Handle failure on standard link */
		link_print(l_ptr, "Resetting link ");
		pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
			msg_user(msg), msg_type(msg), msg_size(msg),
			msg_errcode(msg));
		pr_info("sqno %u, prev: %x, src: %x\n",
			msg_seqno(msg), msg_prevnode(msg), msg_orignode(msg));
		tipc_link_reset(l_ptr);
	} else {
		/* Handle failure on broadcast link */
		struct tipc_node *n_ptr;
		char addr_string[16];

		pr_info("Msg seq number: %u,  ", msg_seqno(msg));
		pr_cont("Outstanding acks: %lu\n",
			(unsigned long) TIPC_SKB_CB(buf)->handle);

		n_ptr = tipc_bclink_retransmit_to(net);

		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_info("Broadcast link info for %s\n", addr_string);
		pr_info("Reception permitted: %d,  Acked: %u\n",
			n_ptr->bclink.recv_permitted,
			n_ptr->bclink.acked);
		pr_info("Last in: %u,  Oos state: %u,  Last sent: %u\n",
			n_ptr->bclink.last_in,
			n_ptr->bclink.oos_state,
			n_ptr->bclink.last_sent);

		n_ptr->action_flags |= TIPC_BCAST_RESET;
	}
}

947
void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
948
			  u32 retransmits)
P
Per Liden 已提交
949 950 951
{
	struct tipc_msg *msg;

952
	if (!skb)
953 954
		return;

955
	msg = buf_msg(skb);
956

957
	/* Detect repeated retransmit failures */
958
	if (l_ptr->last_retransm == msg_seqno(msg)) {
959
		if (++l_ptr->stale_count > 100) {
960
			link_retransmit_failure(l_ptr, skb);
961
			return;
962 963
		}
	} else {
964
		l_ptr->last_retransm = msg_seqno(msg);
965
		l_ptr->stale_count = 1;
P
Per Liden 已提交
966
	}
967

J
Jon Paul Maloy 已提交
968 969
	skb_queue_walk_from(&l_ptr->transmq, skb) {
		if (!retransmits)
970 971
			break;
		msg = buf_msg(skb);
972
		msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
973
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
974 975
		tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
				 &l_ptr->media_addr);
976 977
		retransmits--;
		l_ptr->stats.retransmitted++;
P
Per Liden 已提交
978 979 980
	}
}

static int tipc_link_retransm(struct tipc_link *l, int retransm,
			      struct sk_buff_head *xmitq)
{
	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
	struct tipc_msg *hdr;

	if (!skb)
		return 0;

	/* Detect repeated retransmit failures on same packet */
	if (likely(l->last_retransm != buf_seqno(skb))) {
		l->last_retransm = buf_seqno(skb);
		l->stale_count = 1;
	} else if (++l->stale_count > 100) {
		link_retransmit_failure(l, skb);
		return TIPC_LINK_DOWN_EVT;
	}
	skb_queue_walk(&l->transmq, skb) {
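		/* Rebuild up to 'retransm' copies of the oldest unacked
		 * packets and queue them on xmitq for retransmission
		 */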
		if (!retransm)
			return 0;
		hdr = buf_msg(skb);
		_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
		if (!_skb)
			return 0;
		hdr = buf_msg(_skb);
		msg_set_ack(hdr, l->rcv_nxt - 1);
		msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
		_skb->priority = TC_PRIO_CONTROL;
		__skb_queue_tail(xmitq, _skb);
		retransm--;
		l->stats.retransmitted++;
	}
	return 0;
}

/* link_synch(): check if all packets arrived before the synch
 *               point have been consumed
 * Returns true if the parallel links are synched, otherwise false
 */
static bool link_synch(struct tipc_link *l)
{
	unsigned int post_synch;
	struct tipc_link *pl;

	pl  = tipc_parallel_link(l);
	if (pl == l)
		goto synched;

	/* Was last pre-synch packet added to input queue ? */
	if (less_eq(pl->rcv_nxt, l->synch_point))
		return false;

	/* Is it still in the input queue ? */
	post_synch = mod(pl->rcv_nxt - l->synch_point) - 1;
	if (skb_queue_len(pl->inputq) > post_synch)
		return false;
synched:
	l->exec_mode = TIPC_LINK_OPEN;
	return true;
}

/* tipc_data_input - deliver data and name distr msgs to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */
static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
{
	struct tipc_node *node = link->owner;
	struct tipc_msg *msg = buf_msg(skb);
	u32 dport = msg_destport(msg);

	switch (msg_user(msg)) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
	case CONN_MANAGER:
		if (tipc_skb_queue_tail(link->inputq, skb, dport)) {
			node->inputq = link->inputq;
			node->action_flags |= TIPC_MSG_EVT;
		}
		return true;
	case NAME_DISTRIBUTOR:
		node->bclink.recv_permitted = true;
		node->namedq = link->namedq;
		skb_queue_tail(link->namedq, skb);
		if (skb_queue_len(link->namedq) == 1)
			node->action_flags |= TIPC_NAMED_MSG_EVT;
		return true;
	case MSG_BUNDLER:
	case TUNNEL_PROTOCOL:
	case MSG_FRAGMENTER:
	case BCAST_PROTOCOL:
		return false;
	default:
		pr_warn("Dropping received illegal msg type\n");
		kfree_skb(skb);
		return false;
	};
}

/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 * Node lock must be held
 */
static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
{
	struct tipc_node *node = link->owner;
	struct tipc_msg *msg = buf_msg(skb);
	struct sk_buff *iskb;
	int pos = 0;

	switch (msg_user(msg)) {
	case TUNNEL_PROTOCOL:
		if (msg_dup(msg)) {
			link->exec_mode = TIPC_LINK_TUNNEL;
			link->synch_point = msg_seqno(msg_get_wrapped(msg));
			kfree_skb(skb);
			break;
		}
		if (!tipc_link_failover_rcv(link, &skb))
			break;
		if (msg_user(buf_msg(skb)) != MSG_BUNDLER) {
			tipc_data_input(link, skb);
			break;
		}
	case MSG_BUNDLER:
		link->stats.recv_bundles++;
		link->stats.recv_bundled += msg_msgcnt(msg);

		while (tipc_msg_extract(skb, &iskb, &pos))
			tipc_data_input(link, iskb);
		break;
	case MSG_FRAGMENTER:
		link->stats.recv_fragments++;
		if (tipc_buf_append(&link->reasm_buf, &skb)) {
			link->stats.recv_fragmented++;
			tipc_data_input(link, skb);
		} else if (!link->reasm_buf) {
			tipc_link_reset(link);
		}
		break;
	case BCAST_PROTOCOL:
		tipc_link_sync_rcv(node, skb);
		break;
	default:
		break;
	};
}

static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
{
	bool released = false;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->transmq, skb, tmp) {
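		/* Free every packet the peer has acknowledged, i.e. all
		 * with a sequence number not after 'acked'
		 */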
		if (more(buf_seqno(skb), acked))
			break;
		__skb_unlink(skb, &l->transmq);
		kfree_skb(skb);
		released = true;
	}
	return released;
}

/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
 * @link: the link that should handle the message
 * @skb: TIPC packet
 * @xmitq: queue to place packets to be sent after this call
 */
int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
		  struct sk_buff_head *xmitq)
{
	struct sk_buff_head *arrvq = &l->deferdq;
	struct sk_buff *tmp;
	struct tipc_msg *hdr;
	u16 seqno, rcv_nxt;
	int rc = 0;

	if (unlikely(!__tipc_skb_queue_sorted(arrvq, skb))) {
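		/* Packet could not be added to the deferred queue in
		 * sequence (e.g. a duplicate); occasionally send a state
		 * message so the peer can detect the gap and retransmit
		 */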
		if (!(skb_queue_len(arrvq) % TIPC_NACK_INTV))
			tipc_link_build_proto_msg(l, STATE_MSG, 0,
						  0, 0, 0, xmitq);
		return rc;
	}

	skb_queue_walk_safe(arrvq, skb, tmp) {
		hdr = buf_msg(skb);

		/* Verify and update link state */
		if (unlikely(msg_user(hdr) == LINK_PROTOCOL)) {
			__skb_dequeue(arrvq);
			rc |= tipc_link_proto_rcv(l, skb, xmitq);
			continue;
		}

		if (unlikely(!link_working(l))) {
			rc |= tipc_link_fsm_evt(l, TRAFFIC_EVT, xmitq);
			if (!link_working(l)) {
				kfree_skb(__skb_dequeue(arrvq));
				return rc;
			}
		}

		l->silent_intv_cnt = 0;

		/* Forward queues and wake up waiting users */
		if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
			tipc_link_advance_backlog(l, xmitq);
			if (unlikely(!skb_queue_empty(&l->wakeupq)))
				link_prepare_wakeup(l);
		}

		/* Defer reception if there is a gap in the sequence */
		seqno = msg_seqno(hdr);
		rcv_nxt = l->rcv_nxt;
		if (unlikely(less(rcv_nxt, seqno))) {
			l->stats.deferred_recv++;
			return rc;
		}

		__skb_dequeue(arrvq);

		/* Drop if packet already received */
		if (unlikely(more(rcv_nxt, seqno))) {
			l->stats.duplicates++;
			kfree_skb(skb);
			return rc;
		}

		/* Synchronize with parallel link if applicable */
		if (unlikely(l->exec_mode == TIPC_LINK_TUNNEL))
			if (!msg_dup(hdr) && !link_synch(l)) {
				kfree_skb(skb);
				return rc;
			}

		/* Packet can be delivered */
		l->rcv_nxt++;
		l->stats.recv_info++;
		if (unlikely(!tipc_data_input(l, skb)))
			tipc_link_input(l, skb);

		/* Ack at regular intervals */
		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
			l->rcv_unacked = 0;
			l->stats.sent_acks++;
			tipc_link_build_proto_msg(l, STATE_MSG,
						  0, 0, 0, 0, xmitq);
		}
	}
	return rc;
}

/**
 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
 *
 * Returns increase in queue length (i.e. 0 or 1)
 */
u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
{
	struct sk_buff *skb1;
	u16 seq_no = buf_seqno(skb);

	/* Empty queue ? */
	if (skb_queue_empty(list)) {
		__skb_queue_tail(list, skb);
		return 1;
	}

	/* Last ? */
	if (less(buf_seqno(skb_peek_tail(list)), seq_no)) {
		__skb_queue_tail(list, skb);
		return 1;
	}

	/* Locate insertion point in queue, then insert; discard if duplicate */
	skb_queue_walk(list, skb1) {
		u16 curr_seqno = buf_seqno(skb1);

		if (seq_no == curr_seqno) {
			kfree_skb(skb);
			return 0;
		}

		if (less(seq_no, curr_seqno))
			break;
	}

	__skb_queue_before(list, skb1, skb);
	return 1;
}

/*
 * Send protocol message to the other endpoint.
 */
void tipc_link_proto_xmit(struct tipc_link *l, u32 msg_typ, int probe_msg,
			  u32 gap, u32 tolerance, u32 priority)
{
	struct sk_buff *skb = NULL;
	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);
	tipc_link_build_proto_msg(l, msg_typ, probe_msg, gap,
				  tolerance, priority, &xmitq);
	skb = __skb_dequeue(&xmitq);
	if (!skb)
		return;
	tipc_bearer_send(l->owner->net, l->bearer_id, skb, &l->media_addr);
	l->rcv_unacked = 0;
	kfree_skb(skb);
}

/* tipc_link_build_proto_msg: prepare link protocol message for transmission
 */
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq)
{
	struct sk_buff *skb = NULL;
	struct tipc_msg *hdr = l->pmsg;
	u16 snd_nxt = l->snd_nxt;
	u16 rcv_nxt = l->rcv_nxt;
	u16 rcv_last = rcv_nxt - 1;
	int node_up = l->owner->bclink.recv_permitted;

	/* Don't send protocol message during reset or link failover */
	if (l->exec_mode == TIPC_LINK_BLOCKED)
		return;

	msg_set_type(hdr, mtyp);
	msg_set_net_plane(hdr, l->net_plane);
	msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
	msg_set_last_bcast(hdr, tipc_bclink_get_last_sent(l->owner->net));
	msg_set_link_tolerance(hdr, tolerance);
	msg_set_linkprio(hdr, priority);
	msg_set_redundant_link(hdr, node_up);
	msg_set_seq_gap(hdr, 0);

	/* Compatibility: created msg must not be in sequence with pkt flow */
	msg_set_seqno(hdr, snd_nxt + U16_MAX / 2);

	if (mtyp == STATE_MSG) {
		if (!tipc_link_is_up(l))
			return;
		msg_set_next_sent(hdr, snd_nxt);

		/* Override rcvgap if there are packets in deferred queue */
		if (!skb_queue_empty(&l->deferdq))
			rcvgap = buf_seqno(skb_peek(&l->deferdq)) - rcv_nxt;
		if (rcvgap) {
			msg_set_seq_gap(hdr, rcvgap);
			l->stats.sent_nacks++;
		}
		msg_set_ack(hdr, rcv_last);
		msg_set_probe(hdr, probe);
		if (probe)
			l->stats.sent_probes++;
		l->stats.sent_states++;
	} else {
		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_max_pkt(hdr, l->advertised_mtu);
		msg_set_ack(hdr, l->failover_checkpt - 1);
		msg_set_next_sent(hdr, 1);
	}
	skb = tipc_buf_acquire(msg_size(hdr));
	if (!skb)
		return;
	skb_copy_to_linear_data(skb, hdr, msg_size(hdr));
	skb->priority = TC_PRIO_CONTROL;
	__skb_queue_head(xmitq, skb);
}

/* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
 * a different bearer. Owner node is locked.
 */
static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
				  struct tipc_msg *tunnel_hdr,
				  struct tipc_msg *msg,
				  u32 selector)
{
	struct tipc_link *tunnel;
	struct sk_buff *skb;
	u32 length = msg_size(msg);

	tunnel = node_active_link(l_ptr->owner, selector & 1);
	if (!tipc_link_is_up(tunnel)) {
		pr_warn("%stunnel link no longer available\n", link_co_err);
		return;
	}
	msg_set_size(tunnel_hdr, length + INT_H_SIZE);
	skb = tipc_buf_acquire(length + INT_H_SIZE);
	if (!skb) {
		pr_warn("%sunable to send tunnel msg\n", link_co_err);
		return;
	}
	skb_copy_to_linear_data(skb, tunnel_hdr, INT_H_SIZE);
	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, msg, length);
	__tipc_link_xmit_skb(tunnel, skb);
}


/* tipc_link_failover_send_queue(): A link has gone down, but a second
 * link is still active. We can do failover. Tunnel the failing link's
 * whole send queue via the remaining link. This way, we don't lose
 * any packets, and sequence order is preserved for subsequent traffic
 * sent over the remaining link. Owner node is locked.
 */
void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
{
	int msgcount;
	struct tipc_link *tunnel = node_active_link(l_ptr->owner, 0);
	struct tipc_msg tunnel_hdr;
	struct sk_buff *skb;
	int split_bundles;

	if (!tunnel)
		return;

	tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, TUNNEL_PROTOCOL,
		      FAILOVER_MSG, INT_H_SIZE, l_ptr->addr);

	skb_queue_walk(&l_ptr->backlogq, skb) {
		msg_set_seqno(buf_msg(skb), l_ptr->snd_nxt);
		l_ptr->snd_nxt = mod(l_ptr->snd_nxt + 1);
	}
	skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq);
	tipc_link_purge_backlog(l_ptr);
	msgcount = skb_queue_len(&l_ptr->transmq);
	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
	msg_set_msgcnt(&tunnel_hdr, msgcount);

	if (skb_queue_empty(&l_ptr->transmq)) {
		skb = tipc_buf_acquire(INT_H_SIZE);
		if (skb) {
			skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE);
			msg_set_size(&tunnel_hdr, INT_H_SIZE);
			__tipc_link_xmit_skb(tunnel, skb);
		} else {
			pr_warn("%sunable to send changeover msg\n",
				link_co_err);
		}
		return;
	}

	split_bundles = (node_active_link(l_ptr->owner, 0) !=
			 node_active_link(l_ptr->owner, 1));

	skb_queue_walk(&l_ptr->transmq, skb) {
		struct tipc_msg *msg = buf_msg(skb);

		if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
			struct tipc_msg *m = msg_get_wrapped(msg);
			unchar *pos = (unchar *)m;

			msgcount = msg_msgcnt(msg);
			while (msgcount--) {
				msg_set_seqno(m, msg_seqno(msg));
				tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, m,
						      msg_link_selector(m));
				pos += align(msg_size(m));
				m = (struct tipc_msg *)pos;
			}
		} else {
			tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
					      msg_link_selector(msg));
		}
	}
}

/* tipc_link_dup_queue_xmit(): A second link has become active. Tunnel a
 * duplicate of the first link's send queue via the new link. This way, we
 * are guaranteed that currently queued packets from a socket are delivered
 * before future traffic from the same socket, even if this is using the
 * new link. The last arriving copy of each duplicate packet is dropped at
 * the receiving end by the regular protocol check, so packet cardinality
 * and sequence order is preserved per sender/receiver socket pair.
 * Owner node is locked.
 */
void tipc_link_dup_queue_xmit(struct tipc_link *link,
			      struct tipc_link *tnl)
{
	struct sk_buff *skb;
	struct tipc_msg tnl_hdr;
	struct sk_buff_head *queue = &link->transmq;
	int mcnt;
	u16 seqno;

	tipc_msg_init(link_own_addr(link), &tnl_hdr, TUNNEL_PROTOCOL,
		      SYNCH_MSG, INT_H_SIZE, link->addr);
	mcnt = skb_queue_len(&link->transmq) + skb_queue_len(&link->backlogq);
	msg_set_msgcnt(&tnl_hdr, mcnt);
	msg_set_bearer_id(&tnl_hdr, link->peer_bearer_id);

tunnel_queue:
	skb_queue_walk(queue, skb) {
		struct sk_buff *outskb;
		struct tipc_msg *msg = buf_msg(skb);
		u32 len = msg_size(msg);

		msg_set_ack(msg, mod(link->rcv_nxt - 1));
		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
		msg_set_size(&tnl_hdr, len + INT_H_SIZE);
		outskb = tipc_buf_acquire(len + INT_H_SIZE);
		if (outskb == NULL) {
			pr_warn("%sunable to send duplicate msg\n",
				link_co_err);
			return;
		}
		skb_copy_to_linear_data(outskb, &tnl_hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(outskb, INT_H_SIZE,
					       skb->data, len);
		__tipc_link_xmit_skb(tnl, outskb);
		if (!tipc_link_is_up(link))
			return;
	}
	if (queue == &link->backlogq)
		return;
	seqno = link->snd_nxt;
	skb_queue_walk(&link->backlogq, skb) {
		msg_set_seqno(buf_msg(skb), seqno);
		seqno = mod(seqno + 1);
	}
	queue = &link->backlogq;
	goto tunnel_queue;
}

/*  tipc_link_failover_rcv(): Receive a tunnelled FAILOVER_MSG packet
 *  Owner node is locked.
 */
static bool tipc_link_failover_rcv(struct tipc_link *link,
				   struct sk_buff **skb)
{
	struct tipc_msg *msg = buf_msg(*skb);
	struct sk_buff *iskb = NULL;
	struct tipc_link *pl = NULL;
	int bearer_id = msg_bearer_id(msg);
	int pos = 0;

	if (msg_type(msg) != FAILOVER_MSG) {
		pr_warn("%sunknown tunnel pkt received\n", link_co_err);
		goto exit;
	}
	if (bearer_id >= MAX_BEARERS)
		goto exit;

	if (bearer_id == link->bearer_id)
		goto exit;

	pl = link->owner->links[bearer_id].link;
	if (pl && tipc_link_is_up(pl))
		tipc_link_reset(pl);

	if (link->failover_pkts == FIRST_FAILOVER)
		link->failover_pkts = msg_msgcnt(msg);

	/* Should we expect an inner packet? */
	if (!link->failover_pkts)
		goto exit;

	if (!tipc_msg_extract(*skb, &iskb, &pos)) {
		pr_warn("%sno inner failover pkt\n", link_co_err);
		*skb = NULL;
		goto exit;
	}
	link->failover_pkts--;
	*skb = NULL;

	/* Was this packet already delivered? */
	if (less(buf_seqno(iskb), link->failover_checkpt)) {
		kfree_skb(iskb);
		iskb = NULL;
		goto exit;
	}
	if (msg_user(buf_msg(iskb)) == MSG_FRAGMENTER) {
		link->stats.recv_fragments++;
		tipc_buf_append(&link->failover_skb, &iskb);
	}
exit:
	if (!link->failover_pkts && pl)
		pl->exec_mode = TIPC_LINK_OPEN;
	kfree_skb(*skb);
	*skb = iskb;
	return *skb;
}

/* tipc_link_proto_rcv(): receive link level protocol message :
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest numerical id determines
 * network plane
 */
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u16 rcvgap = 0;
	u16 nacked_gap = msg_seq_gap(hdr);
	u16 peers_snd_nxt =  msg_next_sent(hdr);
	u16 peers_tol = msg_link_tolerance(hdr);
	u16 peers_prio = msg_linkprio(hdr);
	char *if_name;
	int rc = 0;

	if (l->exec_mode == TIPC_LINK_BLOCKED)
		goto exit;

	if (link_own_addr(l) > msg_prevnode(hdr))
		l->net_plane = msg_net_plane(hdr);

	switch (msg_type(hdr)) {
	case RESET_MSG:

		/* Ignore duplicate RESET with old session number */
		if ((less_eq(msg_session(hdr), l->peer_session)) &&
		    (l->peer_session != WILDCARD_SESSION))
			break;
		/* fall thru' */
	case ACTIVATE_MSG:

		/* Complete own link name with peer's interface name */
		if_name =  strrchr(l->name, ':') + 1;
		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
			break;
		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
			break;
		strncpy(if_name, msg_data(hdr),	TIPC_MAX_IF_NAME);

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		/* Update own priority if peer's priority is higher */
		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
			l->priority = peers_prio;

		l->peer_session = msg_session(hdr);
		l->peer_bearer_id = msg_bearer_id(hdr);
		rc = tipc_link_fsm_evt(l, msg_type(hdr), xmitq);
		if (l->mtu > msg_max_pkt(hdr))
			l->mtu = msg_max_pkt(hdr);
		break;
	case STATE_MSG:
		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		l->silent_intv_cnt = 0;
		l->stats.recv_states++;
		if (msg_probe(hdr))
			l->stats.recv_probes++;
		rc = tipc_link_fsm_evt(l, TRAFFIC_EVT, xmitq);
		if (!tipc_link_is_up(l))
			break;

		/* Has peer sent packets we haven't received yet ? */
		if (more(peers_snd_nxt, l->rcv_nxt))
			rcvgap = peers_snd_nxt - l->rcv_nxt;
		if (rcvgap || (msg_probe(hdr)))
			tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
						  0, l->mtu, xmitq);
		tipc_link_release_pkts(l, msg_ack(hdr));

		/* If NACK, retransmit will now start at right position */
		if (nacked_gap) {
			rc |= tipc_link_retransm(l, nacked_gap, xmitq);
			l->stats.recv_nacks++;
		}
		tipc_link_advance_backlog(l, xmitq);
		if (unlikely(!skb_queue_empty(&l->wakeupq)))
			link_prepare_wakeup(l);
	}
exit:
	kfree_skb(skb);
	return rc;
}

void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
{
	int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);

	l->window = win;
	l->backlog[TIPC_LOW_IMPORTANCE].limit      = win / 2;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = win;
	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = win / 2 * 3;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
}

/* tipc_link_find_owner - locate owner node of link by link's name
 * @net: the applicable net namespace
 * @link_name: pointer to link name string
 * @bearer_id: pointer to index in 'node->links' array where the link was found.
 *
 * Returns pointer to node owning the link, or 0 if no matching link is found.
 */
static struct tipc_node *tipc_link_find_owner(struct net *net,
					      const char *link_name,
					      unsigned int *bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;
	struct tipc_node *found_node = NULL;
	int i;

	*bearer_id = 0;
	rcu_read_lock();
	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
		tipc_node_lock(n_ptr);
		for (i = 0; i < MAX_BEARERS; i++) {
			l_ptr = n_ptr->links[i].link;
			if (l_ptr && !strcmp(l_ptr->name, link_name)) {
				*bearer_id = i;
				found_node = n_ptr;
				break;
			}
		}
		tipc_node_unlock(n_ptr);
		if (found_node)
			break;
	}
	rcu_read_unlock();

	return found_node;
}
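
/* Note: tipc_link_find_owner() only identifies the owning node; callers
 * such as tipc_nl_link_set() and tipc_nl_link_reset_stats() below re-take
 * tipc_node_lock() before dereferencing node->links[bearer_id].link.
 */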

/**
 * link_reset_statistics - reset link statistics
 * @l_ptr: pointer to link
 */
static void link_reset_statistics(struct tipc_link *l_ptr)
{
	memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
	l_ptr->stats.sent_info = l_ptr->snd_nxt;
	l_ptr->stats.recv_info = l_ptr->rcv_nxt;
}

static void link_print(struct tipc_link *l, const char *str)
{
	struct sk_buff *hskb = skb_peek(&l->transmq);
	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt;
	u16 tail = l->snd_nxt - 1;

	pr_info("%s Link <%s>:", str, l->name);

	if (link_probing(l))
		pr_cont(":P\n");
	else if (link_establishing(l))
		pr_cont(":E\n");
	else if (link_resetting(l))
		pr_cont(":R\n");
	else if (link_working(l))
		pr_cont(":W\n");
	else
		pr_cont("\n");

	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
		skb_queue_len(&l->transmq), head, tail,
		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
}

/* Parse and validate nested (link) properties valid for media, bearer and link
 */
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
	int err;

	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
			       tipc_nl_prop_policy);
	if (err)
		return err;

	if (props[TIPC_NLA_PROP_PRIO]) {
		u32 prio;

		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (prio > TIPC_MAX_LINK_PRI)
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_TOL]) {
		u32 tol;

		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_WIN]) {
		u32 win;

		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
			return -EINVAL;
	}

	return 0;
}
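
/* Usage sketch (hypothetical caller, mirroring tipc_nl_link_set() below):
 * once tipc_nl_parse_link_prop() returns 0, the validated properties can be
 * read individually, e.g.:
 *
 *	struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
 *
 *	if (!tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props) &&
 *	    props[TIPC_NLA_PROP_WIN])
 *		tipc_link_set_queue_limits(link,
 *					   nla_get_u32(props[TIPC_NLA_PROP_WIN]));
 */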

int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	int res = 0;
	int bearer_id;
	char *name;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(name, tipc_bclink_name) == 0)
		return tipc_nl_bc_link_set(net, attrs);

	node = tipc_link_find_owner(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_lock(node);

	link = node->links[bearer_id].link;
	if (!link) {
		res = -EINVAL;
		goto out;
	}

	if (attrs[TIPC_NLA_LINK_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
					      props);
		if (err) {
			res = err;
			goto out;
		}

		if (props[TIPC_NLA_PROP_TOL]) {
			u32 tol;

			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
			link->tolerance = tol;
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0);
		}
		if (props[TIPC_NLA_PROP_PRIO]) {
			u32 prio;

			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
			link->priority = prio;
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio);
		}
		if (props[TIPC_NLA_PROP_WIN]) {
			u32 win;

			win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
			tipc_link_set_queue_limits(link, win);
		}
	}

out:
	tipc_node_unlock(node);

	return res;
}
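
/* Attribute layout expected by the "set" operation above:
 *
 *	TIPC_NLA_LINK
 *	 +- TIPC_NLA_LINK_NAME		(string, mandatory)
 *	 +- TIPC_NLA_LINK_PROP		(nested, optional)
 *	     +- TIPC_NLA_PROP_TOL	(u32)
 *	     +- TIPC_NLA_PROP_PRIO	(u32)
 *	     +- TIPC_NLA_PROP_WIN	(u32)
 */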

static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, s->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, s->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i <  ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}
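
/* Note: TIPC_NLA_STATS_AVG_QUEUE above is only computed as
 * accu_queue_sz / queue_sz_counts when at least one queue-size sample has
 * been recorded; otherwise 0 is reported to avoid a division by zero.
 */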

/* Caller should hold appropriate locks to protect the link */
static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
			      struct tipc_link *link, int nlflags)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  nlflags, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
			tipc_cluster_mask(tn->own_addr)))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt))
		goto attr_msg_full;

	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
			goto attr_msg_full;
	if (tipc_link_is_active(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
			goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
			link->window))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);
	if (err)
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
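
/* The reply built above nests, under TIPC_NLA_LINK: the link name, the
 * destination domain, MTU, RX/TX counters, optional UP/ACTIVE flags, a
 * TIPC_NLA_LINK_PROP nest (priority, tolerance, window) and a
 * TIPC_NLA_LINK_STATS nest filled in by __tipc_nl_add_stats().
 */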

/* Caller should hold node lock  */
static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
				    struct tipc_node *node, u32 *prev_link)
{
	u32 i;
	int err;

	for (i = *prev_link; i < MAX_BEARERS; i++) {
		*prev_link = i;

		if (!node->links[i].link)
			continue;

		err = __tipc_nl_add_link(net, msg,
					 node->links[i].link, NLM_F_MULTI);
		if (err)
			return err;
	}
	*prev_link = 0;

	return 0;
}
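
/* Note: *prev_link lets a dump that ran out of skb space resume at the
 * bearer index where it stopped; it is reset to 0 once all of the node's
 * links have been added, so the next node starts from the first bearer.
 */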

int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node;
	struct tipc_nl_msg msg;
	u32 prev_node = cb->args[0];
	u32 prev_link = cb->args[1];
	int done = cb->args[2];
	int err;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (prev_node) {
		node = tipc_node_find(net, prev_node);
		if (!node) {
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistence check to fail in the netlink callback
			 * handler. Resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			goto out;
		}
		tipc_node_put(node);

		list_for_each_entry_continue_rcu(node, &tn->node_list,
						 list) {
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	} else {
		err = tipc_nl_add_bc_link(net, &msg);
		if (err)
			goto out;

		list_for_each_entry_rcu(node, &tn->node_list, list) {
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	}
	done = 1;
out:
	rcu_read_unlock();

	cb->args[0] = prev_node;
	cb->args[1] = prev_link;
	cb->args[2] = done;

	return skb->len;
}
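
/* Dump state is carried between netlink callbacks in cb->args[]: args[0]
 * holds the last node address visited, args[1] the next bearer index within
 * that node, and args[2] a "done" flag set once every link (including the
 * broadcast link) has been dumped.
 */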

int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct tipc_nl_msg msg;
	char *name;
	int err;

	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	if (!info->attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;
	name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);

	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg.skb)
		return -ENOMEM;

	if (strcmp(name, tipc_bclink_name) == 0) {
		err = tipc_nl_add_bc_link(net, &msg);
		if (err) {
			nlmsg_free(msg.skb);
			return err;
		}
	} else {
		int bearer_id;
		struct tipc_node *node;
		struct tipc_link *link;

		node = tipc_link_find_owner(net, name, &bearer_id);
		if (!node)
			return -EINVAL;

		tipc_node_lock(node);
		link = node->links[bearer_id].link;
		if (!link) {
			tipc_node_unlock(node);
			nlmsg_free(msg.skb);
			return -EINVAL;
		}

		err = __tipc_nl_add_link(net, &msg, link, 0);
		tipc_node_unlock(node);
		if (err) {
			nlmsg_free(msg.skb);
			return err;
		}
	}

	return genlmsg_reply(msg.skb, info);
}

int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	char *link_name;
	unsigned int bearer_id;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(link_name, tipc_bclink_name) == 0) {
		err = tipc_bclink_reset_stats(net);
		if (err)
			return err;
		return 0;
	}

	node = tipc_link_find_owner(net, link_name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_lock(node);

	link = node->links[bearer_id].link;
	if (!link) {
		tipc_node_unlock(node);
		return -EINVAL;
	}

	link_reset_statistics(link);

	tipc_node_unlock(node);

	return 0;
}