link.c 62.5 KB
Newer Older
P
Per Liden 已提交
1 2
/*
 * net/tipc/link.c: TIPC link code
3
 *
4
 * Copyright (c) 1996-2007, 2012-2015, Ericsson AB
5
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
P
Per Liden 已提交
6 7
 * All rights reserved.
 *
P
Per Liden 已提交
8
 * Redistribution and use in source and binary forms, with or without
P
Per Liden 已提交
9 10
 * modification, are permitted provided that the following conditions are met:
 *
P
Per Liden 已提交
11 12 13 14 15 16 17 18
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
P
Per Liden 已提交
19
 *
P
Per Liden 已提交
20 21 22 23 24 25 26 27 28 29 30 31 32 33
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
P
Per Liden 已提交
34 35 36 37
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
38
#include "subscr.h"
P
Per Liden 已提交
39
#include "link.h"
40
#include "bcast.h"
41
#include "socket.h"
P
Per Liden 已提交
42 43
#include "name_distr.h"
#include "discover.h"
44
#include "netlink.h"
P
Per Liden 已提交
45

46 47
#include <linux/pkt_sched.h>

48 49 50 51 52 53
/*
 * Error message prefixes, shared by the logging calls throughout this file
 */
static const char *link_co_err = "Link changeover error, ";
static const char *link_rst_msg = "Resetting link ";
static const char *link_unk_evt = "Unknown link event ";
P
Per Liden 已提交
54

55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70
/* Netlink attribute policy for link-level attributes (TIPC_NLA_LINK_*) */
static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
	[TIPC_NLA_LINK_UNSPEC]		= { .type = NLA_UNSPEC },
	[TIPC_NLA_LINK_NAME] = {
		.type = NLA_STRING,
		.len = TIPC_MAX_LINK_NAME
	},
	[TIPC_NLA_LINK_MTU]		= { .type = NLA_U32 },
	[TIPC_NLA_LINK_BROADCAST]	= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_UP]		= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_ACTIVE]		= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_PROP]		= { .type = NLA_NESTED },
	[TIPC_NLA_LINK_STATS]		= { .type = NLA_NESTED },
	[TIPC_NLA_LINK_RX]		= { .type = NLA_U32 },
	[TIPC_NLA_LINK_TX]		= { .type = NLA_U32 }
};

71 72 73 74 75 76 77 78
/* Properties valid for media, bearer and link */
static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
	[TIPC_NLA_PROP_UNSPEC]		= { .type = NLA_UNSPEC },
	[TIPC_NLA_PROP_PRIO]		= { .type = NLA_U32 },
	[TIPC_NLA_PROP_TOL]		= { .type = NLA_U32 },
	[TIPC_NLA_PROP_WIN]		= { .type = NLA_U32 }
};

79 80 81 82 83
/*
 * Out-of-range value for link session numbers
 */
#define INVALID_SESSION 0x10000

84 85
/*
 * Link state events:
P
Per Liden 已提交
86 87 88 89 90
 */
#define  STARTING_EVT    856384768	/* link processing trigger */
#define  TRAFFIC_MSG_EVT 560815u	/* rx'd ??? */
#define  TIMEOUT_EVT     560817u	/* link timer expired */

91 92 93
/*
 * The following two 'message types' is really just implementation
 * data conveniently stored in the message header.
P
Per Liden 已提交
94 95 96 97 98
 * They must not be considered part of the protocol
 */
#define OPEN_MSG   0
#define CLOSED_MSG 1

99
/*
P
Per Liden 已提交
100 101 102 103
 * State value stored in 'exp_msg_count'
 */
#define START_CHANGEOVER 100000u

104 105 106 107 108 109
static void link_handle_out_of_seq_msg(struct tipc_link *link,
				       struct sk_buff *skb);
static void tipc_link_proto_rcv(struct tipc_link *link,
				struct sk_buff *skb);
static int  tipc_link_tunnel_rcv(struct tipc_node *node,
				 struct sk_buff **skb);
110
static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol);
111 112 113
static void link_state_event(struct tipc_link *l_ptr, u32 event);
static void link_reset_statistics(struct tipc_link *l_ptr);
static void link_print(struct tipc_link *l_ptr, const char *str);
114 115
static void tipc_link_sync_xmit(struct tipc_link *l);
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
116 117
static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
118

P
Per Liden 已提交
119
/*
S
Sam Ravnborg 已提交
120
 *  Simple link routines
P
Per Liden 已提交
121
 */
S
Sam Ravnborg 已提交
122
/* align - round a length up to the next multiple of 4 (TIPC word alignment) */
static unsigned int align(unsigned int i)
{
	unsigned int rem = i % 4u;

	return rem ? i + (4u - rem) : i;
}

127 128 129 130 131 132 133 134 135 136 137 138 139 140 141
/* tipc_link_release - kref release callback: free the link structure */
static void tipc_link_release(struct kref *kref)
{
	struct tipc_link *l = container_of(kref, struct tipc_link, ref);

	kfree(l);
}

/* tipc_link_get - take a reference on the link */
static void tipc_link_get(struct tipc_link *l_ptr)
{
	kref_get(&l_ptr->ref);
}

/* tipc_link_put - drop a reference; frees the link when it hits zero */
static void tipc_link_put(struct tipc_link *l_ptr)
{
	kref_put(&l_ptr->ref, tipc_link_release);
}

142
/* link_init_max_pkt - (re)initialize the link's max packet values from the
 * bearer MTU.
 *
 * Reads the bearer under RCU; if the bearer is gone the old values are
 * left untouched.  The negotiated max_pkt starts no larger than
 * MAX_PKT_DEFAULT and may grow toward max_pkt_target via probing.
 */
static void link_init_max_pkt(struct tipc_link *l_ptr)
{
	struct tipc_node *node = l_ptr->owner;
	struct tipc_net *tn = net_generic(node->net, tipc_net_id);
	struct tipc_bearer *b_ptr;
	u32 max_pkt;

	rcu_read_lock();
	b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
	if (!b_ptr) {
		rcu_read_unlock();
		return;
	}
	/* round the bearer MTU down to a multiple of 4 */
	max_pkt = (b_ptr->mtu & ~3);
	rcu_read_unlock();

	if (max_pkt > MAX_MSG_SIZE)
		max_pkt = MAX_MSG_SIZE;

	l_ptr->max_pkt_target = max_pkt;
	if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
		l_ptr->max_pkt = l_ptr->max_pkt_target;
	else
		l_ptr->max_pkt = MAX_PKT_DEFAULT;

	/* restart MTU probing from scratch */
	l_ptr->max_pkt_probes = 0;
}

/*
S
Sam Ravnborg 已提交
171
 *  Simple non-static link routines (i.e. referenced outside this file)
P
Per Liden 已提交
172
 */
173
/*
 * tipc_link_is_up - return non-zero if the link endpoint is in one of the
 * two working states; a NULL link is never up
 */
int tipc_link_is_up(struct tipc_link *l_ptr)
{
	return l_ptr && (link_working_working(l_ptr) ||
			 link_working_unknown(l_ptr));
}

180
int tipc_link_is_active(struct tipc_link *l_ptr)
P
Per Liden 已提交
181
{
E
Eric Dumazet 已提交
182 183
	return	(l_ptr->owner->active_links[0] == l_ptr) ||
		(l_ptr->owner->active_links[1] == l_ptr);
P
Per Liden 已提交
184 185 186 187 188 189
}

/**
 * link_timeout - handle expiration of link timer
 * @data: pointer to link, cast to unsigned long (timer callback argument)
 *
 * Updates send-traffic statistics, feeds TIMEOUT_EVT into the link state
 * machine, pushes any backlogged packets, and finally drops the reference
 * that was taken for this timer run (see link_set_timer()).
 */
static void link_timeout(unsigned long data)
{
	struct tipc_link *l_ptr = (struct tipc_link *)data;
	struct sk_buff *skb;

	tipc_node_lock(l_ptr->owner);

	/* update counters used in statistical profiling of send traffic */
	l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->transmq);
	l_ptr->stats.queue_sz_counts++;

	/* Profile the length of the oldest unacked packet; for a fragmented
	 * message use the size of the wrapped (original) message instead.
	 */
	skb = skb_peek(&l_ptr->transmq);
	if (skb) {
		struct tipc_msg *msg = buf_msg(skb);
		u32 length = msg_size(msg);

		if ((msg_user(msg) == MSG_FRAGMENTER) &&
		    (msg_type(msg) == FIRST_FRAGMENT)) {
			length = msg_size(msg_get_wrapped(msg));
		}
		if (length) {
			l_ptr->stats.msg_lengths_total += length;
			l_ptr->stats.msg_length_counts++;
			/* histogram buckets: 64, 256, 1K, 4K, 16K, 32K, more */
			if (length <= 64)
				l_ptr->stats.msg_length_profile[0]++;
			else if (length <= 256)
				l_ptr->stats.msg_length_profile[1]++;
			else if (length <= 1024)
				l_ptr->stats.msg_length_profile[2]++;
			else if (length <= 4096)
				l_ptr->stats.msg_length_profile[3]++;
			else if (length <= 16384)
				l_ptr->stats.msg_length_profile[4]++;
			else if (length <= 32768)
				l_ptr->stats.msg_length_profile[5]++;
			else
				l_ptr->stats.msg_length_profile[6]++;
		}
	}

	/* do all other link processing performed on a periodic basis */
	link_state_event(l_ptr, TIMEOUT_EVT);

	/* try to move backlogged packets out onto the bearer */
	if (skb_queue_len(&l_ptr->backlogq))
		tipc_link_push_packets(l_ptr);

	tipc_node_unlock(l_ptr->owner);
	/* release the reference held by this timer expiry */
	tipc_link_put(l_ptr);
}

240
/* link_set_timer - (re)arm the link timer @time jiffies from now.
 *
 * mod_timer() returns 0 when the timer was inactive, in which case a new
 * reference is taken on behalf of the timer; a pending timer already holds
 * one, so no extra reference is taken then.  The matching tipc_link_put()
 * happens in link_timeout() or when the timer is cancelled.
 */
static void link_set_timer(struct tipc_link *link, unsigned long time)
{
	if (!mod_timer(&link->timer, jiffies + time))
		tipc_link_get(link);
}

/**
 * tipc_link_create - create a new link
 * @n_ptr: pointer to associated node
 * @b_ptr: pointer to associated bearer
 * @media_addr: media address to use when sending messages over link
 *
 * Refuses to create a link when the node already has MAX_BEARERS links or
 * already has a link on this bearer.  The new link starts in RESET_UNKNOWN
 * and is kicked off with a STARTING_EVT.
 *
 * Returns pointer to link, or NULL on failure.
 */
struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
				   struct tipc_bearer *b_ptr,
				   const struct tipc_media_addr *media_addr)
{
	struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
	struct tipc_link *l_ptr;
	struct tipc_msg *msg;
	char *if_name;
	char addr_string[16];
	u32 peer = n_ptr->addr;

	if (n_ptr->link_cnt >= MAX_BEARERS) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish %uth link to %s. Max %u allowed.\n",
			n_ptr->link_cnt, addr_string, MAX_BEARERS);
		return NULL;
	}

	if (n_ptr->links[b_ptr->identity]) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish second link on <%s> to %s\n",
		       b_ptr->name, addr_string);
		return NULL;
	}

	/* NOTE(review): GFP_ATOMIC suggests this can run in atomic context —
	 * confirm against callers
	 */
	l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
	if (!l_ptr) {
		pr_warn("Link creation failed, no memory\n");
		return NULL;
	}
	kref_init(&l_ptr->ref);
	l_ptr->addr = peer;
	/* bearer name is assumed to contain a ':' (media:interface) */
	if_name = strchr(b_ptr->name, ':') + 1;
	sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(tn->own_addr), tipc_cluster(tn->own_addr),
		tipc_node(tn->own_addr),
		if_name,
		tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
		/* note: peer i/f name is updated by reset/activate message */
	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
	l_ptr->owner = n_ptr;
	l_ptr->checkpoint = 1;
	l_ptr->peer_session = INVALID_SESSION;
	l_ptr->bearer_id = b_ptr->identity;
	link_set_supervision_props(l_ptr, b_ptr->tolerance);
	l_ptr->state = RESET_UNKNOWN;

	/* set up the link's embedded protocol (RESET) message template */
	l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
	msg = l_ptr->pmsg;
	tipc_msg_init(tn->own_addr, msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE,
		      l_ptr->addr);
	msg_set_size(msg, sizeof(l_ptr->proto_msg));
	msg_set_session(msg, (tn->random & 0xffff));
	msg_set_bearer_id(msg, b_ptr->identity);
	strcpy((char *)msg_data(msg), if_name);
	l_ptr->net_plane = b_ptr->net_plane;
	link_init_max_pkt(l_ptr);
	l_ptr->priority = b_ptr->priority;
	tipc_link_set_queue_limits(l_ptr, b_ptr->window);
	l_ptr->next_out_no = 1;
	__skb_queue_head_init(&l_ptr->transmq);
	__skb_queue_head_init(&l_ptr->backlogq);
	__skb_queue_head_init(&l_ptr->deferdq);
	/* wakeupq/inputq/namedq use locked queue init — presumably accessed
	 * from other contexts; TODO confirm
	 */
	skb_queue_head_init(&l_ptr->wakeupq);
	skb_queue_head_init(&l_ptr->inputq);
	skb_queue_head_init(&l_ptr->namedq);
	link_reset_statistics(l_ptr);
	tipc_node_attach_link(n_ptr, l_ptr);
	setup_timer(&l_ptr->timer, link_timeout, (unsigned long)l_ptr);
	link_state_event(l_ptr, STARTING_EVT);

	return l_ptr;
}

328 329 330 331 332 333 334 335 336 337 338 339
/**
 * tipc_link_delete - Conditional deletion of link.
 *                    If timer still running, real delete is done when it
 *                    expires
 * @link: link to be deleted
 *
 * Drops the creation reference; the link is freed once all other
 * references (e.g. a pending timer) are gone.
 */
void tipc_link_delete(struct tipc_link *link)
{
	tipc_link_reset_fragments(link);
	tipc_node_detach_link(link->owner, link);
	tipc_link_put(link);
}

340 341
/* tipc_link_delete_list - reset and delete all links on a given bearer.
 * @net: the applicable net namespace
 * @bearer_id: bearer whose links are to be removed
 * @shutting_down: force immediate deletion even if failover is ongoing
 *
 * Walks the node list under RCU, taking each node lock in turn.  A link
 * still involved in failover (up, or with pending tunnelled messages) is
 * only marked LINK_STOPPED and deleted later, unless @shutting_down.
 */
void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
			   bool shutting_down)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *link;
	struct tipc_node *node;
	bool del_link;

	rcu_read_lock();
	list_for_each_entry_rcu(node, &tn->node_list, list) {
		tipc_node_lock(node);
		link = node->links[bearer_id];
		if (!link) {
			tipc_node_unlock(node);
			continue;
		}
		/* decide before reset: safe to delete now only if the link
		 * is down and has no failover messages outstanding
		 */
		del_link = !tipc_link_is_up(link) && !link->exp_msg_count;
		tipc_link_reset(link);
		/* a cancelled pending timer held a link reference */
		if (del_timer(&link->timer))
			tipc_link_put(link);
		link->flags |= LINK_STOPPED;
		/* Delete link now, or when failover is finished: */
		if (shutting_down || !tipc_node_is_up(node) || del_link)
			tipc_link_delete(link);
		tipc_node_unlock(node);
	}
	rcu_read_unlock();
}
P
Per Liden 已提交
368 369

/**
370 371 372 373 374 375
 * link_schedule_user - schedule user for wakeup after congestion
 * @link: congested link
 * @oport: sending port
 * @chain_sz: size of buffer chain that was attempted sent
 * @imp: importance of message attempted sent
 * Create pseudo msg to send back to user when congestion abates
P
Per Liden 已提交
376
 */
377 378
static bool link_schedule_user(struct tipc_link *link, u32 oport,
			       uint chain_sz, uint imp)
P
Per Liden 已提交
379
{
380 381
	struct sk_buff *buf;

382 383 384
	buf = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
			      link_own_addr(link), link_own_addr(link),
			      oport, 0, 0);
385 386 387 388
	if (!buf)
		return false;
	TIPC_SKB_CB(buf)->chain_sz = chain_sz;
	TIPC_SKB_CB(buf)->chain_imp = imp;
389
	skb_queue_tail(&link->wakeupq, buf);
390 391
	link->stats.link_congs++;
	return true;
P
Per Liden 已提交
392 393
}

394 395 396 397 398 399
/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @l: congested link
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to node wait queue for wakeup
 */
void link_prepare_wakeup(struct tipc_link *l)
{
	/* pending chain sizes accumulated per importance level */
	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
	int imp, lim;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		lim = l->window + l->backlog[imp].limit;
		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
		/* stop once this importance level would exceed its quota */
		if ((pnd[imp] + l->backlog[imp].len) >= lim)
			break;
		/* hand the wakeup message over to the owner node's inputq */
		skb_unlink(skb, &l->wakeupq);
		skb_queue_tail(&l->inputq, skb);
		l->owner->inputq = &l->inputq;
		l->owner->action_flags |= TIPC_MSG_EVT;
	}
}

/**
420
 * tipc_link_reset_fragments - purge link's inbound message fragments queue
P
Per Liden 已提交
421 422
 * @l_ptr: pointer to link
 */
423
void tipc_link_reset_fragments(struct tipc_link *l_ptr)
P
Per Liden 已提交
424
{
425 426
	kfree_skb(l_ptr->reasm_buf);
	l_ptr->reasm_buf = NULL;
P
Per Liden 已提交
427 428
}

429 430 431 432 433 434 435 436 437 438
/* tipc_link_purge_backlog - drop all backlogged packets and clear the
 * per-importance backlog counters
 */
static void tipc_link_purge_backlog(struct tipc_link *l)
{
	int imp;

	__skb_queue_purge(&l->backlogq);
	for (imp = TIPC_LOW_IMPORTANCE; imp <= TIPC_SYSTEM_IMPORTANCE; imp++)
		l->backlog[imp].len = 0;
}

439
/**
 * tipc_link_purge_queues - purge all pkt queues associated with link
 * @l_ptr: pointer to link
 *
 * Drops deferred-in, in-flight and backlogged packets, and any partially
 * reassembled inbound message.
 */
void tipc_link_purge_queues(struct tipc_link *l_ptr)
{
	__skb_queue_purge(&l_ptr->deferdq);
	__skb_queue_purge(&l_ptr->transmq);
	tipc_link_purge_backlog(l_ptr);
	tipc_link_reset_fragments(l_ptr);
}

451
/* tipc_link_reset - reset a link endpoint to RESET_UNKNOWN.
 * @l_ptr: pointer to link
 *
 * Bumps the session number, restarts MTU negotiation and, if the link was
 * actually up, tears down its node/bearer associations, arms failover
 * bookkeeping, purges the outbound queues and resets counters/statistics.
 * Caller is assumed to hold the owner node lock — TODO confirm.
 */
void tipc_link_reset(struct tipc_link *l_ptr)
{
	u32 prev_state = l_ptr->state;
	u32 checkpoint = l_ptr->next_in_no;
	int was_active_link = tipc_link_is_active(l_ptr);
	struct tipc_node *owner = l_ptr->owner;

	/* advance our session number for the next session */
	msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));

	/* Link is down, accept any session */
	l_ptr->peer_session = INVALID_SESSION;

	/* Prepare for max packet size negotiation */
	link_init_max_pkt(l_ptr);

	l_ptr->state = RESET_UNKNOWN;

	/* nothing more to do if the link was already down */
	if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
		return;

	tipc_node_link_down(l_ptr->owner, l_ptr);
	tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr);

	/* if a parallel link remains active, set up for failover */
	if (was_active_link && tipc_node_active_links(l_ptr->owner)) {
		l_ptr->reset_checkpoint = checkpoint;
		l_ptr->exp_msg_count = START_CHANGEOVER;
	}

	/* Clean up all queues, except inputq: */
	__skb_queue_purge(&l_ptr->transmq);
	__skb_queue_purge(&l_ptr->deferdq);
	if (!owner->inputq)
		owner->inputq = &l_ptr->inputq;
	/* pending wakeup messages are delivered via the node's inputq */
	skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq);
	if (!skb_queue_empty(owner->inputq))
		owner->action_flags |= TIPC_MSG_EVT;
	tipc_link_purge_backlog(l_ptr);
	l_ptr->rcv_unacked = 0;
	l_ptr->checkpoint = 1;
	l_ptr->next_out_no = 1;
	l_ptr->fsm_msg_cnt = 0;
	l_ptr->stale_count = 0;
	link_reset_statistics(l_ptr);
}

496
void tipc_link_reset_list(struct net *net, unsigned int bearer_id)
497
{
498
	struct tipc_net *tn = net_generic(net, tipc_net_id);
499
	struct tipc_link *l_ptr;
500
	struct tipc_node *n_ptr;
501

502
	rcu_read_lock();
503
	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
504
		tipc_node_lock(n_ptr);
505 506 507
		l_ptr = n_ptr->links[bearer_id];
		if (l_ptr)
			tipc_link_reset(l_ptr);
508
		tipc_node_unlock(n_ptr);
509
	}
510
	rcu_read_unlock();
511
}
P
Per Liden 已提交
512

513
/* link_activate - bring the link into working use.
 * @link: link to activate
 *
 * Resets the inbound sequence expectation, registers the link as up with
 * its owner node and announces the destination to the bearer.
 */
static void link_activate(struct tipc_link *link)
{
	struct tipc_node *node = link->owner;

	link->next_in_no = 1;
	link->stats.recv_info = 1;
	tipc_node_link_up(node, link);
	tipc_bearer_add_dest(node->net, link->bearer_id, link->addr);
}

/**
 * link_state_event - link finite state machine
 * @l_ptr: pointer to link
 * @event: state machine event to process
 *
 * States: WORKING_WORKING (WW), WORKING_UNKNOWN (WU), RESET_UNKNOWN (RU),
 * RESET_RESET (RR).  Events are incoming protocol messages (RESET_MSG,
 * ACTIVATE_MSG, STATE via TRAFFIC_MSG_EVT) plus TIMEOUT_EVT/STARTING_EVT.
 */
static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
{
	struct tipc_link *other;
	unsigned long cont_intv = l_ptr->cont_intv;

	/* a stopped link processes no further events */
	if (l_ptr->flags & LINK_STOPPED)
		return;

	if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
		return;		/* Not yet. */

	/* Check whether changeover is going on */
	if (l_ptr->exp_msg_count) {
		/* only keep the timer ticking while failover completes */
		if (event == TIMEOUT_EVT)
			link_set_timer(l_ptr, cont_intv);
		return;
	}

	switch (l_ptr->state) {
	case WORKING_WORKING:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			break;
		case TIMEOUT_EVT:
			/* traffic arrived since last checkpoint: stay in WW */
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_proto_xmit(l_ptr, STATE_MSG,
							     0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				} else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
					/* probe for a larger usable MTU */
					tipc_link_proto_xmit(l_ptr, STATE_MSG,
							     1, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
				break;
			}
			/* silence from peer: probe at a faster interval */
			l_ptr->state = WORKING_UNKNOWN;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv / 4);
			break;
		case RESET_MSG:
			pr_debug("%s<%s>, requested by peer\n",
				 link_rst_msg, l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_debug("%s%u in WW state\n", link_unk_evt, event);
		}
		break;
	case WORKING_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			/* peer is alive again: back to fully working */
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			pr_debug("%s<%s>, requested by peer while probing\n",
				 link_rst_msg, l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case TIMEOUT_EVT:
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				/* traffic seen after all: recover to WW */
				l_ptr->state = WORKING_WORKING;
				l_ptr->fsm_msg_cnt = 0;
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_proto_xmit(l_ptr, STATE_MSG,
							     0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
			} else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
				/* keep probing until the abort limit */
				tipc_link_proto_xmit(l_ptr, STATE_MSG,
						     1, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv / 4);
			} else {	/* Link has failed */
				pr_debug("%s<%s>, peer not responding\n",
					 link_rst_msg, l_ptr->name);
				tipc_link_reset(l_ptr);
				l_ptr->state = RESET_UNKNOWN;
				l_ptr->fsm_msg_cnt = 0;
				tipc_link_proto_xmit(l_ptr, RESET_MSG,
						     0, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv);
			}
			break;
		default:
			pr_err("%s%u in WU state\n", link_unk_evt, event);
		}
		break;
	case RESET_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
			break;
		case ACTIVATE_MSG:
			/* defer activation while a parallel link is probing */
			other = l_ptr->owner->active_links[0];
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			/* first working link: sync broadcast state with peer */
			if (l_ptr->owner->working_links == 1)
				tipc_link_sync_xmit(l_ptr);
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case STARTING_EVT:
			l_ptr->flags |= LINK_STARTED;
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case TIMEOUT_EVT:
			/* keep soliciting the peer with RESET messages */
			tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in RU state\n", link_unk_evt, event);
		}
		break;
	case RESET_RESET:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			/* defer activation while a parallel link is probing */
			other = l_ptr->owner->active_links[0];
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			/* first working link: sync broadcast state with peer */
			if (l_ptr->owner->working_links == 1)
				tipc_link_sync_xmit(l_ptr);
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			break;
		case TIMEOUT_EVT:
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in RR state\n", link_unk_evt, event);
		}
		break;
	default:
		pr_err("Unknown link state %u/%u\n", l_ptr->state, event);
	}
}

711 712 713 714 715 716 717
/* tipc_link_cong: determine return value and how to treat the
 * sent buffer during link congestion.
 * - For plain, errorless user data messages we keep the buffer and
 *   return -ELINKCONG.
 * - For all other messages we discard the buffer and return -EHOSTUNREACH
 * - For TIPC internal messages we also reset the link
 */
static int tipc_link_cong(struct tipc_link *link, struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	struct tipc_msg *msg = buf_msg(skb);
	int imp = msg_importance(msg);
	u32 oport = msg_tot_origport(msg);

	/* an internal (above-critical importance) message hitting a full
	 * queue indicates a broken link: reset it
	 */
	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
		pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
		tipc_link_reset(link);
		goto drop;
	}
	if (unlikely(msg_errcode(msg)))
		goto drop;
	if (unlikely(msg_reroute_cnt(msg)))
		goto drop;
	/* a wakeup is already pending for this sender: just report congestion */
	if (TIPC_SKB_CB(skb)->wakeup_pending)
		return -ELINKCONG;
	if (link_schedule_user(link, oport, skb_queue_len(list), imp))
		return -ELINKCONG;
drop:
	__skb_queue_purge(list);
	return -EHOSTUNREACH;
}

/**
 * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
 * @net: the applicable net namespace
 * @link: link to use
 * @list: chain of buffers containing message
 *
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG, -EMSGSIZE (plain socket
 * user data messages) or -EHOSTUNREACH (all other messages/senders)
 * Only the socket functions tipc_send_stream() and tipc_send_packet() need
 * to act on the return value, since they may need to do more send attempts.
 */
int __tipc_link_xmit(struct net *net, struct tipc_link *link,
		     struct sk_buff_head *list)
{
	struct tipc_msg *msg = buf_msg(skb_peek(list));
	unsigned int maxwin = link->window;
	unsigned int imp = msg_importance(msg);
	uint mtu = link->max_pkt;
	uint ack = mod(link->next_in_no - 1);
	uint seqno = link->next_out_no;
	uint bc_last_in = link->owner->bclink.last_in;
	struct tipc_media_addr *addr = &link->media_addr;
	struct sk_buff_head *transmq = &link->transmq;
	struct sk_buff_head *backlogq = &link->backlogq;
	struct sk_buff *skb, *tmp;

	/* Match backlog limit against msg importance: */
	if (unlikely(link->backlog[imp].len >= link->backlog[imp].limit))
		return tipc_link_cong(link, list);

	/* oversized packets cannot be sent on this link */
	if (unlikely(msg_size(msg) > mtu)) {
		__skb_queue_purge(list);
		return -EMSGSIZE;
	}
	/* Prepare each packet for sending, and add to relevant queue: */
	skb_queue_walk_safe(list, skb, tmp) {
		__skb_unlink(skb, list);
		msg = buf_msg(skb);
		msg_set_seqno(msg, seqno);
		msg_set_ack(msg, ack);
		msg_set_bcast_ack(msg, bc_last_in);

		/* window open: transmit immediately */
		if (likely(skb_queue_len(transmq) < maxwin)) {
			__skb_queue_tail(transmq, skb);
			tipc_bearer_send(net, link->bearer_id, skb, addr);
			link->rcv_unacked = 0;
			seqno++;
			continue;
		}
		/* window closed: try bundling into the last backlog packet */
		if (tipc_msg_bundle(skb_peek_tail(backlogq), skb, mtu)) {
			link->stats.sent_bundled++;
			continue;
		}
		/* otherwise wrap into a fresh bundle if possible */
		if (tipc_msg_make_bundle(&skb, mtu, link->addr)) {
			link->stats.sent_bundled++;
			link->stats.sent_bundles++;
			imp = msg_importance(buf_msg(skb));
		}
		__skb_queue_tail(backlogq, skb);
		link->backlog[imp].len++;
		seqno++;
	}
	link->next_out_no = seqno;
	return 0;
}

809 810
/* skb2list - initialize @list as a single-element queue holding @skb */
static void skb2list(struct sk_buff *skb, struct sk_buff_head *list)
{
	skb_queue_head_init(list);
	__skb_queue_tail(list, skb);
}

/* __tipc_link_xmit_skb - single-buffer convenience wrapper around
 * __tipc_link_xmit(); same consumption and return semantics
 */
static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
{
	struct sk_buff_head head;

	skb2list(skb, &head);
	return __tipc_link_xmit(link->owner->net, link, &head);
}

823 824
/* tipc_link_xmit_skb - single-buffer convenience wrapper around
 * tipc_link_xmit(); same consumption and return semantics
 */
int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
		       u32 selector)
{
	struct sk_buff_head head;

	skb2list(skb, &head);
	return tipc_link_xmit(net, &head, dnode, selector);
}

832
/**
 * tipc_link_xmit() is the general link level function for message sending
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * @dnode: address of destination node
 * @selector: a number used for deterministic link selection
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
 */
int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dnode,
		   u32 selector)
{
	struct tipc_link *link = NULL;
	struct tipc_node *node;
	int rc = -EHOSTUNREACH;

	node = tipc_node_find(net, dnode);
	if (node) {
		tipc_node_lock(node);
		/* pick one of the two active links deterministically */
		link = node->active_links[selector & 1];
		if (link)
			rc = __tipc_link_xmit(net, link, list);
		tipc_node_unlock(node);
	}
	if (link)
		return rc;

	/* no link needed for local delivery */
	if (likely(in_own_node(net, dnode))) {
		tipc_sk_rcv(net, list);
		return 0;
	}

	/* destination unreachable: drop the chain */
	__skb_queue_purge(list);
	return rc;
}

869
/*
 * tipc_link_sync_xmit - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 *
 * Called with node locked
 */
static void tipc_link_sync_xmit(struct tipc_link *link)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;

	skb = tipc_buf_acquire(INT_H_SIZE);
	if (!skb)
		return;

	/* header-only BCAST_PROTOCOL/STATE message carrying our last
	 * acknowledged broadcast sequence number
	 */
	msg = buf_msg(skb);
	tipc_msg_init(link_own_addr(link), msg, BCAST_PROTOCOL, STATE_MSG,
		      INT_H_SIZE, link->addr);
	msg_set_last_bcast(msg, link->owner->bclink.acked);
	__tipc_link_xmit_skb(link, skb);
}

/*
 * tipc_link_sync_rcv - synchronize broadcast link endpoints.
 * Receive the sequence number where we should start receiving and
 * acking broadcast packets from a newly added peer node, and open
 * up for reception of such packets.
 *
 * Called with node locked
 */
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
	n->bclink.recv_permitted = true;
	kfree_skb(buf);
}

910
/*
911 912 913 914 915 916
 * tipc_link_push_packets - push unsent packets to bearer
 *
 * Push out the unsent messages of a link where congestion
 * has abated. Node is locked.
 *
 * Called with node locked
P
Per Liden 已提交
917
 */
J
Jon Paul Maloy 已提交
918
void tipc_link_push_packets(struct tipc_link *link)
P
Per Liden 已提交
919
{
J
Jon Paul Maloy 已提交
920
	struct sk_buff *skb;
921
	struct tipc_msg *msg;
J
Jon Paul Maloy 已提交
922
	unsigned int ack = mod(link->next_in_no - 1);
P
Per Liden 已提交
923

J
Jon Paul Maloy 已提交
924 925 926
	while (skb_queue_len(&link->transmq) < link->window) {
		skb = __skb_dequeue(&link->backlogq);
		if (!skb)
927
			break;
J
Jon Paul Maloy 已提交
928
		msg = buf_msg(skb);
929
		link->backlog[msg_importance(msg)].len--;
J
Jon Paul Maloy 已提交
930 931 932 933 934 935
		msg_set_ack(msg, ack);
		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
		link->rcv_unacked = 0;
		__skb_queue_tail(&link->transmq, skb);
		tipc_bearer_send(link->owner->net, link->bearer_id,
				 skb, &link->media_addr);
P
Per Liden 已提交
936 937 938
	}
}

939
void tipc_link_reset_all(struct tipc_node *node)
940 941 942 943
{
	char addr_string[16];
	u32 i;

944
	tipc_node_lock(node);
945

946
	pr_warn("Resetting all links to %s\n",
947
		tipc_addr_string_fill(addr_string, node->addr));
948 949

	for (i = 0; i < MAX_BEARERS; i++) {
950 951 952
		if (node->links[i]) {
			link_print(node->links[i], "Resetting link\n");
			tipc_link_reset(node->links[i]);
953 954 955
		}
	}

956
	tipc_node_unlock(node);
957 958
}

959
/* link_retransmit_failure - handle repeated retransmit failure on a link
 *
 * Unicast links (addr != 0) are simply reset. For the broadcast link
 * (addr == 0) we dump diagnostic state for the node blocking the
 * broadcast window and schedule a broadcast link reset instead.
 *
 * Called with node locked.
 */
static void link_retransmit_failure(struct tipc_link *l_ptr,
				    struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct net *net = l_ptr->owner->net;

	pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);

	if (l_ptr->addr) {
		/* Handle failure on standard link */
		link_print(l_ptr, "Resetting link\n");
		tipc_link_reset(l_ptr);

	} else {
		/* Handle failure on broadcast link */
		struct tipc_node *n_ptr;
		char addr_string[16];

		pr_info("Msg seq number: %u,  ", msg_seqno(msg));
		pr_cont("Outstanding acks: %lu\n",
			(unsigned long) TIPC_SKB_CB(buf)->handle);

		/* Identify and report on the peer holding up the bcast link */
		n_ptr = tipc_bclink_retransmit_to(net);
		tipc_node_lock(n_ptr);

		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_info("Broadcast link info for %s\n", addr_string);
		pr_info("Reception permitted: %d,  Acked: %u\n",
			n_ptr->bclink.recv_permitted,
			n_ptr->bclink.acked);
		pr_info("Last in: %u,  Oos state: %u,  Last sent: %u\n",
			n_ptr->bclink.last_in,
			n_ptr->bclink.oos_state,
			n_ptr->bclink.last_sent);

		tipc_node_unlock(n_ptr);

		/* Request a deferred reset of the broadcast link */
		tipc_bclink_set_flags(net, TIPC_BCLINK_RESET);
		l_ptr->stale_count = 0;
	}
}

1001
/* tipc_link_retransmit - retransmit up to 'retransmits' packets
 * @l_ptr: link owning the transmit queue
 * @skb: first buffer in the transmit queue to retransmit (may be NULL)
 * @retransmits: maximum number of packets to resend
 *
 * Tracks repeated retransmissions of the same sequence number; after
 * 100 attempts the link is declared failed via link_retransmit_failure().
 * Called with node locked.
 */
void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
			  u32 retransmits)
{
	struct tipc_msg *msg;

	if (!skb)
		return;

	msg = buf_msg(skb);

	/* Detect repeated retransmit failures */
	if (l_ptr->last_retransmitted == msg_seqno(msg)) {
		if (++l_ptr->stale_count > 100) {
			link_retransmit_failure(l_ptr, skb);
			return;
		}
	} else {
		/* New head-of-queue seqno: restart the stale counter */
		l_ptr->last_retransmitted = msg_seqno(msg);
		l_ptr->stale_count = 1;
	}

	/* Resend from 'skb' onwards, refreshing acks in each header */
	skb_queue_walk_from(&l_ptr->transmq, skb) {
		if (!retransmits)
			break;
		msg = buf_msg(skb);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
				 &l_ptr->media_addr);
		retransmits--;
		l_ptr->stats.retransmitted++;
	}
}

1035 1036
static void link_retrieve_defq(struct tipc_link *link,
			       struct sk_buff_head *list)
P
Per Liden 已提交
1037 1038 1039
{
	u32 seq_no;

J
Jon Paul Maloy 已提交
1040
	if (skb_queue_empty(&link->deferdq))
1041 1042
		return;

J
Jon Paul Maloy 已提交
1043
	seq_no = buf_seqno(skb_peek(&link->deferdq));
1044
	if (seq_no == mod(link->next_in_no))
J
Jon Paul Maloy 已提交
1045
		skb_queue_splice_tail_init(&link->deferdq, list);
P
Per Liden 已提交
1046 1047
}

1048
/**
1049
 * tipc_rcv - process TIPC packets/messages arriving from off-node
1050
 * @net: the applicable net namespace
1051
 * @skb: TIPC packet
1052
 * @b_ptr: pointer to bearer message arrived on
1053 1054 1055 1056
 *
 * Invoked with no locks held.  Bearer pointer must point to a valid bearer
 * structure (i.e. cannot be NULL), but bearer can be inactive.
 */
1057
void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct sk_buff_head head;
	struct tipc_node *n_ptr;
	struct tipc_link *l_ptr;
	struct sk_buff *skb1, *tmp;
	struct tipc_msg *msg;
	u32 seq_no;
	u32 ackd;
	u32 released;

	/* Work queue seeded with the arriving buffer; deferred packets
	 * that come back into sequence are appended to it below.
	 */
	skb2list(skb, &head);

	while ((skb = __skb_dequeue(&head))) {
		/* Ensure message is well-formed */
		if (unlikely(!tipc_msg_validate(skb)))
			goto discard;

		/* Handle arrival of a non-unicast link message */
		msg = buf_msg(skb);
		if (unlikely(msg_non_seq(msg))) {
			if (msg_user(msg) ==  LINK_CONFIG)
				tipc_disc_rcv(net, skb, b_ptr);
			else
				tipc_bclink_rcv(net, skb);
			continue;
		}

		/* Discard unicast link messages destined for another node */
		if (unlikely(!msg_short(msg) &&
			     (msg_destnode(msg) != tn->own_addr)))
			goto discard;

		/* Locate neighboring node that sent message */
		n_ptr = tipc_node_find(net, msg_prevnode(msg));
		if (unlikely(!n_ptr))
			goto discard;
		tipc_node_lock(n_ptr);

		/* Locate unicast link endpoint that should handle message */
		l_ptr = n_ptr->links[b_ptr->identity];
		if (unlikely(!l_ptr))
			goto unlock;

		/* Verify that communication with node is currently allowed:
		 * a non-redundant RESET/ACTIVATE lifts the wait-down barrier.
		 */
		if ((n_ptr->action_flags & TIPC_WAIT_PEER_LINKS_DOWN) &&
		    msg_user(msg) == LINK_PROTOCOL &&
		    (msg_type(msg) == RESET_MSG ||
		    msg_type(msg) == ACTIVATE_MSG) &&
		    !msg_redundant_link(msg))
			n_ptr->action_flags &= ~TIPC_WAIT_PEER_LINKS_DOWN;

		if (tipc_node_blocked(n_ptr))
			goto unlock;

		/* Validate message sequence number info */
		seq_no = msg_seqno(msg);
		ackd = msg_ack(msg);

		/* Release acked broadcast messages */
		if (unlikely(n_ptr->bclink.acked != msg_bcast_ack(msg)))
			tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));

		/* Release unicast packets acknowledged by the peer */
		released = 0;
		skb_queue_walk_safe(&l_ptr->transmq, skb1, tmp) {
			if (more(buf_seqno(skb1), ackd))
				break;
			 __skb_unlink(skb1, &l_ptr->transmq);
			 kfree_skb(skb1);
			 released = 1;
		}

		/* Try sending any messages link endpoint has pending */
		if (unlikely(skb_queue_len(&l_ptr->backlogq)))
			tipc_link_push_packets(l_ptr);

		/* Freed window space may unblock congested senders */
		if (released && !skb_queue_empty(&l_ptr->wakeupq))
			link_prepare_wakeup(l_ptr);

		/* Process the incoming packet */
		if (unlikely(!link_working_working(l_ptr))) {
			if (msg_user(msg) == LINK_PROTOCOL) {
				tipc_link_proto_rcv(l_ptr, skb);
				link_retrieve_defq(l_ptr, &head);
				skb = NULL;
				goto unlock;
			}

			/* Traffic message. Conditionally activate link */
			link_state_event(l_ptr, TRAFFIC_MSG_EVT);

			if (link_working_working(l_ptr)) {
				/* Re-insert buffer in front of queue */
				__skb_queue_head(&head, skb);
				skb = NULL;
				goto unlock;
			}
			goto unlock;
		}

		/* Link is now in state WORKING_WORKING */
		if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
			link_handle_out_of_seq_msg(l_ptr, skb);
			link_retrieve_defq(l_ptr, &head);
			skb = NULL;
			goto unlock;
		}
		l_ptr->next_in_no++;
		/* In-sequence arrival may release deferred packets */
		if (unlikely(!skb_queue_empty(&l_ptr->deferdq)))
			link_retrieve_defq(l_ptr, &head);
		/* Ack at least once per receive window to keep peer moving */
		if (unlikely(++l_ptr->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
			l_ptr->stats.sent_acks++;
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
		}
		tipc_link_input(l_ptr, skb);
		skb = NULL;
unlock:
		tipc_node_unlock(n_ptr);
discard:
		/* skb still set here means it was not consumed above */
		if (unlikely(skb))
			kfree_skb(skb);
	}
}

1182
/* tipc_data_input - deliver data and name distr msgs to upper layer
1183
 *
1184
 * Consumes buffer if message is of right type
1185 1186
 * Node lock must be held
 */
1187
static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
1188
{
1189 1190 1191
	struct tipc_node *node = link->owner;
	struct tipc_msg *msg = buf_msg(skb);
	u32 dport = msg_destport(msg);
1192 1193

	switch (msg_user(msg)) {
1194 1195 1196 1197 1198 1199 1200 1201
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
	case CONN_MANAGER:
		if (tipc_skb_queue_tail(&link->inputq, skb, dport)) {
			node->inputq = &link->inputq;
			node->action_flags |= TIPC_MSG_EVT;
1202
		}
1203
		return true;
1204
	case NAME_DISTRIBUTOR:
1205 1206 1207 1208 1209 1210 1211 1212 1213
		node->bclink.recv_permitted = true;
		node->namedq = &link->namedq;
		skb_queue_tail(&link->namedq, skb);
		if (skb_queue_len(&link->namedq) == 1)
			node->action_flags |= TIPC_NAMED_MSG_EVT;
		return true;
	case MSG_BUNDLER:
	case CHANGEOVER_PROTOCOL:
	case MSG_FRAGMENTER:
1214
	case BCAST_PROTOCOL:
1215
		return false;
1216
	default:
1217 1218 1219 1220
		pr_warn("Dropping received illegal msg type\n");
		kfree_skb(skb);
		return false;
	};
1221
}
1222 1223 1224 1225 1226

/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 * Node lock must be held
1227
 */
1228
static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
{
	struct tipc_node *node = link->owner;
	struct tipc_msg *msg = buf_msg(skb);
	struct sk_buff *iskb;
	int pos = 0;

	/* Ordinary data and name-distribution messages are consumed here */
	if (likely(tipc_data_input(link, skb)))
		return;

	switch (msg_user(msg)) {
	case CHANGEOVER_PROTOCOL:
		if (!tipc_link_tunnel_rcv(node, &skb))
			break;
		if (msg_user(buf_msg(skb)) != MSG_BUNDLER) {
			tipc_data_input(link, skb);
			break;
		}
		/* fall through: extracted tunnel payload is a bundle */
	case MSG_BUNDLER:
		link->stats.recv_bundles++;
		link->stats.recv_bundled += msg_msgcnt(msg);

		/* Deliver each bundled message individually */
		while (tipc_msg_extract(skb, &iskb, &pos))
			tipc_data_input(link, iskb);
		break;
	case MSG_FRAGMENTER:
		link->stats.recv_fragments++;
		if (tipc_buf_append(&link->reasm_buf, &skb)) {
			/* Reassembly complete: deliver the full message */
			link->stats.recv_fragmented++;
			tipc_data_input(link, skb);
		} else if (!link->reasm_buf) {
			/* Reassembly failure: reset to resynchronize */
			tipc_link_reset(link);
		}
		break;
	case BCAST_PROTOCOL:
		tipc_link_sync_rcv(node, skb);
		break;
	default:
		break;
	};
}

1270
/**
1271 1272 1273
 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
 *
 * Returns increase in queue length (i.e. 0 or 1)
P
Per Liden 已提交
1274
 */
1275
u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
P
Per Liden 已提交
1276
{
1277 1278
	struct sk_buff *skb1;
	u32 seq_no = buf_seqno(skb);
P
Per Liden 已提交
1279 1280

	/* Empty queue ? */
1281 1282
	if (skb_queue_empty(list)) {
		__skb_queue_tail(list, skb);
P
Per Liden 已提交
1283 1284 1285 1286
		return 1;
	}

	/* Last ? */
1287 1288
	if (less(buf_seqno(skb_peek_tail(list)), seq_no)) {
		__skb_queue_tail(list, skb);
P
Per Liden 已提交
1289 1290 1291
		return 1;
	}

1292
	/* Locate insertion point in queue, then insert; discard if duplicate */
1293 1294
	skb_queue_walk(list, skb1) {
		u32 curr_seqno = buf_seqno(skb1);
P
Per Liden 已提交
1295

1296
		if (seq_no == curr_seqno) {
1297
			kfree_skb(skb);
1298
			return 0;
P
Per Liden 已提交
1299
		}
1300 1301

		if (less(seq_no, curr_seqno))
P
Per Liden 已提交
1302
			break;
1303
	}
P
Per Liden 已提交
1304

1305
	__skb_queue_before(list, skb1, skb);
1306
	return 1;
P
Per Liden 已提交
1307 1308
}

1309
/*
P
Per Liden 已提交
1310 1311
 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
 */
1312
static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
				       struct sk_buff *buf)
{
	u32 seq_no = buf_seqno(buf);

	/* Protocol messages are processed regardless of sequence order */
	if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
		tipc_link_proto_rcv(l_ptr, buf);
		return;
	}

	/* Record OOS packet arrival (force mismatch on next timeout) */
	l_ptr->checkpoint--;

	/*
	 * Discard packet if a duplicate; otherwise add it to deferred queue
	 * and notify peer of gap as per protocol specification
	 */
	if (less(seq_no, mod(l_ptr->next_in_no))) {
		l_ptr->stats.duplicates++;
		kfree_skb(buf);
		return;
	}

	if (tipc_link_defer_pkt(&l_ptr->deferdq, buf)) {
		l_ptr->stats.deferred_recv++;
		/* Nack once per window's worth of deferred packets */
		if ((skb_queue_len(&l_ptr->deferdq) % TIPC_MIN_LINK_WIN) == 1)
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
	} else {
		l_ptr->stats.duplicates++;
	}
}

/*
 * Send protocol message to the other endpoint.
 */
1347 1348
void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
			  u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
{
	struct sk_buff *buf = NULL;
	struct tipc_msg *msg = l_ptr->pmsg;
	u32 msg_size = sizeof(l_ptr->proto_msg);
	int r_flag;

	/* Don't send protocol message during link changeover */
	if (l_ptr->exp_msg_count)
		return;

	/* Abort non-RESET send if communication with node is prohibited */
	if ((tipc_node_blocked(l_ptr->owner)) && (msg_typ != RESET_MSG))
		return;

	/* Create protocol message with "out-of-sequence" sequence number */
	msg_set_type(msg, msg_typ);
	msg_set_net_plane(msg, l_ptr->net_plane);
	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
	msg_set_last_bcast(msg, tipc_bclink_get_last_sent(l_ptr->owner->net));

	if (msg_typ == STATE_MSG) {
		u32 next_sent = mod(l_ptr->next_out_no);

		if (!tipc_link_is_up(l_ptr))
			return;
		/* Report the oldest not-yet-sent seqno, if any is backlogged */
		if (skb_queue_len(&l_ptr->backlogq))
			next_sent = buf_seqno(skb_peek(&l_ptr->backlogq));
		msg_set_next_sent(msg, next_sent);
		/* Deferred packets imply a reception gap to be nacked */
		if (!skb_queue_empty(&l_ptr->deferdq)) {
			u32 rec = buf_seqno(skb_peek(&l_ptr->deferdq));
			gap = mod(rec - mod(l_ptr->next_in_no));
		}
		msg_set_seq_gap(msg, gap);
		if (gap)
			l_ptr->stats.sent_nacks++;
		msg_set_link_tolerance(msg, tolerance);
		msg_set_linkprio(msg, priority);
		msg_set_max_pkt(msg, ack_mtu);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
		msg_set_probe(msg, probe_msg != 0);
		if (probe_msg) {
			u32 mtu = l_ptr->max_pkt;

			/* Probe for a larger usable MTU by padding the
			 * message halfway between current and target size;
			 * after 10 unanswered probes, lower the target.
			 */
			if ((mtu < l_ptr->max_pkt_target) &&
			    link_working_working(l_ptr) &&
			    l_ptr->fsm_msg_cnt) {
				msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
				if (l_ptr->max_pkt_probes == 10) {
					l_ptr->max_pkt_target = (msg_size - 4);
					l_ptr->max_pkt_probes = 0;
					msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
				}
				l_ptr->max_pkt_probes++;
			}

			l_ptr->stats.sent_probes++;
		}
		l_ptr->stats.sent_states++;
	} else {		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
		msg_set_seq_gap(msg, 0);
		msg_set_next_sent(msg, 1);
		msg_set_probe(msg, 0);
		msg_set_link_tolerance(msg, l_ptr->tolerance);
		msg_set_linkprio(msg, l_ptr->priority);
		msg_set_max_pkt(msg, l_ptr->max_pkt_target);
	}

	/* Tell peer whether we have a redundant working link to it */
	r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
	msg_set_redundant_link(msg, r_flag);
	msg_set_linkprio(msg, l_ptr->priority);
	msg_set_size(msg, msg_size);

	msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));

	buf = tipc_buf_acquire(msg_size);
	if (!buf)
		return;

	skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
	buf->priority = TC_PRIO_CONTROL;
	/* Protocol messages are sent directly, bypassing the send queues */
	tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, buf,
			 &l_ptr->media_addr);
	l_ptr->rcv_unacked = 0;
	kfree_skb(buf);
}

/*
 * Receive protocol message :
1438 1439
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest address rules
P
Per Liden 已提交
1440
 */
1441
static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
				struct sk_buff *buf)
{
	u32 rec_gap = 0;
	u32 max_pkt_info;
	u32 max_pkt_ack;
	u32 msg_tol;
	struct tipc_msg *msg = buf_msg(buf);

	/* Discard protocol message during link changeover */
	if (l_ptr->exp_msg_count)
		goto exit;

	/* Net plane may change; the node with the lowest address rules */
	if (l_ptr->net_plane != msg_net_plane(msg))
		if (link_own_addr(l_ptr) > msg_prevnode(msg))
			l_ptr->net_plane = msg_net_plane(msg);

	switch (msg_type(msg)) {

	case RESET_MSG:
		if (!link_working_unknown(l_ptr) &&
		    (l_ptr->peer_session != INVALID_SESSION)) {
			if (less_eq(msg_session(msg), l_ptr->peer_session))
				break; /* duplicate or old reset: ignore */
		}

		if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
				link_working_unknown(l_ptr))) {
			/*
			 * peer has lost contact -- don't allow peer's links
			 * to reactivate before we recognize loss & clean up
			 */
			l_ptr->owner->action_flags |= TIPC_WAIT_OWN_LINKS_DOWN;
		}

		link_state_event(l_ptr, RESET_MSG);

		/* fall thru' */
	case ACTIVATE_MSG:
		/* Update link settings according other endpoint's values */
		strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));

		/* Adopt peer's tolerance/priority when they are stricter */
		msg_tol = msg_link_tolerance(msg);
		if (msg_tol > l_ptr->tolerance)
			link_set_supervision_props(l_ptr, msg_tol);

		if (msg_linkprio(msg) > l_ptr->priority)
			l_ptr->priority = msg_linkprio(msg);

		/* Negotiate MTU: never exceed what the peer advertises */
		max_pkt_info = msg_max_pkt(msg);
		if (max_pkt_info) {
			if (max_pkt_info < l_ptr->max_pkt_target)
				l_ptr->max_pkt_target = max_pkt_info;
			if (l_ptr->max_pkt > l_ptr->max_pkt_target)
				l_ptr->max_pkt = l_ptr->max_pkt_target;
		} else {
			l_ptr->max_pkt = l_ptr->max_pkt_target;
		}

		/* Synchronize broadcast link info, if not done previously */
		if (!tipc_node_is_up(l_ptr->owner)) {
			l_ptr->owner->bclink.last_sent =
				l_ptr->owner->bclink.last_in =
				msg_last_bcast(msg);
			l_ptr->owner->bclink.oos_state = 0;
		}

		l_ptr->peer_session = msg_session(msg);
		l_ptr->peer_bearer_id = msg_bearer_id(msg);

		if (msg_type(msg) == ACTIVATE_MSG)
			link_state_event(l_ptr, ACTIVATE_MSG);
		break;
	case STATE_MSG:

		msg_tol = msg_link_tolerance(msg);
		if (msg_tol)
			link_set_supervision_props(l_ptr, msg_tol);

		/* A changed peer priority forces a link reset to apply it */
		if (msg_linkprio(msg) &&
		    (msg_linkprio(msg) != l_ptr->priority)) {
			pr_debug("%s<%s>, priority change %u->%u\n",
				 link_rst_msg, l_ptr->name,
				 l_ptr->priority, msg_linkprio(msg));
			l_ptr->priority = msg_linkprio(msg);
			tipc_link_reset(l_ptr); /* Enforce change to take effect */
			break;
		}

		/* Record reception; force mismatch at next timeout: */
		l_ptr->checkpoint--;

		link_state_event(l_ptr, TRAFFIC_MSG_EVT);
		l_ptr->stats.recv_states++;
		if (link_reset_unknown(l_ptr))
			break;

		/* Compute how far the peer claims to be ahead of us */
		if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
			rec_gap = mod(msg_next_sent(msg) -
				      mod(l_ptr->next_in_no));
		}

		max_pkt_ack = msg_max_pkt(msg);
		if (max_pkt_ack > l_ptr->max_pkt) {
			l_ptr->max_pkt = max_pkt_ack;
			l_ptr->max_pkt_probes = 0;
		}

		max_pkt_ack = 0;
		if (msg_probe(msg)) {
			l_ptr->stats.recv_probes++;
			/* Echo oversized probe size back as MTU ack */
			if (msg_size(msg) > sizeof(l_ptr->proto_msg))
				max_pkt_ack = msg_size(msg);
		}

		/* Protocol message before retransmits, reduce loss risk */
		if (l_ptr->owner->bclink.recv_permitted)
			tipc_bclink_update_link_state(l_ptr->owner,
						      msg_last_bcast(msg));

		if (rec_gap || (msg_probe(msg))) {
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, rec_gap, 0,
					     0, max_pkt_ack);
		}
		if (msg_seq_gap(msg)) {
			l_ptr->stats.recv_nacks++;
			tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->transmq),
					     msg_seq_gap(msg));
		}
		break;
	}
exit:
	kfree_skb(buf);
}


1577 1578
/* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
 * a different bearer. Owner node is locked.
P
Per Liden 已提交
1579
 */
1580 1581 1582 1583
static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
				  struct tipc_msg *tunnel_hdr,
				  struct tipc_msg *msg,
				  u32 selector)
{
	u32 length = msg_size(msg);
	struct tipc_link *tnl = l_ptr->owner->active_links[selector & 1];
	struct sk_buff *skb;

	if (!tipc_link_is_up(tnl)) {
		pr_warn("%stunnel link no longer available\n", link_co_err);
		return;
	}

	/* Wrap the original message behind the tunnel header */
	msg_set_size(tunnel_hdr, length + INT_H_SIZE);
	skb = tipc_buf_acquire(length + INT_H_SIZE);
	if (!skb) {
		pr_warn("%sunable to send tunnel msg\n", link_co_err);
		return;
	}
	skb_copy_to_linear_data(skb, tunnel_hdr, INT_H_SIZE);
	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, msg, length);
	__tipc_link_xmit_skb(tnl, skb);
}


1606 1607 1608 1609 1610
/* tipc_link_failover_send_queue(): A link has gone down, but a second
 * link is still active. We can do failover. Tunnel the failing link's
 * whole send queue via the remaining link. This way, we don't lose
 * any packets, and sequence order is preserved for subsequent traffic
 * sent over the remaining link. Owner node is locked.
P
Per Liden 已提交
1611
 */
1612
void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
{
	int msgcount;
	struct tipc_link *tunnel = l_ptr->owner->active_links[0];
	struct tipc_msg tunnel_hdr;
	struct sk_buff *skb;
	int split_bundles;

	if (!tunnel)
		return;

	tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, CHANGEOVER_PROTOCOL,
		      ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
	/* Fold the backlog into the transmit queue so everything tunnels */
	skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq);
	tipc_link_purge_backlog(l_ptr);
	msgcount = skb_queue_len(&l_ptr->transmq);
	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
	msg_set_msgcnt(&tunnel_hdr, msgcount);

	/* Empty queue: send a lone header so the peer still learns msgcount */
	if (skb_queue_empty(&l_ptr->transmq)) {
		skb = tipc_buf_acquire(INT_H_SIZE);
		if (skb) {
			skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE);
			msg_set_size(&tunnel_hdr, INT_H_SIZE);
			__tipc_link_xmit_skb(tunnel, skb);
		} else {
			pr_warn("%sunable to send changeover msg\n",
				link_co_err);
		}
		return;
	}

	/* With two distinct active links, bundles must be split so each
	 * inner message can follow its own link selector.
	 */
	split_bundles = (l_ptr->owner->active_links[0] !=
			 l_ptr->owner->active_links[1]);

	skb_queue_walk(&l_ptr->transmq, skb) {
		struct tipc_msg *msg = buf_msg(skb);

		if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
			struct tipc_msg *m = msg_get_wrapped(msg);
			unchar *pos = (unchar *)m;

			/* Tunnel each bundled message separately, stamped
			 * with the bundle's sequence number.
			 */
			msgcount = msg_msgcnt(msg);
			while (msgcount--) {
				msg_set_seqno(m, msg_seqno(msg));
				tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, m,
						      msg_link_selector(m));
				pos += align(msg_size(m));
				m = (struct tipc_msg *)pos;
			}
		} else {
			tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
					      msg_link_selector(msg));
		}
	}
}

1669
/* tipc_link_dup_queue_xmit(): A second link has become active. Tunnel a
1670 1671 1672 1673 1674 1675 1676 1677
 * duplicate of the first link's send queue via the new link. This way, we
 * are guaranteed that currently queued packets from a socket are delivered
 * before future traffic from the same socket, even if this is using the
 * new link. The last arriving copy of each duplicate packet is dropped at
 * the receiving end by the regular protocol check, so packet cardinality
 * and sequence order is preserved per sender/receiver socket pair.
 * Owner node is locked.
 */
J
Jon Paul Maloy 已提交
1678 1679
void tipc_link_dup_queue_xmit(struct tipc_link *link,
			      struct tipc_link *tnl)
{
	struct sk_buff *skb;
	struct tipc_msg tnl_hdr;
	struct sk_buff_head *queue = &link->transmq;
	int mcnt;

	tipc_msg_init(link_own_addr(link), &tnl_hdr, CHANGEOVER_PROTOCOL,
		      DUPLICATE_MSG, INT_H_SIZE, link->addr);
	/* Peer expects duplicates of both in-flight and backlogged packets */
	mcnt = skb_queue_len(&link->transmq) + skb_queue_len(&link->backlogq);
	msg_set_msgcnt(&tnl_hdr, mcnt);
	msg_set_bearer_id(&tnl_hdr, link->peer_bearer_id);

tunnel_queue:
	/* First pass walks transmq; second pass (via goto) walks backlogq */
	skb_queue_walk(queue, skb) {
		struct sk_buff *outskb;
		struct tipc_msg *msg = buf_msg(skb);
		u32 len = msg_size(msg);

		/* Refresh acks in the original header before copying it */
		msg_set_ack(msg, mod(link->next_in_no - 1));
		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
		msg_set_size(&tnl_hdr, len + INT_H_SIZE);
		outskb = tipc_buf_acquire(len + INT_H_SIZE);
		if (outskb == NULL) {
			pr_warn("%sunable to send duplicate msg\n",
				link_co_err);
			return;
		}
		skb_copy_to_linear_data(outskb, &tnl_hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(outskb, INT_H_SIZE,
					       skb->data, len);
		__tipc_link_xmit_skb(tnl, outskb);
		/* Stop if the duplicated link went down meanwhile */
		if (!tipc_link_is_up(link))
			return;
	}
	if (queue == &link->backlogq)
		return;
	queue = &link->backlogq;
	goto tunnel_queue;
}

1720 1721 1722
/* tipc_link_dup_rcv(): Receive a tunnelled DUPLICATE_MSG packet.
 * Owner node is locked.
 */
1723 1724
static void tipc_link_dup_rcv(struct tipc_link *link,
			      struct sk_buff *skb)
1725
{
1726 1727
	struct sk_buff *iskb;
	int pos = 0;
1728

1729
	if (!tipc_link_is_up(link))
1730 1731
		return;

1732
	if (!tipc_msg_extract(skb, &iskb, &pos)) {
1733 1734 1735
		pr_warn("%sfailed to extract inner dup pkt\n", link_co_err);
		return;
	}
1736 1737
	/* Append buffer to deferred queue, if applicable: */
	link_handle_out_of_seq_msg(link, iskb);
1738 1739
}

1740 1741 1742 1743 1744 1745 1746 1747 1748
/*  tipc_link_failover_rcv(): Receive a tunnelled ORIGINAL_MSG packet
 *  Owner node is locked.
 */
static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr,
					      struct sk_buff *t_buf)
{
	struct tipc_msg *t_msg = buf_msg(t_buf);
	struct sk_buff *buf = NULL;
	struct tipc_msg *msg;
	int pos = 0;

	/* Failing link must not carry traffic anymore */
	if (tipc_link_is_up(l_ptr))
		tipc_link_reset(l_ptr);

	/* First failover packet? */
	if (l_ptr->exp_msg_count == START_CHANGEOVER)
		l_ptr->exp_msg_count = msg_msgcnt(t_msg);

	/* Should there be an inner packet? */
	if (l_ptr->exp_msg_count) {
		l_ptr->exp_msg_count--;
		if (!tipc_msg_extract(t_buf, &buf, &pos)) {
			pr_warn("%sno inner failover pkt\n", link_co_err);
			goto exit;
		}
		msg = buf_msg(buf);

		/* Drop packets from before the link was reset */
		if (less(msg_seqno(msg), l_ptr->reset_checkpoint)) {
			kfree_skb(buf);
			buf = NULL;
			goto exit;
		}
		if (msg_user(msg) == MSG_FRAGMENTER) {
			l_ptr->stats.recv_fragments++;
			/* 'buf' becomes NULL unless reassembly completed */
			tipc_buf_append(&l_ptr->reasm_buf, &buf);
		}
	}
exit:
	/* A stopped link can be deleted once all tunnelled pkts arrived */
	if ((!l_ptr->exp_msg_count) && (l_ptr->flags & LINK_STOPPED))
		tipc_link_delete(l_ptr);
	return buf;
}

1783
/*  tipc_link_tunnel_rcv(): Receive a tunnelled packet, sent
1784 1785 1786 1787
 *  via other link as result of a failover (ORIGINAL_MSG) or
 *  a new active link (DUPLICATE_MSG). Failover packets are
 *  returned to the active link for delivery upwards.
 *  Owner node is locked.
P
Per Liden 已提交
1788
 */
1789
static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
				struct sk_buff **buf)
{
	struct sk_buff *t_buf = *buf;
	struct tipc_link *l_ptr;
	struct tipc_msg *t_msg = buf_msg(t_buf);
	u32 bearer_id = msg_bearer_id(t_msg);

	/* Default: nothing extracted for the caller */
	*buf = NULL;

	/* Validate bearer id from the wire before using it as an index */
	if (bearer_id >= MAX_BEARERS)
		goto exit;

	l_ptr = n_ptr->links[bearer_id];
	if (!l_ptr)
		goto exit;

	if (msg_type(t_msg) == DUPLICATE_MSG)
		tipc_link_dup_rcv(l_ptr, t_buf);
	else if (msg_type(t_msg) == ORIGINAL_MSG)
		*buf = tipc_link_failover_rcv(l_ptr, t_buf);
	else
		pr_warn("%sunknown tunnel pkt received\n", link_co_err);
exit:
	/* Tunnel envelope is always consumed here */
	kfree_skb(t_buf);
	return *buf != NULL;
}

1817
/* link_set_supervision_props - apply a new link tolerance value
 *
 * Out-of-range tolerances are silently ignored; otherwise the continuity
 * probe interval and abort limit are recomputed from the new tolerance.
 */
static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol)
{
	unsigned long intv;

	if (tol < TIPC_MIN_LINK_TOL || tol > TIPC_MAX_LINK_TOL)
		return;

	/* Probe interval is a quarter of the tolerance, capped at 500 ms */
	intv = (tol / 4 > 500) ? 500 : tol / 4;

	l_ptr->tolerance = tol;
	l_ptr->cont_intv = msecs_to_jiffies(intv);
	l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->cont_intv) / 4);
}

1829
/* tipc_link_set_queue_limits - set send window and per-importance
 * backlog limits for a link
 */
void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
{
	/* Bulk (SYSTEM importance) traffic is limited by how many
	 * publication items fit into link-MTU-sized messages.
	 */
	int bulk_limit = TIPC_MAX_PUBLICATIONS / (l->max_pkt / ITEM_SIZE);

	l->window = win;

	/* Backlog limits scale with message importance */
	l->backlog[TIPC_LOW_IMPORTANCE].limit      = win / 2;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = win;
	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = win / 2 * 3;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = bulk_limit;
}

1841
/* tipc_link_find_owner - locate owner node of link by link's name
1842
 * @net: the applicable net namespace
1843 1844
 * @name: pointer to link name string
 * @bearer_id: pointer to index in 'node->links' array where the link was found.
1845
 *
1846
 * Returns pointer to node owning the link, or 0 if no matching link is found.
P
Per Liden 已提交
1847
 */
1848 1849
static struct tipc_node *tipc_link_find_owner(struct net *net,
					      const char *link_name,
1850
					      unsigned int *bearer_id)
P
Per Liden 已提交
1851
{
1852
	struct tipc_net *tn = net_generic(net, tipc_net_id);
1853
	struct tipc_link *l_ptr;
1854
	struct tipc_node *n_ptr;
1855
	struct tipc_node *found_node = NULL;
1856
	int i;
P
Per Liden 已提交
1857

1858
	*bearer_id = 0;
1859
	rcu_read_lock();
1860
	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
1861
		tipc_node_lock(n_ptr);
1862 1863
		for (i = 0; i < MAX_BEARERS; i++) {
			l_ptr = n_ptr->links[i];
1864 1865 1866 1867 1868
			if (l_ptr && !strcmp(l_ptr->name, link_name)) {
				*bearer_id = i;
				found_node = n_ptr;
				break;
			}
1869
		}
1870
		tipc_node_unlock(n_ptr);
1871 1872
		if (found_node)
			break;
1873
	}
1874 1875
	rcu_read_unlock();

1876
	return found_node;
P
Per Liden 已提交
1877 1878 1879 1880 1881 1882
}

/**
 * link_reset_statistics - reset link statistics
 * @l_ptr: pointer to link
 */
1883
static void link_reset_statistics(struct tipc_link *l_ptr)
P
Per Liden 已提交
1884 1885 1886 1887 1888 1889
{
	memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
	l_ptr->stats.sent_info = l_ptr->next_out_no;
	l_ptr->stats.recv_info = l_ptr->next_in_no;
}

1890
static void link_print(struct tipc_link *l_ptr, const char *str)
P
Per Liden 已提交
1891
{
1892
	struct tipc_net *tn = net_generic(l_ptr->owner->net, tipc_net_id);
1893 1894 1895
	struct tipc_bearer *b_ptr;

	rcu_read_lock();
1896
	b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
1897 1898 1899
	if (b_ptr)
		pr_info("%s Link %x<%s>:", str, l_ptr->addr, b_ptr->name);
	rcu_read_unlock();
1900

P
Per Liden 已提交
1901
	if (link_working_unknown(l_ptr))
1902
		pr_cont(":WU\n");
1903
	else if (link_reset_reset(l_ptr))
1904
		pr_cont(":RR\n");
1905
	else if (link_reset_unknown(l_ptr))
1906
		pr_cont(":RU\n");
1907
	else if (link_working_working(l_ptr))
1908 1909 1910
		pr_cont(":WW\n");
	else
		pr_cont("\n");
P
Per Liden 已提交
1911
}
1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946 1947 1948 1949

/* Parse and validate nested (link) properties valid for media, bearer and link
 *
 * Returns 0 on success; a negative errno if parsing fails or any supplied
 * property is out of its permitted range.
 */
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
	int err;

	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
			       tipc_nl_prop_policy);
	if (err)
		return err;

	if (props[TIPC_NLA_PROP_PRIO] &&
	    nla_get_u32(props[TIPC_NLA_PROP_PRIO]) > TIPC_MAX_LINK_PRI)
		return -EINVAL;

	if (props[TIPC_NLA_PROP_TOL]) {
		u32 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);

		if (tol < TIPC_MIN_LINK_TOL || tol > TIPC_MAX_LINK_TOL)
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_WIN]) {
		u32 win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);

		if (win < TIPC_MIN_LINK_WIN || win > TIPC_MAX_LINK_WIN)
			return -EINVAL;
	}

	return 0;
}
1950

1951 1952 1953 1954 1955 1956 1957 1958 1959
int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	int res = 0;
	int bearer_id;
	char *name;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
1960
	struct net *net = sock_net(skb->sk);
1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

1976
	node = tipc_link_find_owner(net, name, &bearer_id);
1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024
	if (!node)
		return -EINVAL;

	tipc_node_lock(node);

	link = node->links[bearer_id];
	if (!link) {
		res = -EINVAL;
		goto out;
	}

	if (attrs[TIPC_NLA_LINK_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
					      props);
		if (err) {
			res = err;
			goto out;
		}

		if (props[TIPC_NLA_PROP_TOL]) {
			u32 tol;

			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
			link_set_supervision_props(link, tol);
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0, 0);
		}
		if (props[TIPC_NLA_PROP_PRIO]) {
			u32 prio;

			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
			link->priority = prio;
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio, 0);
		}
		if (props[TIPC_NLA_PROP_WIN]) {
			u32 win;

			win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
			tipc_link_set_queue_limits(link, win);
		}
	}

out:
	tipc_node_unlock(node);

	return res;
}
2025 2026

static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
2027 2028 2029 2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077 2078 2079 2080 2081 2082 2083 2084 2085 2086 2087 2088 2089 2090 2091
{
	int i;
	struct nlattr *stats;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, s->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, s->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i <  ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}

/* Caller should hold appropriate locks to protect the link */
2092 2093
static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
			      struct tipc_link *link)
2094 2095 2096 2097 2098
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
2099
	struct tipc_net *tn = net_generic(net, tipc_net_id);
2100

2101
	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2102 2103 2104 2105 2106 2107 2108 2109 2110 2111 2112
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
2113
			tipc_cluster_mask(tn->own_addr)))
2114 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124 2125 2126 2127 2128 2129 2130 2131 2132 2133 2134 2135 2136
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->max_pkt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->next_in_no))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->next_out_no))
		goto attr_msg_full;

	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
			goto attr_msg_full;
	if (tipc_link_is_active(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
			goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
2137
			link->window))
2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);
	if (err)
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

/* Caller should hold node lock  */
2163 2164
static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
				    struct tipc_node *node, u32 *prev_link)
2165 2166 2167 2168 2169 2170 2171 2172 2173 2174
{
	u32 i;
	int err;

	for (i = *prev_link; i < MAX_BEARERS; i++) {
		*prev_link = i;

		if (!node->links[i])
			continue;

2175
		err = __tipc_nl_add_link(net, msg, node->links[i]);
2176 2177 2178 2179 2180 2181 2182 2183 2184 2185
		if (err)
			return err;
	}
	*prev_link = 0;

	return 0;
}

int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
2186 2187
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203 2204
	struct tipc_node *node;
	struct tipc_nl_msg msg;
	u32 prev_node = cb->args[0];
	u32 prev_link = cb->args[1];
	int done = cb->args[2];
	int err;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();

	if (prev_node) {
2205
		node = tipc_node_find(net, prev_node);
2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216
		if (!node) {
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistence check to fail in the netlink callback
			 * handler. Resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			goto out;
		}

2217 2218
		list_for_each_entry_continue_rcu(node, &tn->node_list,
						 list) {
2219
			tipc_node_lock(node);
2220 2221
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
2222 2223 2224 2225 2226 2227 2228
			tipc_node_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	} else {
2229
		err = tipc_nl_add_bc_link(net, &msg);
2230 2231 2232
		if (err)
			goto out;

2233
		list_for_each_entry_rcu(node, &tn->node_list, list) {
2234
			tipc_node_lock(node);
2235 2236
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256
			tipc_node_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	}
	done = 1;
out:
	rcu_read_unlock();

	cb->args[0] = prev_node;
	cb->args[1] = prev_link;
	cb->args[2] = done;

	return skb->len;
}

/* tipc_nl_link_get - netlink handler: fetch a single link by name */
int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct sk_buff *reply;
	struct tipc_nl_msg msg;
	struct tipc_link *link;
	struct tipc_node *node;
	char *name;
	int bearer_id;
	int err;

	if (!info->attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);
	node = tipc_link_find_owner(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	reply = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!reply)
		return -ENOMEM;

	msg.skb = reply;
	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	/* Node lock keeps the link stable while we serialize it */
	tipc_node_lock(node);
	link = node->links[bearer_id];
	if (!link) {
		err = -EINVAL;
		goto err_out;
	}

	err = __tipc_nl_add_link(net, &msg, link);
	if (err)
		goto err_out;

	tipc_node_unlock(node);

	return genlmsg_reply(reply, info);

err_out:
	tipc_node_unlock(node);
	nlmsg_free(reply);

	return err;
}
2303 2304 2305 2306 2307 2308 2309 2310 2311

int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	char *link_name;
	unsigned int bearer_id;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
2312
	struct net *net = sock_net(skb->sk);
2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(link_name, tipc_bclink_name) == 0) {
2329
		err = tipc_bclink_reset_stats(net);
2330 2331 2332 2333 2334
		if (err)
			return err;
		return 0;
	}

2335
	node = tipc_link_find_owner(net, link_name, &bearer_id);
2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352
	if (!node)
		return -EINVAL;

	tipc_node_lock(node);

	link = node->links[bearer_id];
	if (!link) {
		tipc_node_unlock(node);
		return -EINVAL;
	}

	link_reset_statistics(link);

	tipc_node_unlock(node);

	return 0;
}