/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2014, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "bcast.h"
#include "socket.h"
#include "name_distr.h"
#include "discover.h"
#include "config.h"
#include "netlink.h"

#include <linux/pkt_sched.h>

/*
 * Error message prefixes
 */
static const char *link_co_err = "Link changeover error, ";
static const char *link_rst_msg = "Resetting link ";
static const char *link_unk_evt = "Unknown link event ";

static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
	[TIPC_NLA_LINK_UNSPEC]		= { .type = NLA_UNSPEC },
	[TIPC_NLA_LINK_NAME] = {
		.type = NLA_STRING,
		.len = TIPC_MAX_LINK_NAME
	},
	[TIPC_NLA_LINK_MTU]		= { .type = NLA_U32 },
	[TIPC_NLA_LINK_BROADCAST]	= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_UP]		= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_ACTIVE]		= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_PROP]		= { .type = NLA_NESTED },
	[TIPC_NLA_LINK_STATS]		= { .type = NLA_NESTED },
	[TIPC_NLA_LINK_RX]		= { .type = NLA_U32 },
	[TIPC_NLA_LINK_TX]		= { .type = NLA_U32 }
};

/* Properties valid for media, bearer and link */
static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
	[TIPC_NLA_PROP_UNSPEC]		= { .type = NLA_UNSPEC },
	[TIPC_NLA_PROP_PRIO]		= { .type = NLA_U32 },
	[TIPC_NLA_PROP_TOL]		= { .type = NLA_U32 },
	[TIPC_NLA_PROP_WIN]		= { .type = NLA_U32 }
};

/*
 * Out-of-range value for link session numbers
 */
#define INVALID_SESSION 0x10000

/*
 * Link state events:
 */
#define  STARTING_EVT    856384768	/* link processing trigger */
#define  TRAFFIC_MSG_EVT 560815u	/* rx'd traffic message */
#define  TIMEOUT_EVT     560817u	/* link timer expired */

/*
 * The following two 'message types' are really just implementation
 * data conveniently stored in the message header.
 * They must not be considered part of the protocol
 */
#define OPEN_MSG   0
#define CLOSED_MSG 1

/*
 * State value stored in 'exp_msg_count'
 */
#define START_CHANGEOVER 100000u

static void link_handle_out_of_seq_msg(struct net *net,
				       struct tipc_link *l_ptr,
				       struct sk_buff *buf);
static void tipc_link_proto_rcv(struct net *net, struct tipc_link *l_ptr,
				struct sk_buff *buf);
static int  tipc_link_tunnel_rcv(struct net *net, struct tipc_node *n_ptr,
				 struct sk_buff **buf);
static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol);
static void link_state_event(struct tipc_link *l_ptr, u32 event);
static void link_reset_statistics(struct tipc_link *l_ptr);
static void link_print(struct tipc_link *l_ptr, const char *str);
static void tipc_link_sync_xmit(struct tipc_link *l);
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
static int tipc_link_input(struct net *net, struct tipc_link *l,
			   struct sk_buff *buf);
static int tipc_link_prepare_input(struct net *net, struct tipc_link *l,
				   struct sk_buff **buf);

/*
 *  Simple link routines
 */
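/* align - round a length up to the next 4-byte boundary (TIPC word alignment) */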
static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}

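/* link_init_max_pkt - derive the link's MTU values from the bearer MTU,
 * word-aligned and capped at MAX_MSG_SIZE
 */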
static void link_init_max_pkt(struct tipc_link *l_ptr)
{
	struct tipc_bearer *b_ptr;
	u32 max_pkt;

	rcu_read_lock();
	b_ptr = rcu_dereference_rtnl(bearer_list[l_ptr->bearer_id]);
	if (!b_ptr) {
		rcu_read_unlock();
		return;
	}
	max_pkt = (b_ptr->mtu & ~3);
	rcu_read_unlock();

	if (max_pkt > MAX_MSG_SIZE)
		max_pkt = MAX_MSG_SIZE;

	l_ptr->max_pkt_target = max_pkt;
	if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
		l_ptr->max_pkt = l_ptr->max_pkt_target;
	else
		l_ptr->max_pkt = MAX_PKT_DEFAULT;

	l_ptr->max_pkt_probes = 0;
}

/*
 *  Simple non-static link routines (i.e. referenced outside this file)
 */
int tipc_link_is_up(struct tipc_link *l_ptr)
{
	if (!l_ptr)
		return 0;
	return link_working_working(l_ptr) || link_working_unknown(l_ptr);
}

int tipc_link_is_active(struct tipc_link *l_ptr)
{
	return	(l_ptr->owner->active_links[0] == l_ptr) ||
		(l_ptr->owner->active_links[1] == l_ptr);
}

/**
 * link_timeout - handle expiration of link timer
 * @l_ptr: pointer to link
 */
static void link_timeout(unsigned long data)
{
	struct tipc_link *l_ptr = (struct tipc_link *)data;
	struct sk_buff *skb;

	tipc_node_lock(l_ptr->owner);

	/* update counters used in statistical profiling of send traffic */
	l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->outqueue);
	l_ptr->stats.queue_sz_counts++;

	skb = skb_peek(&l_ptr->outqueue);
	if (skb) {
		struct tipc_msg *msg = buf_msg(skb);
		u32 length = msg_size(msg);

		if ((msg_user(msg) == MSG_FRAGMENTER) &&
		    (msg_type(msg) == FIRST_FRAGMENT)) {
			length = msg_size(msg_get_wrapped(msg));
		}
		if (length) {
			l_ptr->stats.msg_lengths_total += length;
			l_ptr->stats.msg_length_counts++;
			if (length <= 64)
				l_ptr->stats.msg_length_profile[0]++;
			else if (length <= 256)
				l_ptr->stats.msg_length_profile[1]++;
			else if (length <= 1024)
				l_ptr->stats.msg_length_profile[2]++;
			else if (length <= 4096)
				l_ptr->stats.msg_length_profile[3]++;
			else if (length <= 16384)
				l_ptr->stats.msg_length_profile[4]++;
			else if (length <= 32768)
				l_ptr->stats.msg_length_profile[5]++;
			else
				l_ptr->stats.msg_length_profile[6]++;
		}
	}

	/* do all other link processing performed on a periodic basis */
	link_state_event(l_ptr, TIMEOUT_EVT);

	if (l_ptr->next_out)
		tipc_link_push_packets(l_ptr);

	tipc_node_unlock(l_ptr->owner);
}

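/* link_set_timer - (re)arm the link timer to expire 'time' jiffies from now */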
static void link_set_timer(struct tipc_link *link, unsigned long time)
{
	mod_timer(&link->timer, jiffies + time);
}

/**
231
 * tipc_link_create - create a new link
232
 * @n_ptr: pointer to associated node
P
Per Liden 已提交
233 234
 * @b_ptr: pointer to associated bearer
 * @media_addr: media address to use when sending messages over link
235
 *
P
Per Liden 已提交
236 237
 * Returns pointer to link.
 */
238
struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
239 240
				   struct tipc_bearer *b_ptr,
				   const struct tipc_media_addr *media_addr)
P
Per Liden 已提交
241
{
242
	struct tipc_link *l_ptr;
P
Per Liden 已提交
243 244
	struct tipc_msg *msg;
	char *if_name;
245 246 247
	char addr_string[16];
	u32 peer = n_ptr->addr;

248
	if (n_ptr->link_cnt >= MAX_BEARERS) {
249
		tipc_addr_string_fill(addr_string, n_ptr->addr);
250 251
		pr_err("Attempt to establish %uth link to %s. Max %u allowed.\n",
			n_ptr->link_cnt, addr_string, MAX_BEARERS);
252 253 254 255 256
		return NULL;
	}

	if (n_ptr->links[b_ptr->identity]) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
257 258
		pr_err("Attempt to establish second link on <%s> to %s\n",
		       b_ptr->name, addr_string);
259 260
		return NULL;
	}
P
Per Liden 已提交
261

262
	l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
P
Per Liden 已提交
263
	if (!l_ptr) {
264
		pr_warn("Link creation failed, no memory\n");
P
Per Liden 已提交
265 266 267 268
		return NULL;
	}

	l_ptr->addr = peer;
269
	if_name = strchr(b_ptr->name, ':') + 1;
270
	sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
P
Per Liden 已提交
271
		tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
272
		tipc_node(tipc_own_addr),
P
Per Liden 已提交
273 274
		if_name,
		tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
275
		/* note: peer i/f name is updated by reset/activate message */
P
Per Liden 已提交
276
	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
277
	l_ptr->owner = n_ptr;
P
Per Liden 已提交
278
	l_ptr->checkpoint = 1;
279
	l_ptr->peer_session = INVALID_SESSION;
280
	l_ptr->bearer_id = b_ptr->identity;
281
	link_set_supervision_props(l_ptr, b_ptr->tolerance);
P
Per Liden 已提交
282 283 284 285
	l_ptr->state = RESET_UNKNOWN;

	l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
	msg = l_ptr->pmsg;
286
	tipc_msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr);
P
Per Liden 已提交
287
	msg_set_size(msg, sizeof(l_ptr->proto_msg));
288
	msg_set_session(msg, (tipc_random & 0xffff));
P
Per Liden 已提交
289 290 291 292
	msg_set_bearer_id(msg, b_ptr->identity);
	strcpy((char *)msg_data(msg), if_name);

	l_ptr->priority = b_ptr->priority;
293
	tipc_link_set_queue_limits(l_ptr, b_ptr->window);
P
Per Liden 已提交
294

295
	l_ptr->net_plane = b_ptr->net_plane;
P
Per Liden 已提交
296 297 298
	link_init_max_pkt(l_ptr);

	l_ptr->next_out_no = 1;
299
	__skb_queue_head_init(&l_ptr->outqueue);
300
	__skb_queue_head_init(&l_ptr->deferred_queue);
301
	skb_queue_head_init(&l_ptr->waiting_sks);
P
Per Liden 已提交
302 303 304

	link_reset_statistics(l_ptr);

305
	tipc_node_attach_link(n_ptr, l_ptr);
P
Per Liden 已提交
306

307
	setup_timer(&l_ptr->timer, link_timeout, (unsigned long)l_ptr);
308 309

	link_state_event(l_ptr, STARTING_EVT);
P
Per Liden 已提交
310 311 312 313

	return l_ptr;
}

314 315
void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
			   bool shutting_down)
316
{
317
	struct tipc_net *tn = net_generic(net, tipc_net_id);
318
	struct tipc_link *l_ptr;
319
	struct tipc_node *n_ptr;
320

321
	rcu_read_lock();
322
	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
323
		tipc_node_lock(n_ptr);
324 325 326
		l_ptr = n_ptr->links[bearer_id];
		if (l_ptr) {
			tipc_link_reset(l_ptr);
327 328 329
			if (shutting_down || !tipc_node_is_up(n_ptr)) {
				tipc_node_detach_link(l_ptr->owner, l_ptr);
				tipc_link_reset_fragments(l_ptr);
330
				tipc_node_unlock(n_ptr);
331 332 333 334 335 336 337

				/* Nobody else can access this link now: */
				del_timer_sync(&l_ptr->timer);
				kfree(l_ptr);
			} else {
				/* Detach/delete when failover is finished: */
				l_ptr->flags |= LINK_STOPPED;
338
				tipc_node_unlock(n_ptr);
339 340
				del_timer_sync(&l_ptr->timer);
			}
341 342
			continue;
		}
343
		tipc_node_unlock(n_ptr);
344
	}
345
	rcu_read_unlock();
346
}
P
Per Liden 已提交
347 348

/**
349 350 351 352 353 354
 * link_schedule_user - schedule user for wakeup after congestion
 * @link: congested link
 * @oport: sending port
 * @chain_sz: size of buffer chain that was attempted sent
 * @imp: importance of message attempted sent
 * Create pseudo msg to send back to user when congestion abates
P
Per Liden 已提交
355
 */
356 357
static bool link_schedule_user(struct tipc_link *link, u32 oport,
			       uint chain_sz, uint imp)
P
Per Liden 已提交
358
{
359 360 361 362 363 364 365 366
	struct sk_buff *buf;

	buf = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0, tipc_own_addr,
			      tipc_own_addr, oport, 0, 0);
	if (!buf)
		return false;
	TIPC_SKB_CB(buf)->chain_sz = chain_sz;
	TIPC_SKB_CB(buf)->chain_imp = imp;
367
	skb_queue_tail(&link->waiting_sks, buf);
368 369
	link->stats.link_congs++;
	return true;
P
Per Liden 已提交
370 371
}

372 373 374 375 376 377 378
/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @link: congested link
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to node wait queue for wakeup
 */
static void link_prepare_wakeup(struct tipc_link *link)
P
Per Liden 已提交
379
{
380
	uint pend_qsz = skb_queue_len(&link->outqueue);
381
	struct sk_buff *skb, *tmp;
382

383 384
	skb_queue_walk_safe(&link->waiting_sks, skb, tmp) {
		if (pend_qsz >= link->queue_limit[TIPC_SKB_CB(skb)->chain_imp])
P
Per Liden 已提交
385
			break;
386
		pend_qsz += TIPC_SKB_CB(skb)->chain_sz;
387 388
		skb_unlink(skb, &link->waiting_sks);
		skb_queue_tail(&link->owner->waiting_sks, skb);
P
Per Liden 已提交
389 390 391 392
	}
}

/**
393
 * tipc_link_reset_fragments - purge link's inbound message fragments queue
P
Per Liden 已提交
394 395
 * @l_ptr: pointer to link
 */
396
void tipc_link_reset_fragments(struct tipc_link *l_ptr)
P
Per Liden 已提交
397
{
398 399
	kfree_skb(l_ptr->reasm_buf);
	l_ptr->reasm_buf = NULL;
P
Per Liden 已提交
400 401
}

402
/**
403
 * tipc_link_purge_queues - purge all pkt queues associated with link
P
Per Liden 已提交
404 405
 * @l_ptr: pointer to link
 */
406
void tipc_link_purge_queues(struct tipc_link *l_ptr)
P
Per Liden 已提交
407
{
408
	__skb_queue_purge(&l_ptr->deferred_queue);
409
	__skb_queue_purge(&l_ptr->outqueue);
410
	tipc_link_reset_fragments(l_ptr);
P
Per Liden 已提交
411 412
}

413
void tipc_link_reset(struct tipc_link *l_ptr)
P
Per Liden 已提交
414 415 416
{
	u32 prev_state = l_ptr->state;
	u32 checkpoint = l_ptr->next_in_no;
417
	int was_active_link = tipc_link_is_active(l_ptr);
418
	struct tipc_node *owner = l_ptr->owner;
419

420
	msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));
P
Per Liden 已提交
421

422 423
	/* Link is down, accept any session */
	l_ptr->peer_session = INVALID_SESSION;
P
Per Liden 已提交
424

425
	/* Prepare for max packet size negotiation */
P
Per Liden 已提交
426
	link_init_max_pkt(l_ptr);
427

P
Per Liden 已提交
428 429 430 431 432
	l_ptr->state = RESET_UNKNOWN;

	if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
		return;

433
	tipc_node_link_down(l_ptr->owner, l_ptr);
434
	tipc_bearer_remove_dest(l_ptr->bearer_id, l_ptr->addr);
435

436
	if (was_active_link && tipc_node_active_links(l_ptr->owner)) {
P
Per Liden 已提交
437 438 439 440 441
		l_ptr->reset_checkpoint = checkpoint;
		l_ptr->exp_msg_count = START_CHANGEOVER;
	}

	/* Clean up all queues: */
442
	__skb_queue_purge(&l_ptr->outqueue);
443
	__skb_queue_purge(&l_ptr->deferred_queue);
444 445 446 447
	if (!skb_queue_empty(&l_ptr->waiting_sks)) {
		skb_queue_splice_init(&l_ptr->waiting_sks, &owner->waiting_sks);
		owner->action_flags |= TIPC_WAKEUP_USERS;
	}
P
Per Liden 已提交
448 449 450 451 452 453 454 455 456
	l_ptr->next_out = NULL;
	l_ptr->unacked_window = 0;
	l_ptr->checkpoint = 1;
	l_ptr->next_out_no = 1;
	l_ptr->fsm_msg_cnt = 0;
	l_ptr->stale_count = 0;
	link_reset_statistics(l_ptr);
}

457
void tipc_link_reset_list(struct net *net, unsigned int bearer_id)
458
{
459
	struct tipc_net *tn = net_generic(net, tipc_net_id);
460
	struct tipc_link *l_ptr;
461
	struct tipc_node *n_ptr;
462

463
	rcu_read_lock();
464
	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
465
		tipc_node_lock(n_ptr);
466 467 468
		l_ptr = n_ptr->links[bearer_id];
		if (l_ptr)
			tipc_link_reset(l_ptr);
469
		tipc_node_unlock(n_ptr);
470
	}
471
	rcu_read_unlock();
472
}

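/* link_activate - reset receive counters, tell the node layer that the link
 * is up and register the peer address with the bearer
 */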
static void link_activate(struct tipc_link *l_ptr)
P
Per Liden 已提交
475
{
476
	l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
477
	tipc_node_link_up(l_ptr->owner, l_ptr);
478
	tipc_bearer_add_dest(l_ptr->bearer_id, l_ptr->addr);
P
Per Liden 已提交
479 480 481 482 483 484 485
}

/**
 * link_state_event - link finite state machine
 * @l_ptr: pointer to link
 * @event: state machine event to process
 */
486
static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
P
Per Liden 已提交
487
{
488
	struct tipc_link *other;
489
	unsigned long cont_intv = l_ptr->cont_intv;
P
Per Liden 已提交
490

491 492 493
	if (l_ptr->flags & LINK_STOPPED)
		return;

494
	if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
P
Per Liden 已提交
495 496
		return;		/* Not yet. */

497 498
	/* Check whether changeover is going on */
	if (l_ptr->exp_msg_count) {
499
		if (event == TIMEOUT_EVT)
P
Per Liden 已提交
500
			link_set_timer(l_ptr, cont_intv);
501
		return;
P
Per Liden 已提交
502 503 504 505 506 507 508 509 510 511 512
	}

	switch (l_ptr->state) {
	case WORKING_WORKING:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			break;
		case TIMEOUT_EVT:
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				l_ptr->checkpoint = l_ptr->next_in_no;
513
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
514 515
					tipc_link_proto_xmit(l_ptr, STATE_MSG,
							     0, 0, 0, 0, 0);
P
Per Liden 已提交
516 517
					l_ptr->fsm_msg_cnt++;
				} else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
518 519
					tipc_link_proto_xmit(l_ptr, STATE_MSG,
							     1, 0, 0, 0, 0);
P
Per Liden 已提交
520 521 522 523 524 525 526
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
				break;
			}
			l_ptr->state = WORKING_UNKNOWN;
			l_ptr->fsm_msg_cnt = 0;
527
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
P
Per Liden 已提交
528 529 530 531
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv / 4);
			break;
		case RESET_MSG:
532 533
			pr_info("%s<%s>, requested by peer\n", link_rst_msg,
				l_ptr->name);
534
			tipc_link_reset(l_ptr);
P
Per Liden 已提交
535 536
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
537 538
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     0, 0, 0, 0, 0);
P
Per Liden 已提交
539 540 541 542
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
543
			pr_err("%s%u in WW state\n", link_unk_evt, event);
P
Per Liden 已提交
544 545 546 547 548 549 550 551 552 553 554
		}
		break;
	case WORKING_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
555 556
			pr_info("%s<%s>, requested by peer while probing\n",
				link_rst_msg, l_ptr->name);
557
			tipc_link_reset(l_ptr);
P
Per Liden 已提交
558 559
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
560 561
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     0, 0, 0, 0, 0);
P
Per Liden 已提交
562 563 564 565 566 567 568 569
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case TIMEOUT_EVT:
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				l_ptr->state = WORKING_WORKING;
				l_ptr->fsm_msg_cnt = 0;
				l_ptr->checkpoint = l_ptr->next_in_no;
570
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
571 572
					tipc_link_proto_xmit(l_ptr, STATE_MSG,
							     0, 0, 0, 0, 0);
P
Per Liden 已提交
573 574 575 576
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
			} else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
577 578
				tipc_link_proto_xmit(l_ptr, STATE_MSG,
						     1, 0, 0, 0, 0);
P
Per Liden 已提交
579 580 581
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv / 4);
			} else {	/* Link has failed */
582 583
				pr_warn("%s<%s>, peer not responding\n",
					link_rst_msg, l_ptr->name);
584
				tipc_link_reset(l_ptr);
P
Per Liden 已提交
585 586
				l_ptr->state = RESET_UNKNOWN;
				l_ptr->fsm_msg_cnt = 0;
587 588
				tipc_link_proto_xmit(l_ptr, RESET_MSG,
						     0, 0, 0, 0, 0);
P
Per Liden 已提交
589 590 591 592 593
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv);
			}
			break;
		default:
594
			pr_err("%s%u in WU state\n", link_unk_evt, event);
P
Per Liden 已提交
595 596 597 598 599 600 601 602
		}
		break;
	case RESET_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
			break;
		case ACTIVATE_MSG:
			other = l_ptr->owner->active_links[0];
603
			if (other && link_working_unknown(other))
P
Per Liden 已提交
604 605 606 607
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
608
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
P
Per Liden 已提交
609
			l_ptr->fsm_msg_cnt++;
610
			if (l_ptr->owner->working_links == 1)
611
				tipc_link_sync_xmit(l_ptr);
P
Per Liden 已提交
612 613 614 615 616
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
617 618
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     1, 0, 0, 0, 0);
P
Per Liden 已提交
619 620 621 622
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case STARTING_EVT:
623
			l_ptr->flags |= LINK_STARTED;
P
Per Liden 已提交
624 625
			/* fall through */
		case TIMEOUT_EVT:
626
			tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
P
Per Liden 已提交
627 628 629 630
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
631
			pr_err("%s%u in RU state\n", link_unk_evt, event);
P
Per Liden 已提交
632 633 634 635 636 637 638
		}
		break;
	case RESET_RESET:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			other = l_ptr->owner->active_links[0];
639
			if (other && link_working_unknown(other))
P
Per Liden 已提交
640 641 642 643
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
644
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
P
Per Liden 已提交
645
			l_ptr->fsm_msg_cnt++;
646
			if (l_ptr->owner->working_links == 1)
647
				tipc_link_sync_xmit(l_ptr);
P
Per Liden 已提交
648 649 650 651 652
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			break;
		case TIMEOUT_EVT:
653 654
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     0, 0, 0, 0, 0);
P
Per Liden 已提交
655 656 657 658
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
659
			pr_err("%s%u in RR state\n", link_unk_evt, event);
P
Per Liden 已提交
660 661 662
		}
		break;
	default:
663
		pr_err("Unknown link state %u/%u\n", l_ptr->state, event);
P
Per Liden 已提交
664 665 666
	}
}

/* tipc_link_cong: determine return value and how to treat the
 * sent buffer during link congestion.
 * - For plain, errorless user data messages we keep the buffer and
 *   return -ELINKCONG.
 * - For all other messages we discard the buffer and return -EHOSTUNREACH.
 * - For TIPC internal messages we also reset the link.
 */
674
static int tipc_link_cong(struct tipc_link *link, struct sk_buff_head *list)
675
{
676 677
	struct sk_buff *skb = skb_peek(list);
	struct tipc_msg *msg = buf_msg(skb);
678 679 680
	uint imp = tipc_msg_tot_importance(msg);
	u32 oport = msg_tot_origport(msg);

681
	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
682 683
		pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
		tipc_link_reset(link);
684
		goto drop;
685
	}
686 687 688 689
	if (unlikely(msg_errcode(msg)))
		goto drop;
	if (unlikely(msg_reroute_cnt(msg)))
		goto drop;
690
	if (TIPC_SKB_CB(skb)->wakeup_pending)
691
		return -ELINKCONG;
692
	if (link_schedule_user(link, oport, skb_queue_len(list), imp))
693 694
		return -ELINKCONG;
drop:
695
	__skb_queue_purge(list);
696 697 698 699
	return -EHOSTUNREACH;
}

/**
700
 * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
701
 * @link: link to use
702 703
 * @list: chain of buffers containing message
 *
704 705 706 707 708 709
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG, -EMSGSIZE (plain socket
 * user data messages) or -EHOSTUNREACH (all other messages/senders)
 * Only the socket functions tipc_send_stream() and tipc_send_packet() need
 * to act on the return value, since they may need to do more send attempts.
 */
710
int __tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list)
711
{
712
	struct tipc_msg *msg = buf_msg(skb_peek(list));
713 714 715 716 717 718 719 720
	uint psz = msg_size(msg);
	uint sndlim = link->queue_limit[0];
	uint imp = tipc_msg_tot_importance(msg);
	uint mtu = link->max_pkt;
	uint ack = mod(link->next_in_no - 1);
	uint seqno = link->next_out_no;
	uint bc_last_in = link->owner->bclink.last_in;
	struct tipc_media_addr *addr = &link->media_addr;
721
	struct sk_buff_head *outqueue = &link->outqueue;
722
	struct sk_buff *skb, *tmp;
723 724

	/* Match queue limits against msg importance: */
725
	if (unlikely(skb_queue_len(outqueue) >= link->queue_limit[imp]))
726
		return tipc_link_cong(link, list);
727 728 729

	/* Has valid packet limit been used ? */
	if (unlikely(psz > mtu)) {
730
		__skb_queue_purge(list);
731 732 733 734
		return -EMSGSIZE;
	}

	/* Prepare each packet for sending, and add to outqueue: */
735 736
	skb_queue_walk_safe(list, skb, tmp) {
		__skb_unlink(skb, list);
737
		msg = buf_msg(skb);
738 739 740
		msg_set_word(msg, 2, ((ack << 16) | mod(seqno)));
		msg_set_bcast_ack(msg, bc_last_in);

741 742 743 744 745 746
		if (skb_queue_len(outqueue) < sndlim) {
			__skb_queue_tail(outqueue, skb);
			tipc_bearer_send(link->bearer_id, skb, addr);
			link->next_out = NULL;
			link->unacked_window = 0;
		} else if (tipc_msg_bundle(outqueue, skb, mtu)) {
747 748
			link->stats.sent_bundled++;
			continue;
749 750
		} else if (tipc_msg_make_bundle(outqueue, skb, mtu,
						link->addr)) {
751 752 753
			link->stats.sent_bundled++;
			link->stats.sent_bundles++;
			if (!link->next_out)
754
				link->next_out = skb_peek_tail(outqueue);
755
		} else {
756
			__skb_queue_tail(outqueue, skb);
757
			if (!link->next_out)
758
				link->next_out = skb;
759 760 761 762 763 764 765
		}
		seqno++;
	}
	link->next_out_no = seqno;
	return 0;
}

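/* skb2list - wrap a single buffer in an initialized queue head so it can be
 * passed to the list-based xmit functions; __tipc_link_xmit_skb() uses this
 * to send one buffer over an already known and locked link
 */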
static void skb2list(struct sk_buff *skb, struct sk_buff_head *list)
{
	__skb_queue_head_init(list);
	__skb_queue_tail(list, skb);
}

static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
{
	struct sk_buff_head head;

	skb2list(skb, &head);
	return __tipc_link_xmit(link, &head);
}

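/* tipc_link_xmit_skb - send a single buffer to 'dnode', selecting the
 * outgoing link by 'selector'; see tipc_link_xmit() for return values
 */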
int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
		       u32 selector)
782 783 784 785
{
	struct sk_buff_head head;

	skb2list(skb, &head);
786
	return tipc_link_xmit(net, &head, dnode, selector);
787 788
}

789
/**
790
 * tipc_link_xmit() is the general link level function for message sending
791
 * @net: the applicable net namespace
792
 * @list: chain of buffers containing message
 * @dnode: address of destination node
 * @selector: a number used for deterministic link selection
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
 */
799 800
int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dnode,
		   u32 selector)
801 802 803 804 805
{
	struct tipc_link *link = NULL;
	struct tipc_node *node;
	int rc = -EHOSTUNREACH;

806
	node = tipc_node_find(net, dnode);
807 808 809 810
	if (node) {
		tipc_node_lock(node);
		link = node->active_links[selector & 1];
		if (link)
811
			rc = __tipc_link_xmit(link, list);
812 813 814 815 816 817
		tipc_node_unlock(node);
	}

	if (link)
		return rc;

818 819 820 821 822
	if (likely(in_own_node(dnode))) {
		/* As a node local message chain never contains more than one
		 * buffer, we just need to dequeue one SKB buffer from the
		 * head list.
		 */
823
		return tipc_sk_rcv(net, __skb_dequeue(list));
824 825
	}
	__skb_queue_purge(list);
826 827 828 829

	return rc;
}

830
/*
831
 * tipc_link_sync_xmit - synchronize broadcast link endpoints.
832 833 834 835 836 837
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 *
 * Called with node locked
 */
838
static void tipc_link_sync_xmit(struct tipc_link *link)
839
{
840
	struct sk_buff *skb;
841 842
	struct tipc_msg *msg;

843 844
	skb = tipc_buf_acquire(INT_H_SIZE);
	if (!skb)
845 846
		return;

847
	msg = buf_msg(skb);
848 849
	tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, link->addr);
	msg_set_last_bcast(msg, link->owner->bclink.acked);
850
	__tipc_link_xmit_skb(link, skb);
851 852 853
}

/*
854
 * tipc_link_sync_rcv - synchronize broadcast link endpoints.
855 856 857 858 859 860
 * Receive the sequence number where we should start receiving and
 * acking broadcast packets from a newly added peer node, and open
 * up for reception of such packets.
 *
 * Called with node locked
 */
861
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
862 863 864 865 866 867 868 869
{
	struct tipc_msg *msg = buf_msg(buf);

	n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
	n->bclink.recv_permitted = true;
	kfree_skb(buf);
}

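/* tipc_skb_queue_next - return the buffer following 'skb' in 'list',
 * or NULL if 'skb' is the last one
 */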
struct sk_buff *tipc_skb_queue_next(const struct sk_buff_head *list,
				    const struct sk_buff *skb)
{
	if (skb_queue_is_last(list, skb))
		return NULL;
	return skb->next;
}

878
/*
879 880 881 882 883 884
 * tipc_link_push_packets - push unsent packets to bearer
 *
 * Push out the unsent messages of a link where congestion
 * has abated.
 *
 * Called with node locked
P
Per Liden 已提交
885
 */
886
void tipc_link_push_packets(struct tipc_link *l_ptr)
P
Per Liden 已提交
887
{
888 889
	struct sk_buff_head *outqueue = &l_ptr->outqueue;
	struct sk_buff *skb = l_ptr->next_out;
890 891
	struct tipc_msg *msg;
	u32 next, first;
P
Per Liden 已提交
892

893
	skb_queue_walk_from(outqueue, skb) {
894 895
		msg = buf_msg(skb);
		next = msg_seqno(msg);
896
		first = buf_seqno(skb_peek(outqueue));
P
Per Liden 已提交
897 898 899

		if (mod(next - first) < l_ptr->queue_limit[0]) {
			msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
900
			msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
901
			if (msg_user(msg) == MSG_BUNDLER)
902
				TIPC_SKB_CB(skb)->bundling = false;
903 904
			tipc_bearer_send(l_ptr->bearer_id, skb,
					 &l_ptr->media_addr);
905
			l_ptr->next_out = tipc_skb_queue_next(outqueue, skb);
906 907
		} else {
			break;
P
Per Liden 已提交
908 909 910 911
		}
	}
}

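/* tipc_link_reset_all - reset every link attached to the given node
 * (takes and releases the node lock itself)
 */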
void tipc_link_reset_all(struct tipc_node *node)
913 914 915 916
{
	char addr_string[16];
	u32 i;

917
	tipc_node_lock(node);
918

919
	pr_warn("Resetting all links to %s\n",
920
		tipc_addr_string_fill(addr_string, node->addr));
921 922

	for (i = 0; i < MAX_BEARERS; i++) {
923 924 925
		if (node->links[i]) {
			link_print(node->links[i], "Resetting link\n");
			tipc_link_reset(node->links[i]);
926 927 928
		}
	}

929
	tipc_node_unlock(node);
930 931
}

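/* link_retransmit_failure - the same packet has been retransmitted repeatedly
 * without being acked: reset a unicast link, or dump broadcast link state and
 * flag the broadcast link for reset
 */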
static void link_retransmit_failure(struct tipc_link *l_ptr,
				    struct sk_buff *buf)
934 935 936
{
	struct tipc_msg *msg = buf_msg(buf);

937
	pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);
938 939 940

	if (l_ptr->addr) {
		/* Handle failure on standard link */
941
		link_print(l_ptr, "Resetting link\n");
942 943 944 945
		tipc_link_reset(l_ptr);

	} else {
		/* Handle failure on broadcast link */
946
		struct tipc_node *n_ptr;
947 948
		char addr_string[16];

949 950 951
		pr_info("Msg seq number: %u,  ", msg_seqno(msg));
		pr_cont("Outstanding acks: %lu\n",
			(unsigned long) TIPC_SKB_CB(buf)->handle);
J
Jeff Garzik 已提交
952

953
		n_ptr = tipc_bclink_retransmit_to();
954 955
		tipc_node_lock(n_ptr);

956
		tipc_addr_string_fill(addr_string, n_ptr->addr);
957
		pr_info("Broadcast link info for %s\n", addr_string);
958 959
		pr_info("Reception permitted: %d,  Acked: %u\n",
			n_ptr->bclink.recv_permitted,
960 961 962 963 964
			n_ptr->bclink.acked);
		pr_info("Last in: %u,  Oos state: %u,  Last sent: %u\n",
			n_ptr->bclink.last_in,
			n_ptr->bclink.oos_state,
			n_ptr->bclink.last_sent);
965 966 967

		tipc_node_unlock(n_ptr);

968
		tipc_bclink_set_flags(TIPC_BCLINK_RESET);
969 970 971 972
		l_ptr->stale_count = 0;
	}
}

973
void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
974
			  u32 retransmits)
P
Per Liden 已提交
975 976 977
{
	struct tipc_msg *msg;

978
	if (!skb)
979 980
		return;

981
	msg = buf_msg(skb);
982

983 984 985
	/* Detect repeated retransmit failures */
	if (l_ptr->last_retransmitted == msg_seqno(msg)) {
		if (++l_ptr->stale_count > 100) {
986
			link_retransmit_failure(l_ptr, skb);
987
			return;
988 989
		}
	} else {
990 991
		l_ptr->last_retransmitted = msg_seqno(msg);
		l_ptr->stale_count = 1;
P
Per Liden 已提交
992
	}
993

994 995 996 997
	skb_queue_walk_from(&l_ptr->outqueue, skb) {
		if (!retransmits || skb == l_ptr->next_out)
			break;
		msg = buf_msg(skb);
P
Per Liden 已提交
998
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
999
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1000
		tipc_bearer_send(l_ptr->bearer_id, skb, &l_ptr->media_addr);
1001 1002
		retransmits--;
		l_ptr->stats.retransmitted++;
P
Per Liden 已提交
1003 1004 1005
	}
}

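/* link_retrieve_defq - splice the deferred queue onto 'list' when its head
 * carries the next expected sequence number
 */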
static void link_retrieve_defq(struct tipc_link *link,
			       struct sk_buff_head *list)
P
Per Liden 已提交
1008 1009 1010
{
	u32 seq_no;

1011 1012 1013 1014 1015 1016
	if (skb_queue_empty(&link->deferred_queue))
		return;

	seq_no = buf_seqno(skb_peek(&link->deferred_queue));
	if (seq_no == mod(link->next_in_no))
		skb_queue_splice_tail_init(&link->deferred_queue, list);
P
Per Liden 已提交
1017 1018
}

1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033
/**
 * link_recv_buf_validate - validate basic format of received message
 *
 * This routine ensures a TIPC message has an acceptable header, and at least
 * as much data as the header indicates it should.  The routine also ensures
 * that the entire message header is stored in the main fragment of the message
 * buffer, to simplify future access to message header fields.
 *
 * Note: Having extra info present in the message header or data areas is OK.
 * TIPC will ignore the excess, under the assumption that it is optional info
 * introduced by a later release of the protocol.
 */
static int link_recv_buf_validate(struct sk_buff *buf)
{
	static u32 min_data_hdr_size[8] = {
1034
		SHORT_H_SIZE, MCAST_H_SIZE, NAMED_H_SIZE, BASIC_H_SIZE,
1035 1036 1037 1038 1039 1040 1041 1042 1043
		MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE
		};

	struct tipc_msg *msg;
	u32 tipc_hdr[2];
	u32 size;
	u32 hdr_size;
	u32 min_hdr_size;

1044 1045 1046 1047 1048 1049
	/* If this packet comes from the defer queue, the skb has already
	 * been validated
	 */
	if (unlikely(TIPC_SKB_CB(buf)->deferred))
		return 1;

1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073
	if (unlikely(buf->len < MIN_H_SIZE))
		return 0;

	msg = skb_header_pointer(buf, 0, sizeof(tipc_hdr), tipc_hdr);
	if (msg == NULL)
		return 0;

	if (unlikely(msg_version(msg) != TIPC_VERSION))
		return 0;

	size = msg_size(msg);
	hdr_size = msg_hdr_sz(msg);
	min_hdr_size = msg_isdata(msg) ?
		min_data_hdr_size[msg_type(msg)] : INT_H_SIZE;

	if (unlikely((hdr_size < min_hdr_size) ||
		     (size < hdr_size) ||
		     (buf->len < size) ||
		     (size - hdr_size > TIPC_MAX_USER_MSG_SIZE)))
		return 0;

	return pskb_may_pull(buf, hdr_size);
}

1074
/**
1075
 * tipc_rcv - process TIPC packets/messages arriving from off-node
1076
 * @net: the applicable net namespace
1077
 * @skb: TIPC packet
1078
 * @b_ptr: pointer to bearer message arrived on
1079 1080 1081 1082
 *
 * Invoked with no locks held.  Bearer pointer must point to a valid bearer
 * structure (i.e. cannot be NULL), but bearer can be inactive.
 */
1083
void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
P
Per Liden 已提交
1084
{
1085 1086 1087 1088 1089 1090 1091 1092
	struct sk_buff_head head;
	struct tipc_node *n_ptr;
	struct tipc_link *l_ptr;
	struct sk_buff *skb1, *tmp;
	struct tipc_msg *msg;
	u32 seq_no;
	u32 ackd;
	u32 released;
P
Per Liden 已提交
1093

1094
	skb2list(skb, &head);
1095

1096
	while ((skb = __skb_dequeue(&head))) {
1097
		/* Ensure message is well-formed */
1098
		if (unlikely(!link_recv_buf_validate(skb)))
1099
			goto discard;
P
Per Liden 已提交
1100

1101
		/* Ensure message data is a single contiguous unit */
1102
		if (unlikely(skb_linearize(skb)))
1103
			goto discard;
1104

1105
		/* Handle arrival of a non-unicast link message */
1106
		msg = buf_msg(skb);
1107

P
Per Liden 已提交
1108
		if (unlikely(msg_non_seq(msg))) {
1109
			if (msg_user(msg) ==  LINK_CONFIG)
1110
				tipc_disc_rcv(net, skb, b_ptr);
1111
			else
1112
				tipc_bclink_rcv(net, skb);
P
Per Liden 已提交
1113 1114
			continue;
		}
1115

1116
		/* Discard unicast link messages destined for another node */
1117 1118
		if (unlikely(!msg_short(msg) &&
			     (msg_destnode(msg) != tipc_own_addr)))
1119
			goto discard;
1120

1121
		/* Locate neighboring node that sent message */
1122
		n_ptr = tipc_node_find(net, msg_prevnode(msg));
P
Per Liden 已提交
1123
		if (unlikely(!n_ptr))
1124
			goto discard;
1125
		tipc_node_lock(n_ptr);
1126

1127 1128
		/* Locate unicast link endpoint that should handle message */
		l_ptr = n_ptr->links[b_ptr->identity];
1129 1130
		if (unlikely(!l_ptr))
			goto unlock_discard;
1131

1132
		/* Verify that communication with node is currently allowed */
Y
Ying Xue 已提交
1133
		if ((n_ptr->action_flags & TIPC_WAIT_PEER_LINKS_DOWN) &&
1134 1135 1136 1137
		    msg_user(msg) == LINK_PROTOCOL &&
		    (msg_type(msg) == RESET_MSG ||
		    msg_type(msg) == ACTIVATE_MSG) &&
		    !msg_redundant_link(msg))
Y
Ying Xue 已提交
1138
			n_ptr->action_flags &= ~TIPC_WAIT_PEER_LINKS_DOWN;
1139 1140

		if (tipc_node_blocked(n_ptr))
1141
			goto unlock_discard;
1142 1143 1144 1145 1146 1147

		/* Validate message sequence number info */
		seq_no = msg_seqno(msg);
		ackd = msg_ack(msg);

		/* Release acked messages */
1148
		if (n_ptr->bclink.recv_permitted)
1149
			tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
P
Per Liden 已提交
1150

1151 1152 1153 1154 1155 1156 1157 1158
		released = 0;
		skb_queue_walk_safe(&l_ptr->outqueue, skb1, tmp) {
			if (skb1 == l_ptr->next_out ||
			    more(buf_seqno(skb1), ackd))
				break;
			 __skb_unlink(skb1, &l_ptr->outqueue);
			 kfree_skb(skb1);
			 released = 1;
P
Per Liden 已提交
1159
		}
1160 1161

		/* Try sending any messages link endpoint has pending */
P
Per Liden 已提交
1162
		if (unlikely(l_ptr->next_out))
1163
			tipc_link_push_packets(l_ptr);
1164

1165 1166 1167 1168
		if (released && !skb_queue_empty(&l_ptr->waiting_sks)) {
			link_prepare_wakeup(l_ptr);
			l_ptr->owner->action_flags |= TIPC_WAKEUP_USERS;
		}
1169 1170

		/* Process the incoming packet */
1171 1172
		if (unlikely(!link_working_working(l_ptr))) {
			if (msg_user(msg) == LINK_PROTOCOL) {
1173
				tipc_link_proto_rcv(net, l_ptr, skb);
1174
				link_retrieve_defq(l_ptr, &head);
1175
				tipc_node_unlock(n_ptr);
P
Per Liden 已提交
1176 1177
				continue;
			}
1178 1179 1180 1181 1182 1183

			/* Traffic message. Conditionally activate link */
			link_state_event(l_ptr, TRAFFIC_MSG_EVT);

			if (link_working_working(l_ptr)) {
				/* Re-insert buffer in front of queue */
1184
				__skb_queue_head(&head, skb);
1185 1186 1187 1188 1189 1190 1191 1192
				tipc_node_unlock(n_ptr);
				continue;
			}
			goto unlock_discard;
		}

		/* Link is now in state WORKING_WORKING */
		if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
1193
			link_handle_out_of_seq_msg(net, l_ptr, skb);
1194
			link_retrieve_defq(l_ptr, &head);
1195
			tipc_node_unlock(n_ptr);
P
Per Liden 已提交
1196 1197
			continue;
		}
1198
		l_ptr->next_in_no++;
1199
		if (unlikely(!skb_queue_empty(&l_ptr->deferred_queue)))
1200
			link_retrieve_defq(l_ptr, &head);
1201

1202 1203 1204 1205 1206
		if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
			l_ptr->stats.sent_acks++;
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
		}

1207
		if (tipc_link_prepare_input(net, l_ptr, &skb)) {
1208 1209
			tipc_node_unlock(n_ptr);
			continue;
P
Per Liden 已提交
1210
		}
1211
		tipc_node_unlock(n_ptr);
1212

1213
		if (tipc_link_input(net, l_ptr, skb) != 0)
1214
			goto discard;
1215 1216 1217 1218
		continue;
unlock_discard:
		tipc_node_unlock(n_ptr);
discard:
1219
		kfree_skb(skb);
P
Per Liden 已提交
1220 1221 1222
	}
}

1223 1224 1225 1226 1227 1228 1229
/**
 * tipc_link_prepare_input - process TIPC link messages
 *
 * returns nonzero if the message was consumed
 *
 * Node lock must be held
 */
1230 1231
static int tipc_link_prepare_input(struct net *net, struct tipc_link *l,
				   struct sk_buff **buf)
1232 1233 1234 1235 1236 1237 1238 1239 1240
{
	struct tipc_node *n;
	struct tipc_msg *msg;
	int res = -EINVAL;

	n = l->owner;
	msg = buf_msg(*buf);
	switch (msg_user(msg)) {
	case CHANGEOVER_PROTOCOL:
1241
		if (tipc_link_tunnel_rcv(net, n, buf))
1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272
			res = 0;
		break;
	case MSG_FRAGMENTER:
		l->stats.recv_fragments++;
		if (tipc_buf_append(&l->reasm_buf, buf)) {
			l->stats.recv_fragmented++;
			res = 0;
		} else if (!l->reasm_buf) {
			tipc_link_reset(l);
		}
		break;
	case MSG_BUNDLER:
		l->stats.recv_bundles++;
		l->stats.recv_bundled += msg_msgcnt(msg);
		res = 0;
		break;
	case NAME_DISTRIBUTOR:
		n->bclink.recv_permitted = true;
		res = 0;
		break;
	case BCAST_PROTOCOL:
		tipc_link_sync_rcv(n, *buf);
		break;
	default:
		res = 0;
	}
	return res;
}
/**
 * tipc_link_input - Deliver message to higher layers
 */
1273 1274
static int tipc_link_input(struct net *net, struct tipc_link *l,
			   struct sk_buff *buf)
1275 1276 1277 1278 1279 1280 1281 1282 1283 1284
{
	struct tipc_msg *msg = buf_msg(buf);
	int res = 0;

	switch (msg_user(msg)) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
	case CONN_MANAGER:
1285
		tipc_sk_rcv(net, buf);
1286 1287
		break;
	case NAME_DISTRIBUTOR:
1288
		tipc_named_rcv(net, buf);
1289 1290
		break;
	case MSG_BUNDLER:
1291
		tipc_link_bundle_rcv(net, buf);
1292 1293 1294 1295 1296 1297 1298
		break;
	default:
		res = -EINVAL;
	}
	return res;
}

1299
/**
1300 1301 1302
 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
 *
 * Returns increase in queue length (i.e. 0 or 1)
P
Per Liden 已提交
1303
 */
1304
u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
P
Per Liden 已提交
1305
{
1306 1307
	struct sk_buff *skb1;
	u32 seq_no = buf_seqno(skb);
P
Per Liden 已提交
1308 1309

	/* Empty queue ? */
1310 1311
	if (skb_queue_empty(list)) {
		__skb_queue_tail(list, skb);
P
Per Liden 已提交
1312 1313 1314 1315
		return 1;
	}

	/* Last ? */
1316 1317
	if (less(buf_seqno(skb_peek_tail(list)), seq_no)) {
		__skb_queue_tail(list, skb);
P
Per Liden 已提交
1318 1319 1320
		return 1;
	}

1321
	/* Locate insertion point in queue, then insert; discard if duplicate */
1322 1323
	skb_queue_walk(list, skb1) {
		u32 curr_seqno = buf_seqno(skb1);
P
Per Liden 已提交
1324

1325
		if (seq_no == curr_seqno) {
1326
			kfree_skb(skb);
1327
			return 0;
P
Per Liden 已提交
1328
		}
1329 1330

		if (less(seq_no, curr_seqno))
P
Per Liden 已提交
1331
			break;
1332
	}
P
Per Liden 已提交
1333

1334
	__skb_queue_before(list, skb1, skb);
1335
	return 1;
P
Per Liden 已提交
1336 1337
}

1338
/*
P
Per Liden 已提交
1339 1340
 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
 */
1341 1342
static void link_handle_out_of_seq_msg(struct net *net,
				       struct tipc_link *l_ptr,
P
Per Liden 已提交
1343 1344
				       struct sk_buff *buf)
{
1345
	u32 seq_no = buf_seqno(buf);
P
Per Liden 已提交
1346 1347

	if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
1348
		tipc_link_proto_rcv(net, l_ptr, buf);
P
Per Liden 已提交
1349 1350 1351 1352 1353 1354
		return;
	}

	/* Record OOS packet arrival (force mismatch on next timeout) */
	l_ptr->checkpoint--;

1355
	/*
P
Per Liden 已提交
1356 1357 1358 1359 1360
	 * Discard packet if a duplicate; otherwise add it to deferred queue
	 * and notify peer of gap as per protocol specification
	 */
	if (less(seq_no, mod(l_ptr->next_in_no))) {
		l_ptr->stats.duplicates++;
1361
		kfree_skb(buf);
P
Per Liden 已提交
1362 1363 1364
		return;
	}

1365
	if (tipc_link_defer_pkt(&l_ptr->deferred_queue, buf)) {
P
Per Liden 已提交
1366
		l_ptr->stats.deferred_recv++;
1367
		TIPC_SKB_CB(buf)->deferred = true;
1368
		if ((skb_queue_len(&l_ptr->deferred_queue) % 16) == 1)
1369
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1370
	} else {
P
Per Liden 已提交
1371
		l_ptr->stats.duplicates++;
1372
	}
P
Per Liden 已提交
1373 1374 1375 1376 1377
}

/*
 * Send protocol message to the other endpoint.
 */
1378 1379
void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
			  u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
P
Per Liden 已提交
1380
{
1381
	struct sk_buff *buf = NULL;
P
Per Liden 已提交
1382
	struct tipc_msg *msg = l_ptr->pmsg;
1383
	u32 msg_size = sizeof(l_ptr->proto_msg);
1384
	int r_flag;
P
Per Liden 已提交
1385

1386 1387
	/* Don't send protocol message during link changeover */
	if (l_ptr->exp_msg_count)
P
Per Liden 已提交
1388
		return;
1389 1390

	/* Abort non-RESET send if communication with node is prohibited */
1391
	if ((tipc_node_blocked(l_ptr->owner)) && (msg_typ != RESET_MSG))
1392 1393
		return;

1394
	/* Create protocol message with "out-of-sequence" sequence number */
P
Per Liden 已提交
1395
	msg_set_type(msg, msg_typ);
1396
	msg_set_net_plane(msg, l_ptr->net_plane);
1397
	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1398
	msg_set_last_bcast(msg, tipc_bclink_get_last_sent());
P
Per Liden 已提交
1399 1400 1401 1402

	if (msg_typ == STATE_MSG) {
		u32 next_sent = mod(l_ptr->next_out_no);

1403
		if (!tipc_link_is_up(l_ptr))
P
Per Liden 已提交
1404 1405
			return;
		if (l_ptr->next_out)
1406
			next_sent = buf_seqno(l_ptr->next_out);
P
Per Liden 已提交
1407
		msg_set_next_sent(msg, next_sent);
1408 1409
		if (!skb_queue_empty(&l_ptr->deferred_queue)) {
			u32 rec = buf_seqno(skb_peek(&l_ptr->deferred_queue));
P
Per Liden 已提交
1410 1411 1412 1413 1414 1415 1416 1417 1418 1419
			gap = mod(rec - mod(l_ptr->next_in_no));
		}
		msg_set_seq_gap(msg, gap);
		if (gap)
			l_ptr->stats.sent_nacks++;
		msg_set_link_tolerance(msg, tolerance);
		msg_set_linkprio(msg, priority);
		msg_set_max_pkt(msg, ack_mtu);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
		msg_set_probe(msg, probe_msg != 0);
1420
		if (probe_msg) {
P
Per Liden 已提交
1421 1422
			u32 mtu = l_ptr->max_pkt;

1423
			if ((mtu < l_ptr->max_pkt_target) &&
P
Per Liden 已提交
1424 1425 1426
			    link_working_working(l_ptr) &&
			    l_ptr->fsm_msg_cnt) {
				msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
1427 1428 1429
				if (l_ptr->max_pkt_probes == 10) {
					l_ptr->max_pkt_target = (msg_size - 4);
					l_ptr->max_pkt_probes = 0;
P
Per Liden 已提交
1430
					msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
1431
				}
P
Per Liden 已提交
1432
				l_ptr->max_pkt_probes++;
1433
			}
P
Per Liden 已提交
1434 1435

			l_ptr->stats.sent_probes++;
1436
		}
P
Per Liden 已提交
1437 1438 1439 1440 1441
		l_ptr->stats.sent_states++;
	} else {		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
		msg_set_seq_gap(msg, 0);
		msg_set_next_sent(msg, 1);
1442
		msg_set_probe(msg, 0);
P
Per Liden 已提交
1443 1444 1445 1446 1447
		msg_set_link_tolerance(msg, l_ptr->tolerance);
		msg_set_linkprio(msg, l_ptr->priority);
		msg_set_max_pkt(msg, l_ptr->max_pkt_target);
	}

1448 1449
	r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
	msg_set_redundant_link(msg, r_flag);
P
Per Liden 已提交
1450
	msg_set_linkprio(msg, l_ptr->priority);
1451
	msg_set_size(msg, msg_size);
P
Per Liden 已提交
1452 1453 1454

	msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));

1455
	buf = tipc_buf_acquire(msg_size);
P
Per Liden 已提交
1456 1457 1458
	if (!buf)
		return;

1459
	skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
1460
	buf->priority = TC_PRIO_CONTROL;
P
Per Liden 已提交
1461

1462
	tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
1463
	l_ptr->unacked_window = 0;
1464
	kfree_skb(buf);
P
Per Liden 已提交
1465 1466 1467 1468
}

/*
 * Receive protocol message :
1469 1470
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest address rules
P
Per Liden 已提交
1471
 */
1472 1473
static void tipc_link_proto_rcv(struct net *net, struct tipc_link *l_ptr,
				struct sk_buff *buf)
P
Per Liden 已提交
1474 1475 1476
{
	u32 rec_gap = 0;
	u32 max_pkt_info;
1477
	u32 max_pkt_ack;
P
Per Liden 已提交
1478 1479 1480
	u32 msg_tol;
	struct tipc_msg *msg = buf_msg(buf);

1481 1482
	/* Discard protocol message during link changeover */
	if (l_ptr->exp_msg_count)
P
Per Liden 已提交
1483 1484
		goto exit;

1485
	if (l_ptr->net_plane != msg_net_plane(msg))
P
Per Liden 已提交
1486
		if (tipc_own_addr > msg_prevnode(msg))
1487
			l_ptr->net_plane = msg_net_plane(msg);
P
Per Liden 已提交
1488 1489

	switch (msg_type(msg)) {
1490

P
Per Liden 已提交
1491
	case RESET_MSG:
1492 1493
		if (!link_working_unknown(l_ptr) &&
		    (l_ptr->peer_session != INVALID_SESSION)) {
1494 1495
			if (less_eq(msg_session(msg), l_ptr->peer_session))
				break; /* duplicate or old reset: ignore */
P
Per Liden 已提交
1496
		}
1497 1498 1499 1500 1501 1502 1503

		if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
				link_working_unknown(l_ptr))) {
			/*
			 * peer has lost contact -- don't allow peer's links
			 * to reactivate before we recognize loss & clean up
			 */
1504
			l_ptr->owner->action_flags |= TIPC_WAIT_OWN_LINKS_DOWN;
1505 1506
		}

1507 1508
		link_state_event(l_ptr, RESET_MSG);

P
Per Liden 已提交
1509 1510 1511 1512 1513
		/* fall thru' */
	case ACTIVATE_MSG:
		/* Update link settings according other endpoint's values */
		strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));

1514 1515
		msg_tol = msg_link_tolerance(msg);
		if (msg_tol > l_ptr->tolerance)
P
Per Liden 已提交
1516 1517 1518 1519 1520 1521
			link_set_supervision_props(l_ptr, msg_tol);

		if (msg_linkprio(msg) > l_ptr->priority)
			l_ptr->priority = msg_linkprio(msg);

		max_pkt_info = msg_max_pkt(msg);
1522
		if (max_pkt_info) {
P
Per Liden 已提交
1523 1524 1525 1526 1527
			if (max_pkt_info < l_ptr->max_pkt_target)
				l_ptr->max_pkt_target = max_pkt_info;
			if (l_ptr->max_pkt > l_ptr->max_pkt_target)
				l_ptr->max_pkt = l_ptr->max_pkt_target;
		} else {
1528
			l_ptr->max_pkt = l_ptr->max_pkt_target;
P
Per Liden 已提交
1529 1530
		}

1531
		/* Synchronize broadcast link info, if not done previously */
1532 1533 1534 1535 1536 1537
		if (!tipc_node_is_up(l_ptr->owner)) {
			l_ptr->owner->bclink.last_sent =
				l_ptr->owner->bclink.last_in =
				msg_last_bcast(msg);
			l_ptr->owner->bclink.oos_state = 0;
		}
1538

P
Per Liden 已提交
1539 1540
		l_ptr->peer_session = msg_session(msg);
		l_ptr->peer_bearer_id = msg_bearer_id(msg);
1541 1542 1543

		if (msg_type(msg) == ACTIVATE_MSG)
			link_state_event(l_ptr, ACTIVATE_MSG);
P
Per Liden 已提交
1544 1545 1546
		break;
	case STATE_MSG:

1547 1548
		msg_tol = msg_link_tolerance(msg);
		if (msg_tol)
P
Per Liden 已提交
1549
			link_set_supervision_props(l_ptr, msg_tol);
1550 1551

		if (msg_linkprio(msg) &&
P
Per Liden 已提交
1552
		    (msg_linkprio(msg) != l_ptr->priority)) {
1553 1554 1555
			pr_warn("%s<%s>, priority change %u->%u\n",
				link_rst_msg, l_ptr->name, l_ptr->priority,
				msg_linkprio(msg));
P
Per Liden 已提交
1556
			l_ptr->priority = msg_linkprio(msg);
1557
			tipc_link_reset(l_ptr); /* Enforce change to take effect */
P
Per Liden 已提交
1558 1559
			break;
		}
1560 1561 1562 1563

		/* Record reception; force mismatch at next timeout: */
		l_ptr->checkpoint--;

P
Per Liden 已提交
1564 1565 1566 1567 1568 1569
		link_state_event(l_ptr, TRAFFIC_MSG_EVT);
		l_ptr->stats.recv_states++;
		if (link_reset_unknown(l_ptr))
			break;

		if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
1570
			rec_gap = mod(msg_next_sent(msg) -
P
Per Liden 已提交
1571 1572 1573 1574
				      mod(l_ptr->next_in_no));
		}

		max_pkt_ack = msg_max_pkt(msg);
1575 1576 1577 1578
		if (max_pkt_ack > l_ptr->max_pkt) {
			l_ptr->max_pkt = max_pkt_ack;
			l_ptr->max_pkt_probes = 0;
		}
P
Per Liden 已提交
1579 1580

		max_pkt_ack = 0;
1581
		if (msg_probe(msg)) {
P
Per Liden 已提交
1582
			l_ptr->stats.recv_probes++;
1583
			if (msg_size(msg) > sizeof(l_ptr->proto_msg))
1584 1585
				max_pkt_ack = msg_size(msg);
		}
P
Per Liden 已提交
1586 1587

		/* Protocol message before retransmits, reduce loss risk */
1588
		if (l_ptr->owner->bclink.recv_permitted)
1589
			tipc_bclink_update_link_state(net, l_ptr->owner,
1590
						      msg_last_bcast(msg));
P
Per Liden 已提交
1591 1592

		if (rec_gap || (msg_probe(msg))) {
1593 1594
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, rec_gap, 0,
					     0, max_pkt_ack);
P
Per Liden 已提交
1595 1596 1597
		}
		if (msg_seq_gap(msg)) {
			l_ptr->stats.recv_nacks++;
1598
			tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->outqueue),
1599
					     msg_seq_gap(msg));
P
Per Liden 已提交
1600 1601 1602 1603
		}
		break;
	}
exit:
1604
	kfree_skb(buf);
P
Per Liden 已提交
1605 1606 1607
}


1608 1609
/* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
 * a different bearer. Owner node is locked.
P
Per Liden 已提交
1610
 */
1611 1612 1613 1614
static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
				  struct tipc_msg *tunnel_hdr,
				  struct tipc_msg *msg,
				  u32 selector)
P
Per Liden 已提交
1615
{
1616
	struct tipc_link *tunnel;
1617
	struct sk_buff *skb;
P
Per Liden 已提交
1618 1619 1620
	u32 length = msg_size(msg);

	tunnel = l_ptr->owner->active_links[selector & 1];
1621
	if (!tipc_link_is_up(tunnel)) {
1622
		pr_warn("%stunnel link no longer available\n", link_co_err);
P
Per Liden 已提交
1623
		return;
1624
	}
P
Per Liden 已提交
1625
	msg_set_size(tunnel_hdr, length + INT_H_SIZE);
1626 1627
	skb = tipc_buf_acquire(length + INT_H_SIZE);
	if (!skb) {
1628
		pr_warn("%sunable to send tunnel msg\n", link_co_err);
P
Per Liden 已提交
1629
		return;
1630
	}
1631 1632 1633
	skb_copy_to_linear_data(skb, tunnel_hdr, INT_H_SIZE);
	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, msg, length);
	__tipc_link_xmit_skb(tunnel, skb);
P
Per Liden 已提交
1634 1635 1636
}


/* tipc_link_failover_send_queue(): A link has gone down, but a second
 * link is still active. We can do failover. Tunnel the failing link's
 * whole send queue via the remaining link. This way, we don't lose
 * any packets, and sequence order is preserved for subsequent traffic
 * sent over the remaining link. Owner node is locked.
 */
void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
{
	u32 msgcount = skb_queue_len(&l_ptr->outqueue);
	struct tipc_link *tunnel = l_ptr->owner->active_links[0];
	struct tipc_msg tunnel_hdr;
	struct sk_buff *skb;
	int split_bundles;

	if (!tunnel)
		return;

	tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
		 ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
	msg_set_msgcnt(&tunnel_hdr, msgcount);

	if (skb_queue_empty(&l_ptr->outqueue)) {
		skb = tipc_buf_acquire(INT_H_SIZE);
		if (skb) {
			skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE);
			msg_set_size(&tunnel_hdr, INT_H_SIZE);
			__tipc_link_xmit_skb(tunnel, skb);
		} else {
			pr_warn("%sunable to send changeover msg\n",
				link_co_err);
		}
		return;
	}

	split_bundles = (l_ptr->owner->active_links[0] !=
			 l_ptr->owner->active_links[1]);

	skb_queue_walk(&l_ptr->outqueue, skb) {
		struct tipc_msg *msg = buf_msg(skb);

		if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
			struct tipc_msg *m = msg_get_wrapped(msg);
			unchar *pos = (unchar *)m;

			msgcount = msg_msgcnt(msg);
			while (msgcount--) {
				msg_set_seqno(m, msg_seqno(msg));
				tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, m,
						      msg_link_selector(m));
				pos += align(msg_size(m));
				m = (struct tipc_msg *)pos;
			}
		} else {
			tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
					      msg_link_selector(msg));
		}
	}
}
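
/* Note on the failover above: the message count carried in the tunnel
 * header is taken from the length of the failed link's send queue, and an
 * empty queue is still announced with a zero-count ORIGINAL_MSG so the peer
 * learns that no tunnelled traffic is to be expected. When the two active
 * links differ, bundles built for the failed link are unwrapped and each
 * inner message is tunnelled individually, using its own link selector.
 */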

/* tipc_link_dup_queue_xmit(): A second link has become active. Tunnel a
 * duplicate of the first link's send queue via the new link. This way, we
 * are guaranteed that currently queued packets from a socket are delivered
 * before future traffic from the same socket, even if this is using the
 * new link. The last arriving copy of each duplicate packet is dropped at
 * the receiving end by the regular protocol check, so packet cardinality
 * and sequence order is preserved per sender/receiver socket pair.
 * Owner node is locked.
 */
void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr,
			      struct tipc_link *tunnel)
{
	struct sk_buff *skb;
	struct tipc_msg tunnel_hdr;

	tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
		 DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_msgcnt(&tunnel_hdr, skb_queue_len(&l_ptr->outqueue));
	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
	skb_queue_walk(&l_ptr->outqueue, skb) {
		struct sk_buff *outskb;
		struct tipc_msg *msg = buf_msg(skb);
		u32 length = msg_size(msg);

		if (msg_user(msg) == MSG_BUNDLER)
			msg_set_type(msg, CLOSED_MSG);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));	/* Update */
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
		outskb = tipc_buf_acquire(length + INT_H_SIZE);
		if (outskb == NULL) {
			pr_warn("%sunable to send duplicate msg\n",
				link_co_err);
			return;
		}
		skb_copy_to_linear_data(outskb, &tunnel_hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(outskb, INT_H_SIZE, skb->data,
					       length);
		__tipc_link_xmit_skb(tunnel, outskb);
		if (!tipc_link_is_up(l_ptr))
			return;
	}
}

/**
 * buf_extract - extracts embedded TIPC message from another message
 * @skb: encapsulating message buffer
 * @from_pos: offset to extract from
 *
 * Returns a new message buffer containing an embedded message.  The
 * encapsulating message itself is left unchanged.
 */
static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
{
	struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
	u32 size = msg_size(msg);
	struct sk_buff *eb;

	eb = tipc_buf_acquire(size);
	if (eb)
		skb_copy_to_linear_data(eb, msg, size);
	return eb;
}



/* tipc_link_dup_rcv(): Receive a tunnelled DUPLICATE_MSG packet.
 * Owner node is locked.
 */
static void tipc_link_dup_rcv(struct net *net, struct tipc_link *l_ptr,
			      struct sk_buff *t_buf)
{
	struct sk_buff *buf;

	if (!tipc_link_is_up(l_ptr))
		return;

	buf = buf_extract(t_buf, INT_H_SIZE);
	if (buf == NULL) {
		pr_warn("%sfailed to extract inner dup pkt\n", link_co_err);
		return;
	}

	/* Add buffer to deferred queue, if applicable: */
	link_handle_out_of_seq_msg(net, l_ptr, buf);
}

/*  tipc_link_failover_rcv(): Receive a tunnelled ORIGINAL_MSG packet
 *  Owner node is locked.
 */
static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr,
					      struct sk_buff *t_buf)
{
	struct tipc_msg *t_msg = buf_msg(t_buf);
	struct sk_buff *buf = NULL;
	struct tipc_msg *msg;

	if (tipc_link_is_up(l_ptr))
		tipc_link_reset(l_ptr);

	/* First failover packet? */
	if (l_ptr->exp_msg_count == START_CHANGEOVER)
		l_ptr->exp_msg_count = msg_msgcnt(t_msg);

	/* Should there be an inner packet? */
	if (l_ptr->exp_msg_count) {
		l_ptr->exp_msg_count--;
		buf = buf_extract(t_buf, INT_H_SIZE);
		if (buf == NULL) {
			pr_warn("%sno inner failover pkt\n", link_co_err);
			goto exit;
		}
		msg = buf_msg(buf);

		if (less(msg_seqno(msg), l_ptr->reset_checkpoint)) {
			kfree_skb(buf);
			buf = NULL;
			goto exit;
		}
		if (msg_user(msg) == MSG_FRAGMENTER) {
			l_ptr->stats.recv_fragments++;
			tipc_buf_append(&l_ptr->reasm_buf, &buf);
		}
	}
exit:
	if ((l_ptr->exp_msg_count == 0) && (l_ptr->flags & LINK_STOPPED)) {
		tipc_node_detach_link(l_ptr->owner, l_ptr);
		kfree(l_ptr);
	}
	return buf;
}

/*  tipc_link_tunnel_rcv(): Receive a tunnelled packet, sent
 *  via other link as result of a failover (ORIGINAL_MSG) or
 *  a new active link (DUPLICATE_MSG). Failover packets are
 *  returned to the active link for delivery upwards.
 *  Owner node is locked.
 */
static int tipc_link_tunnel_rcv(struct net *net, struct tipc_node *n_ptr,
				struct sk_buff **buf)
{
	struct sk_buff *t_buf = *buf;
	struct tipc_link *l_ptr;
	struct tipc_msg *t_msg = buf_msg(t_buf);
	u32 bearer_id = msg_bearer_id(t_msg);

	*buf = NULL;

	if (bearer_id >= MAX_BEARERS)
		goto exit;

	l_ptr = n_ptr->links[bearer_id];
	if (!l_ptr)
		goto exit;

	if (msg_type(t_msg) == DUPLICATE_MSG)
		tipc_link_dup_rcv(net, l_ptr, t_buf);
	else if (msg_type(t_msg) == ORIGINAL_MSG)
		*buf = tipc_link_failover_rcv(l_ptr, t_buf);
	else
		pr_warn("%sunknown tunnel pkt received\n", link_co_err);
exit:
	kfree_skb(t_buf);
	return *buf != NULL;
}
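
/* The return value above is effectively a boolean: non-zero means an inner
 * packet was extracted from the tunnel message and handed back through *buf
 * for normal upward delivery, zero means the tunnel packet was consumed
 * entirely (or discarded) and the caller has nothing further to process.
 */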

/*
 *  Bundler functionality:
 */
void tipc_link_bundle_rcv(struct net *net, struct sk_buff *buf)
{
	u32 msgcount = msg_msgcnt(buf_msg(buf));
	u32 pos = INT_H_SIZE;
	struct sk_buff *obuf;
	struct tipc_msg *omsg;

	while (msgcount--) {
		obuf = buf_extract(buf, pos);
		if (obuf == NULL) {
			pr_warn("Link unable to unbundle message(s)\n");
			break;
		}
		omsg = buf_msg(obuf);
		pos += align(msg_size(omsg));
		if (msg_isdata(omsg)) {
			if (unlikely(msg_type(omsg) == TIPC_MCAST_MSG))
				tipc_sk_mcast_rcv(net, obuf);
			else
				tipc_sk_rcv(net, obuf);
		} else if (msg_user(omsg) == CONN_MANAGER) {
			tipc_sk_rcv(net, obuf);
		} else if (msg_user(omsg) == NAME_DISTRIBUTOR) {
			tipc_named_rcv(net, obuf);
		} else {
			pr_warn("Illegal bundled msg: %u\n", msg_user(omsg));
			kfree_skb(obuf);
		}
	}
	kfree_skb(buf);
}
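
/* Unbundling summary: each wrapped message is copied out into its own
 * buffer with buf_extract() and dispatched on its user field (data and
 * CONN_MANAGER traffic to the socket layer, NAME_DISTRIBUTOR traffic to the
 * name table code), while the carrying bundle buffer is always freed at the
 * end, whether or not every wrapped message could be extracted.
 */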

static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol)
{
	unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;

	if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
		return;

	l_ptr->tolerance = tol;
	l_ptr->cont_intv = msecs_to_jiffies(intv);
	l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->cont_intv) / 4);
}
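
/* Worked example, ignoring jiffy rounding: a tolerance of 1500 ms gives a
 * continuity interval of min(1500 / 4, 500) = 375 ms and an abort limit of
 * 1500 / (375 / 4) = 16 continuity intervals without a response before the
 * link is taken down.
 */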

void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
{
	/* Data messages from this node, inclusive FIRST_FRAGM */
	l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window;
	l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4;
	l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5;
	l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6;
	/* Transiting data messages, inclusive FIRST_FRAGM */
	l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300;
	l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600;
	l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900;
	l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200;
	l_ptr->queue_limit[CONN_MANAGER] = 1200;
	l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
	l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
	/* FRAGMENT and LAST_FRAGMENT packets */
	l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
}
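
/* Worked example: with a send window of 50 packets the local data limits
 * come out (integer arithmetic) as 50, 64, 80 and 96 buffers for low,
 * medium, high and critical importance, while the transit and protocol
 * limits keep their fixed values. A minimal, purely illustrative sketch of
 * a call; "l" stands for some struct tipc_link the caller already owns:
 */
#if 0
	tipc_link_set_queue_limits(l, 50);
	/* l->queue_limit[TIPC_LOW_IMPORTANCE]      == 50 */
	/* l->queue_limit[TIPC_MEDIUM_IMPORTANCE]   == 64 */
	/* l->queue_limit[TIPC_HIGH_IMPORTANCE]     == 80 */
	/* l->queue_limit[TIPC_CRITICAL_IMPORTANCE] == 96 */
#endif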

/* tipc_link_find_owner - locate owner node of link by link's name
 * @net: the applicable net namespace
 * @name: pointer to link name string
 * @bearer_id: pointer to index in 'node->links' array where the link was found.
 *
 * Returns pointer to node owning the link, or NULL if no matching link is found.
 */
static struct tipc_node *tipc_link_find_owner(struct net *net,
					      const char *link_name,
					      unsigned int *bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;
	struct tipc_node *found_node = NULL;
	int i;

	*bearer_id = 0;
	rcu_read_lock();
	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
		tipc_node_lock(n_ptr);
		for (i = 0; i < MAX_BEARERS; i++) {
			l_ptr = n_ptr->links[i];
			if (l_ptr && !strcmp(l_ptr->name, link_name)) {
				*bearer_id = i;
				found_node = n_ptr;
				break;
			}
		}
		tipc_node_unlock(n_ptr);
		if (found_node)
			break;
	}
	rcu_read_unlock();

	return found_node;
}
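
/* Locking note for the lookup above: each candidate node is locked only
 * while its link array is scanned and is unlocked again before the function
 * returns, so callers must re-take tipc_node_lock() on the returned node
 * before dereferencing node->links[*bearer_id], as the users elsewhere in
 * this file do.
 */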

/**
 * link_value_is_valid -- validate proposed link tolerance/priority/window
 *
 * @cmd: value type (TIPC_CMD_SET_LINK_*)
 * @new_value: the new value
 *
 * Returns 1 if value is within range, 0 if not.
 */
static int link_value_is_valid(u16 cmd, u32 new_value)
{
	switch (cmd) {
	case TIPC_CMD_SET_LINK_TOL:
		return (new_value >= TIPC_MIN_LINK_TOL) &&
			(new_value <= TIPC_MAX_LINK_TOL);
	case TIPC_CMD_SET_LINK_PRI:
		return (new_value <= TIPC_MAX_LINK_PRI);
	case TIPC_CMD_SET_LINK_WINDOW:
		return (new_value >= TIPC_MIN_LINK_WIN) &&
			(new_value <= TIPC_MAX_LINK_WIN);
	}
	return 0;
}

/**
 * link_cmd_set_value - change priority/tolerance/window for link/bearer/media
 * @net: the applicable net namespace
 * @name: ptr to link, bearer, or media name
 * @new_value: new value of link, bearer, or media setting
 * @cmd: which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*)
 *
 * Caller must hold RTNL lock to ensure link/bearer/media is not deleted.
 *
 * Returns 0 if value updated and negative value on error.
 */
static int link_cmd_set_value(struct net *net, const char *name, u32 new_value,
			      u16 cmd)
{
	struct tipc_node *node;
	struct tipc_link *l_ptr;
	struct tipc_bearer *b_ptr;
	struct tipc_media *m_ptr;
	int bearer_id;
	int res = 0;

	node = tipc_link_find_owner(net, name, &bearer_id);
	if (node) {
		tipc_node_lock(node);
		l_ptr = node->links[bearer_id];

		if (l_ptr) {
			switch (cmd) {
			case TIPC_CMD_SET_LINK_TOL:
				link_set_supervision_props(l_ptr, new_value);
				tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0,
						     new_value, 0, 0);
				break;
			case TIPC_CMD_SET_LINK_PRI:
				l_ptr->priority = new_value;
				tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0,
						     0, new_value, 0);
				break;
			case TIPC_CMD_SET_LINK_WINDOW:
				tipc_link_set_queue_limits(l_ptr, new_value);
				break;
			default:
				res = -EINVAL;
				break;
			}
		}
		tipc_node_unlock(node);
		return res;
	}

	b_ptr = tipc_bearer_find(name);
	if (b_ptr) {
		switch (cmd) {
		case TIPC_CMD_SET_LINK_TOL:
			b_ptr->tolerance = new_value;
			break;
		case TIPC_CMD_SET_LINK_PRI:
			b_ptr->priority = new_value;
			break;
		case TIPC_CMD_SET_LINK_WINDOW:
			b_ptr->window = new_value;
			break;
		default:
			res = -EINVAL;
			break;
		}
		return res;
	}

	m_ptr = tipc_media_find(name);
	if (!m_ptr)
		return -ENODEV;
	switch (cmd) {
	case TIPC_CMD_SET_LINK_TOL:
		m_ptr->tolerance = new_value;
		break;
	case TIPC_CMD_SET_LINK_PRI:
		m_ptr->priority = new_value;
		break;
	case TIPC_CMD_SET_LINK_WINDOW:
		m_ptr->window = new_value;
		break;
	default:
		res = -EINVAL;
		break;
	}
	return res;
}
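
/* Name resolution order in link_cmd_set_value(): the name is matched first
 * against existing links, then against bearers, and finally against media
 * types; -ENODEV is returned only when none of the three matches.
 */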

struct sk_buff *tipc_link_cmd_config(struct net *net, const void *req_tlv_area,
				     int req_tlv_space, u16 cmd)
{
	struct tipc_link_config *args;
	u32 new_value;
	int res;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	args = (struct tipc_link_config *)TLV_DATA(req_tlv_area);
	new_value = ntohl(args->value);

	if (!link_value_is_valid(cmd, new_value))
		return tipc_cfg_reply_error_string(
			"cannot change, value invalid");

	if (!strcmp(args->name, tipc_bclink_name)) {
		if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
		    (tipc_bclink_set_queue_limits(new_value) == 0))
			return tipc_cfg_reply_none();
		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
						   " (cannot change setting on broadcast link)");
	}

	res = link_cmd_set_value(net, args->name, new_value, cmd);
	if (res)
		return tipc_cfg_reply_error_string("cannot change link setting");

	return tipc_cfg_reply_none();
}

/**
 * link_reset_statistics - reset link statistics
 * @l_ptr: pointer to link
 */
static void link_reset_statistics(struct tipc_link *l_ptr)
{
	memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
	l_ptr->stats.sent_info = l_ptr->next_out_no;
	l_ptr->stats.recv_info = l_ptr->next_in_no;
}

struct sk_buff *tipc_link_cmd_reset_stats(struct net *net,
					  const void *req_tlv_area,
					  int req_tlv_space)
{
	char *link_name;
	struct tipc_link *l_ptr;
	struct tipc_node *node;
	unsigned int bearer_id;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	link_name = (char *)TLV_DATA(req_tlv_area);
	if (!strcmp(link_name, tipc_bclink_name)) {
		if (tipc_bclink_reset_stats())
			return tipc_cfg_reply_error_string("link not found");
		return tipc_cfg_reply_none();
	}
	node = tipc_link_find_owner(net, link_name, &bearer_id);
	if (!node)
		return tipc_cfg_reply_error_string("link not found");

	tipc_node_lock(node);
	l_ptr = node->links[bearer_id];
	if (!l_ptr) {
		tipc_node_unlock(node);
		return tipc_cfg_reply_error_string("link not found");
	}
	link_reset_statistics(l_ptr);
	tipc_node_unlock(node);
	return tipc_cfg_reply_none();
}

/**
 * percent - convert count to a percentage of total (rounding up or down)
 */
static u32 percent(u32 count, u32 total)
{
	return (count * 100 + (total / 2)) / total;
}
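
/* Example of the rounding behaviour: percent(1, 3) = (100 + 1) / 3 = 33,
 * while percent(2, 3) = (200 + 1) / 3 = 67, i.e. the result is rounded to
 * the nearest whole percent rather than truncated.
 */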

/**
 * tipc_link_stats - print link statistics
 * @net: the applicable net namespace
 * @name: link name
 * @buf: print buffer area
 * @buf_size: size of print buffer area
 *
 * Returns length of print buffer data string (or 0 if error)
 */
static int tipc_link_stats(struct net *net, const char *name, char *buf,
			   const u32 buf_size)
{
	struct tipc_link *l;
	struct tipc_stats *s;
	struct tipc_node *node;
	char *status;
	u32 profile_total = 0;
	unsigned int bearer_id;
	int ret;

	if (!strcmp(name, tipc_bclink_name))
		return tipc_bclink_stats(buf, buf_size);

	node = tipc_link_find_owner(net, name, &bearer_id);
	if (!node)
		return 0;

	tipc_node_lock(node);

	l = node->links[bearer_id];
	if (!l) {
		tipc_node_unlock(node);
		return 0;
	}

	s = &l->stats;

	if (tipc_link_is_active(l))
		status = "ACTIVE";
	else if (tipc_link_is_up(l))
		status = "STANDBY";
	else
		status = "DEFUNCT";

	ret = tipc_snprintf(buf, buf_size, "Link <%s>\n"
			    "  %s  MTU:%u  Priority:%u  Tolerance:%u ms"
			    "  Window:%u packets\n",
			    l->name, status, l->max_pkt, l->priority,
			    l->tolerance, l->queue_limit[0]);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  RX packets:%u fragments:%u/%u bundles:%u/%u\n",
			     l->next_in_no - s->recv_info, s->recv_fragments,
			     s->recv_fragmented, s->recv_bundles,
			     s->recv_bundled);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  TX packets:%u fragments:%u/%u bundles:%u/%u\n",
			     l->next_out_no - s->sent_info, s->sent_fragments,
			     s->sent_fragmented, s->sent_bundles,
			     s->sent_bundled);

	profile_total = s->msg_length_counts;
	if (!profile_total)
		profile_total = 1;

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  TX profile sample:%u packets  average:%u octets\n"
			     "  0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
			     "-16384:%u%% -32768:%u%% -66000:%u%%\n",
			     s->msg_length_counts,
			     s->msg_lengths_total / profile_total,
			     percent(s->msg_length_profile[0], profile_total),
			     percent(s->msg_length_profile[1], profile_total),
			     percent(s->msg_length_profile[2], profile_total),
			     percent(s->msg_length_profile[3], profile_total),
			     percent(s->msg_length_profile[4], profile_total),
			     percent(s->msg_length_profile[5], profile_total),
			     percent(s->msg_length_profile[6], profile_total));

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  RX states:%u probes:%u naks:%u defs:%u"
			     " dups:%u\n", s->recv_states, s->recv_probes,
			     s->recv_nacks, s->deferred_recv, s->duplicates);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  TX states:%u probes:%u naks:%u acks:%u"
			     " dups:%u\n", s->sent_states, s->sent_probes,
			     s->sent_nacks, s->sent_acks, s->retransmitted);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  Congestion link:%u  Send queue"
			     " max:%u avg:%u\n", s->link_congs,
			     s->max_queue_sz, s->queue_sz_counts ?
			     (s->accu_queue_sz / s->queue_sz_counts) : 0);

	tipc_node_unlock(node);
	return ret;
}

struct sk_buff *tipc_link_cmd_show_stats(struct net *net,
					 const void *req_tlv_area,
					 int req_tlv_space)
{
	struct sk_buff *buf;
	struct tlv_desc *rep_tlv;
	int str_len;
	int pb_len;
	char *pb;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
	if (!buf)
		return NULL;

	rep_tlv = (struct tlv_desc *)buf->data;
	pb = TLV_DATA(rep_tlv);
	pb_len = ULTRA_STRING_MAX_LEN;
	str_len = tipc_link_stats(net, (char *)TLV_DATA(req_tlv_area),
				  pb, pb_len);
	if (!str_len) {
		kfree_skb(buf);
		return tipc_cfg_reply_error_string("link not found");
	}
	str_len += 1;	/* for "\0" */
	skb_put(buf, TLV_SPACE(str_len));
	TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);

	return buf;
}

static void link_print(struct tipc_link *l_ptr, const char *str)
{
	struct tipc_bearer *b_ptr;

	rcu_read_lock();
	b_ptr = rcu_dereference_rtnl(bearer_list[l_ptr->bearer_id]);
	if (b_ptr)
		pr_info("%s Link %x<%s>:", str, l_ptr->addr, b_ptr->name);
	rcu_read_unlock();

	if (link_working_unknown(l_ptr))
		pr_cont(":WU\n");
	else if (link_reset_reset(l_ptr))
		pr_cont(":RR\n");
	else if (link_reset_unknown(l_ptr))
		pr_cont(":RU\n");
	else if (link_working_working(l_ptr))
		pr_cont(":WW\n");
	else
		pr_cont("\n");
}

/* Parse and validate nested (link) properties valid for media, bearer and link
 */
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
	int err;

	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
			       tipc_nl_prop_policy);
	if (err)
		return err;

	if (props[TIPC_NLA_PROP_PRIO]) {
		u32 prio;

		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (prio > TIPC_MAX_LINK_PRI)
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_TOL]) {
		u32 tol;

		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_WIN]) {
		u32 win;

		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
			return -EINVAL;
	}

	return 0;
}
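
/* The nesting this parser expects, as defined by tipc_nl_prop_policy: a
 * TIPC_NLA_LINK_PROP container holding any of TIPC_NLA_PROP_PRIO,
 * TIPC_NLA_PROP_TOL and TIPC_NLA_PROP_WIN as u32 attributes, each checked
 * against its bounds (TIPC_MAX_LINK_PRI, TIPC_MIN_LINK_TOL to
 * TIPC_MAX_LINK_TOL, TIPC_MIN_LINK_WIN to TIPC_MAX_LINK_WIN) before the
 * caller applies the new value.
 */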

int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	int res = 0;
	int bearer_id;
	char *name;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = genl_info_net(info);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	node = tipc_link_find_owner(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_lock(node);

	link = node->links[bearer_id];
	if (!link) {
		res = -EINVAL;
		goto out;
	}

	if (attrs[TIPC_NLA_LINK_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
					      props);
		if (err) {
			res = err;
			goto out;
		}

		if (props[TIPC_NLA_PROP_TOL]) {
			u32 tol;

			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
			link_set_supervision_props(link, tol);
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0, 0);
		}
		if (props[TIPC_NLA_PROP_PRIO]) {
			u32 prio;

			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
			link->priority = prio;
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio, 0);
		}
		if (props[TIPC_NLA_PROP_WIN]) {
			u32 win;

			win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
			tipc_link_set_queue_limits(link, win);
		}
	}

out:
	tipc_node_unlock(node);

	return res;
}

static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, s->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, s->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i <  ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}

/* Caller should hold appropriate locks to protect the link */
static int __tipc_nl_add_link(struct tipc_nl_msg *msg, struct tipc_link *link)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
			tipc_cluster_mask(tipc_own_addr)))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->max_pkt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->next_in_no))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->next_out_no))
		goto attr_msg_full;

	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
			goto attr_msg_full;
	if (tipc_link_is_active(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
			goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
			link->queue_limit[TIPC_LOW_IMPORTANCE]))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);
	if (err)
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

/* Caller should hold node lock  */
static int __tipc_nl_add_node_links(struct tipc_nl_msg *msg,
				    struct tipc_node *node,
				    u32 *prev_link)
{
	u32 i;
	int err;

	for (i = *prev_link; i < MAX_BEARERS; i++) {
		*prev_link = i;

		if (!node->links[i])
			continue;

		err = __tipc_nl_add_link(msg, node->links[i]);
		if (err)
			return err;
	}
	*prev_link = 0;

	return 0;
}

int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
2590 2591
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608
	struct tipc_node *node;
	struct tipc_nl_msg msg;
	u32 prev_node = cb->args[0];
	u32 prev_link = cb->args[1];
	int done = cb->args[2];
	int err;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();

	if (prev_node) {
2609
		node = tipc_node_find(net, prev_node);
2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620
		if (!node) {
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistence check to fail in the netlink callback
			 * handler. Resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			goto out;
		}

2621 2622
		list_for_each_entry_continue_rcu(node, &tn->node_list,
						 list) {
2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(&msg, node, &prev_link);
			tipc_node_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	} else {
		err = tipc_nl_add_bc_link(&msg);
		if (err)
			goto out;

2636
		list_for_each_entry_rcu(node, &tn->node_list, list) {
2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(&msg, node, &prev_link);
			tipc_node_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	}
	done = 1;
out:
	rcu_read_unlock();

	cb->args[0] = prev_node;
	cb->args[1] = prev_link;
	cb->args[2] = done;

	return skb->len;
}

int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
{
2659
	struct net *net = genl_info_net(info);
2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671
	struct sk_buff *ans_skb;
	struct tipc_nl_msg msg;
	struct tipc_link *link;
	struct tipc_node *node;
	char *name;
	int bearer_id;
	int err;

	if (!info->attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);
2672
	node = tipc_link_find_owner(net, name, &bearer_id);
2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704
	if (!node)
		return -EINVAL;

	ans_skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!ans_skb)
		return -ENOMEM;

	msg.skb = ans_skb;
	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	tipc_node_lock(node);
	link = node->links[bearer_id];
	if (!link) {
		err = -EINVAL;
		goto err_out;
	}

	err = __tipc_nl_add_link(&msg, link);
	if (err)
		goto err_out;

	tipc_node_unlock(node);

	return genlmsg_reply(ans_skb, info);

err_out:
	tipc_node_unlock(node);
	nlmsg_free(ans_skb);

	return err;
}
2705 2706 2707 2708 2709 2710 2711 2712 2713

int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	char *link_name;
	unsigned int bearer_id;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
2714
	struct net *net = genl_info_net(info);
2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(link_name, tipc_bclink_name) == 0) {
		err = tipc_bclink_reset_stats();
		if (err)
			return err;
		return 0;
	}

2737
	node = tipc_link_find_owner(net, link_name, &bearer_id);
2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754
	if (!node)
		return -EINVAL;

	tipc_node_lock(node);

	link = node->links[bearer_id];
	if (!link) {
		tipc_node_unlock(node);
		return -EINVAL;
	}

	link_reset_statistics(link);

	tipc_node_unlock(node);

	return 0;
}