/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2014, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "bcast.h"
#include "socket.h"
#include "name_distr.h"
#include "discover.h"
#include "config.h"
#include "netlink.h"

#include <linux/pkt_sched.h>

/*
 * Error message prefixes
 */
static const char *link_co_err = "Link changeover error, ";
static const char *link_rst_msg = "Resetting link ";
static const char *link_unk_evt = "Unknown link event ";

static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
	[TIPC_NLA_LINK_UNSPEC]		= { .type = NLA_UNSPEC },
	[TIPC_NLA_LINK_NAME] = {
		.type = NLA_STRING,
		.len = TIPC_MAX_LINK_NAME
	},
	[TIPC_NLA_LINK_MTU]		= { .type = NLA_U32 },
	[TIPC_NLA_LINK_BROADCAST]	= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_UP]		= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_ACTIVE]		= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_PROP]		= { .type = NLA_NESTED },
	[TIPC_NLA_LINK_STATS]		= { .type = NLA_NESTED },
	[TIPC_NLA_LINK_RX]		= { .type = NLA_U32 },
	[TIPC_NLA_LINK_TX]		= { .type = NLA_U32 }
};
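
/*
 * Illustrative sketch only (not part of the original code): a nested
 * TIPC_NLA_LINK attribute -- here a hypothetical variable "attr" -- would
 * typically be validated against the policy above with something like:
 *
 *	struct nlattr *props[TIPC_NLA_LINK_MAX + 1];
 *	int err = nla_parse_nested(props, TIPC_NLA_LINK_MAX, attr,
 *				   tipc_nl_link_policy);
 */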

/* Properties valid for media, bearer and link */
static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
	[TIPC_NLA_PROP_UNSPEC]		= { .type = NLA_UNSPEC },
	[TIPC_NLA_PROP_PRIO]		= { .type = NLA_U32 },
	[TIPC_NLA_PROP_TOL]		= { .type = NLA_U32 },
	[TIPC_NLA_PROP_WIN]		= { .type = NLA_U32 }
};

/*
 * Out-of-range value for link session numbers
 */
#define INVALID_SESSION 0x10000

/*
 * Link state events:
 */
#define  STARTING_EVT    856384768	/* link processing trigger */
#define  TRAFFIC_MSG_EVT 560815u	/* rx'd ??? */
#define  TIMEOUT_EVT     560817u	/* link timer expired */

/*
 * The following two 'message types' are really just implementation
 * data conveniently stored in the message header.
 * They must not be considered part of the protocol
 */
#define OPEN_MSG   0
#define CLOSED_MSG 1

/*
 * State value stored in 'exp_msg_count'
 */
#define START_CHANGEOVER 100000u

static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
				       struct sk_buff *buf);
static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf);
static int  tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
				 struct sk_buff **buf);
static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance);
static void link_state_event(struct tipc_link *l_ptr, u32 event);
static void link_reset_statistics(struct tipc_link *l_ptr);
static void link_print(struct tipc_link *l_ptr, const char *str);
static void tipc_link_sync_xmit(struct tipc_link *l);
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
static int tipc_link_input(struct tipc_link *l, struct sk_buff *buf);
static int tipc_link_prepare_input(struct tipc_link *l, struct sk_buff **buf);

/*
 *  Simple link routines
 */
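/* align - round a length up to the next multiple of 4 bytes */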
static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}

static void link_init_max_pkt(struct tipc_link *l_ptr)
{
	struct tipc_bearer *b_ptr;
	u32 max_pkt;

	rcu_read_lock();
	b_ptr = rcu_dereference_rtnl(bearer_list[l_ptr->bearer_id]);
	if (!b_ptr) {
		rcu_read_unlock();
		return;
	}
	max_pkt = (b_ptr->mtu & ~3);
	rcu_read_unlock();

	if (max_pkt > MAX_MSG_SIZE)
		max_pkt = MAX_MSG_SIZE;

	l_ptr->max_pkt_target = max_pkt;
	if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
		l_ptr->max_pkt = l_ptr->max_pkt_target;
	else
		l_ptr->max_pkt = MAX_PKT_DEFAULT;

	l_ptr->max_pkt_probes = 0;
}

/*
 *  Simple non-static link routines (i.e. referenced outside this file)
 */
int tipc_link_is_up(struct tipc_link *l_ptr)
{
	if (!l_ptr)
		return 0;
	return link_working_working(l_ptr) || link_working_unknown(l_ptr);
}

int tipc_link_is_active(struct tipc_link *l_ptr)
{
	return	(l_ptr->owner->active_links[0] == l_ptr) ||
		(l_ptr->owner->active_links[1] == l_ptr);
}

/**
 * link_timeout - handle expiration of link timer
 * @l_ptr: pointer to link
 */
static void link_timeout(struct tipc_link *l_ptr)
{
	struct sk_buff *skb;

	tipc_node_lock(l_ptr->owner);

	/* update counters used in statistical profiling of send traffic */
	l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->outqueue);
	l_ptr->stats.queue_sz_counts++;

	skb = skb_peek(&l_ptr->outqueue);
	if (skb) {
		struct tipc_msg *msg = buf_msg(skb);
		u32 length = msg_size(msg);

		if ((msg_user(msg) == MSG_FRAGMENTER) &&
		    (msg_type(msg) == FIRST_FRAGMENT)) {
			length = msg_size(msg_get_wrapped(msg));
		}
		if (length) {
			l_ptr->stats.msg_lengths_total += length;
			l_ptr->stats.msg_length_counts++;
			if (length <= 64)
				l_ptr->stats.msg_length_profile[0]++;
			else if (length <= 256)
				l_ptr->stats.msg_length_profile[1]++;
			else if (length <= 1024)
				l_ptr->stats.msg_length_profile[2]++;
			else if (length <= 4096)
				l_ptr->stats.msg_length_profile[3]++;
			else if (length <= 16384)
				l_ptr->stats.msg_length_profile[4]++;
			else if (length <= 32768)
				l_ptr->stats.msg_length_profile[5]++;
			else
				l_ptr->stats.msg_length_profile[6]++;
		}
	}

	/* do all other link processing performed on a periodic basis */
	link_state_event(l_ptr, TIMEOUT_EVT);

	if (l_ptr->next_out)
		tipc_link_push_packets(l_ptr);

	tipc_node_unlock(l_ptr->owner);
}

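/* link_set_timer - (re)arm the link timer for the given interval */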
static void link_set_timer(struct tipc_link *l_ptr, u32 time)
{
	k_start_timer(&l_ptr->timer, time);
}

/**
 * tipc_link_create - create a new link
 * @n_ptr: pointer to associated node
 * @b_ptr: pointer to associated bearer
 * @media_addr: media address to use when sending messages over link
 *
 * Returns pointer to link.
 */
struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
				   struct tipc_bearer *b_ptr,
				   const struct tipc_media_addr *media_addr)
{
	struct tipc_link *l_ptr;
	struct tipc_msg *msg;
	char *if_name;
	char addr_string[16];
	u32 peer = n_ptr->addr;

	if (n_ptr->link_cnt >= MAX_BEARERS) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish %uth link to %s. Max %u allowed.\n",
			n_ptr->link_cnt, addr_string, MAX_BEARERS);
		return NULL;
	}

	if (n_ptr->links[b_ptr->identity]) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish second link on <%s> to %s\n",
		       b_ptr->name, addr_string);
		return NULL;
	}

	l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
	if (!l_ptr) {
		pr_warn("Link creation failed, no memory\n");
		return NULL;
	}

	l_ptr->addr = peer;
	if_name = strchr(b_ptr->name, ':') + 1;
	sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
		tipc_node(tipc_own_addr),
		if_name,
		tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
		/* note: peer i/f name is updated by reset/activate message */
	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
	l_ptr->owner = n_ptr;
	l_ptr->checkpoint = 1;
	l_ptr->peer_session = INVALID_SESSION;
	l_ptr->bearer_id = b_ptr->identity;
	link_set_supervision_props(l_ptr, b_ptr->tolerance);
	l_ptr->state = RESET_UNKNOWN;

	l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
	msg = l_ptr->pmsg;
	tipc_msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_size(msg, sizeof(l_ptr->proto_msg));
	msg_set_session(msg, (tipc_random & 0xffff));
	msg_set_bearer_id(msg, b_ptr->identity);
	strcpy((char *)msg_data(msg), if_name);

	l_ptr->priority = b_ptr->priority;
	tipc_link_set_queue_limits(l_ptr, b_ptr->window);

	l_ptr->net_plane = b_ptr->net_plane;
	link_init_max_pkt(l_ptr);

	l_ptr->next_out_no = 1;
	__skb_queue_head_init(&l_ptr->outqueue);
	__skb_queue_head_init(&l_ptr->deferred_queue);
	skb_queue_head_init(&l_ptr->waiting_sks);

	link_reset_statistics(l_ptr);

	tipc_node_attach_link(n_ptr, l_ptr);

	k_init_timer(&l_ptr->timer, (Handler)link_timeout,
		     (unsigned long)l_ptr);

	link_state_event(l_ptr, STARTING_EVT);

	return l_ptr;
}

void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down)
{
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;

	rcu_read_lock();
	list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
		tipc_node_lock(n_ptr);
		l_ptr = n_ptr->links[bearer_id];
		if (l_ptr) {
			tipc_link_reset(l_ptr);
			if (shutting_down || !tipc_node_is_up(n_ptr)) {
				tipc_node_detach_link(l_ptr->owner, l_ptr);
				tipc_link_reset_fragments(l_ptr);
				tipc_node_unlock(n_ptr);

				/* Nobody else can access this link now: */
				del_timer_sync(&l_ptr->timer);
				kfree(l_ptr);
			} else {
				/* Detach/delete when failover is finished: */
				l_ptr->flags |= LINK_STOPPED;
				tipc_node_unlock(n_ptr);
				del_timer_sync(&l_ptr->timer);
			}
			continue;
		}
		tipc_node_unlock(n_ptr);
	}
	rcu_read_unlock();
}

/**
 * link_schedule_user - schedule user for wakeup after congestion
 * @link: congested link
 * @oport: sending port
 * @chain_sz: size of buffer chain that was attempted sent
 * @imp: importance of message attempted sent
 * Create pseudo msg to send back to user when congestion abates
 */
static bool link_schedule_user(struct tipc_link *link, u32 oport,
			       uint chain_sz, uint imp)
{
	struct sk_buff *buf;

	buf = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0, tipc_own_addr,
			      tipc_own_addr, oport, 0, 0);
	if (!buf)
		return false;
	TIPC_SKB_CB(buf)->chain_sz = chain_sz;
	TIPC_SKB_CB(buf)->chain_imp = imp;
	skb_queue_tail(&link->waiting_sks, buf);
	link->stats.link_congs++;
	return true;
}

/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @link: congested link
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to node wait queue for wakeup
 */
static void link_prepare_wakeup(struct tipc_link *link)
{
	uint pend_qsz = skb_queue_len(&link->outqueue);
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&link->waiting_sks, skb, tmp) {
		if (pend_qsz >= link->queue_limit[TIPC_SKB_CB(skb)->chain_imp])
			break;
		pend_qsz += TIPC_SKB_CB(skb)->chain_sz;
		skb_unlink(skb, &link->waiting_sks);
		skb_queue_tail(&link->owner->waiting_sks, skb);
	}
}

/**
 * tipc_link_reset_fragments - purge link's inbound message fragments queue
 * @l_ptr: pointer to link
 */
void tipc_link_reset_fragments(struct tipc_link *l_ptr)
{
	kfree_skb(l_ptr->reasm_buf);
	l_ptr->reasm_buf = NULL;
}

/**
 * tipc_link_purge_queues - purge all pkt queues associated with link
 * @l_ptr: pointer to link
 */
void tipc_link_purge_queues(struct tipc_link *l_ptr)
{
	__skb_queue_purge(&l_ptr->deferred_queue);
	__skb_queue_purge(&l_ptr->outqueue);
	tipc_link_reset_fragments(l_ptr);
}

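/* tipc_link_reset - reset a link endpoint
 *
 * Bump the session number, purge the send and deferred queues, hand any
 * waiting sockets back to the owner node for wakeup, and return the
 * endpoint to the RESET_UNKNOWN state. Called with the owner node locked.
 */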
void tipc_link_reset(struct tipc_link *l_ptr)
{
	u32 prev_state = l_ptr->state;
	u32 checkpoint = l_ptr->next_in_no;
	int was_active_link = tipc_link_is_active(l_ptr);
	struct tipc_node *owner = l_ptr->owner;

	msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));

	/* Link is down, accept any session */
	l_ptr->peer_session = INVALID_SESSION;

	/* Prepare for max packet size negotiation */
	link_init_max_pkt(l_ptr);

	l_ptr->state = RESET_UNKNOWN;

	if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
		return;

	tipc_node_link_down(l_ptr->owner, l_ptr);
	tipc_bearer_remove_dest(l_ptr->bearer_id, l_ptr->addr);

	if (was_active_link && tipc_node_active_links(l_ptr->owner)) {
		l_ptr->reset_checkpoint = checkpoint;
		l_ptr->exp_msg_count = START_CHANGEOVER;
	}

	/* Clean up all queues: */
	__skb_queue_purge(&l_ptr->outqueue);
	__skb_queue_purge(&l_ptr->deferred_queue);
	if (!skb_queue_empty(&l_ptr->waiting_sks)) {
		skb_queue_splice_init(&l_ptr->waiting_sks, &owner->waiting_sks);
		owner->action_flags |= TIPC_WAKEUP_USERS;
	}
	l_ptr->next_out = NULL;
	l_ptr->unacked_window = 0;
	l_ptr->checkpoint = 1;
	l_ptr->next_out_no = 1;
	l_ptr->fsm_msg_cnt = 0;
	l_ptr->stale_count = 0;
	link_reset_statistics(l_ptr);
}

void tipc_link_reset_list(unsigned int bearer_id)
{
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;

	rcu_read_lock();
	list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
		tipc_node_lock(n_ptr);
		l_ptr = n_ptr->links[bearer_id];
		if (l_ptr)
			tipc_link_reset(l_ptr);
		tipc_node_unlock(n_ptr);
	}
	rcu_read_unlock();
}

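/* link_activate - mark a link as up and register it with the owner node
 * and bearer. Called with the owner node locked.
 */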
static void link_activate(struct tipc_link *l_ptr)
{
	l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
	tipc_node_link_up(l_ptr->owner, l_ptr);
	tipc_bearer_add_dest(l_ptr->bearer_id, l_ptr->addr);
}

/**
 * link_state_event - link finite state machine
 * @l_ptr: pointer to link
 * @event: state machine event to process
 */
static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
{
	struct tipc_link *other;
	u32 cont_intv = l_ptr->continuity_interval;

	if (l_ptr->flags & LINK_STOPPED)
		return;

	if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
		return;		/* Not yet. */

	/* Check whether changeover is going on */
	if (l_ptr->exp_msg_count) {
		if (event == TIMEOUT_EVT)
			link_set_timer(l_ptr, cont_intv);
		return;
	}

	switch (l_ptr->state) {
	case WORKING_WORKING:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			break;
		case TIMEOUT_EVT:
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_proto_xmit(l_ptr, STATE_MSG,
							     0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				} else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
					tipc_link_proto_xmit(l_ptr, STATE_MSG,
							     1, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
				break;
			}
			l_ptr->state = WORKING_UNKNOWN;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv / 4);
			break;
		case RESET_MSG:
			pr_info("%s<%s>, requested by peer\n", link_rst_msg,
				l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in WW state\n", link_unk_evt, event);
		}
		break;
	case WORKING_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			pr_info("%s<%s>, requested by peer while probing\n",
				link_rst_msg, l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case TIMEOUT_EVT:
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				l_ptr->state = WORKING_WORKING;
				l_ptr->fsm_msg_cnt = 0;
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_proto_xmit(l_ptr, STATE_MSG,
							     0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
			} else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
				tipc_link_proto_xmit(l_ptr, STATE_MSG,
						     1, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv / 4);
			} else {	/* Link has failed */
				pr_warn("%s<%s>, peer not responding\n",
					link_rst_msg, l_ptr->name);
				tipc_link_reset(l_ptr);
				l_ptr->state = RESET_UNKNOWN;
				l_ptr->fsm_msg_cnt = 0;
				tipc_link_proto_xmit(l_ptr, RESET_MSG,
						     0, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv);
			}
			break;
		default:
			pr_err("%s%u in WU state\n", link_unk_evt, event);
		}
		break;
	case RESET_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
			break;
		case ACTIVATE_MSG:
			other = l_ptr->owner->active_links[0];
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			if (l_ptr->owner->working_links == 1)
				tipc_link_sync_xmit(l_ptr);
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case STARTING_EVT:
			l_ptr->flags |= LINK_STARTED;
			/* fall through */
		case TIMEOUT_EVT:
			tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in RU state\n", link_unk_evt, event);
		}
		break;
	case RESET_RESET:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			other = l_ptr->owner->active_links[0];
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			if (l_ptr->owner->working_links == 1)
				tipc_link_sync_xmit(l_ptr);
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			break;
		case TIMEOUT_EVT:
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in RR state\n", link_unk_evt, event);
		}
		break;
	default:
		pr_err("Unknown link state %u/%u\n", l_ptr->state, event);
	}
}

/* tipc_link_cong: determine return value and how to treat the
 * sent buffer during link congestion.
 * - For plain, errorless user data messages we keep the buffer and
 *   return -ELINKCONG.
 * - For all other messages we discard the buffer and return -EHOSTUNREACH
 * - For TIPC internal messages we also reset the link
 */
static int tipc_link_cong(struct tipc_link *link, struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	struct tipc_msg *msg = buf_msg(skb);
	uint imp = tipc_msg_tot_importance(msg);
	u32 oport = msg_tot_origport(msg);

	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
		pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
		tipc_link_reset(link);
		goto drop;
	}
	if (unlikely(msg_errcode(msg)))
		goto drop;
	if (unlikely(msg_reroute_cnt(msg)))
		goto drop;
	if (TIPC_SKB_CB(skb)->wakeup_pending)
		return -ELINKCONG;
	if (link_schedule_user(link, oport, skb_queue_len(list), imp))
		return -ELINKCONG;
drop:
	__skb_queue_purge(list);
	return -EHOSTUNREACH;
}

/**
 * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
 * @link: link to use
 * @list: chain of buffers containing message
 *
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG, -EMSGSIZE (plain socket
 * user data messages) or -EHOSTUNREACH (all other messages/senders)
 * Only the socket functions tipc_send_stream() and tipc_send_packet() need
 * to act on the return value, since they may need to do more send attempts.
 */
int __tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list)
{
	struct tipc_msg *msg = buf_msg(skb_peek(list));
	uint psz = msg_size(msg);
	uint sndlim = link->queue_limit[0];
	uint imp = tipc_msg_tot_importance(msg);
	uint mtu = link->max_pkt;
	uint ack = mod(link->next_in_no - 1);
	uint seqno = link->next_out_no;
	uint bc_last_in = link->owner->bclink.last_in;
	struct tipc_media_addr *addr = &link->media_addr;
	struct sk_buff_head *outqueue = &link->outqueue;
	struct sk_buff *skb, *tmp;

	/* Match queue limits against msg importance: */
	if (unlikely(skb_queue_len(outqueue) >= link->queue_limit[imp]))
		return tipc_link_cong(link, list);

	/* Has valid packet limit been used ? */
	if (unlikely(psz > mtu)) {
		__skb_queue_purge(list);
		return -EMSGSIZE;
	}

	/* Prepare each packet for sending, and add to outqueue: */
	skb_queue_walk_safe(list, skb, tmp) {
		__skb_unlink(skb, list);
		msg = buf_msg(skb);
		msg_set_word(msg, 2, ((ack << 16) | mod(seqno)));
		msg_set_bcast_ack(msg, bc_last_in);

		if (skb_queue_len(outqueue) < sndlim) {
			__skb_queue_tail(outqueue, skb);
			tipc_bearer_send(link->bearer_id, skb, addr);
			link->next_out = NULL;
			link->unacked_window = 0;
		} else if (tipc_msg_bundle(outqueue, skb, mtu)) {
			link->stats.sent_bundled++;
			continue;
		} else if (tipc_msg_make_bundle(outqueue, skb, mtu,
						link->addr)) {
			link->stats.sent_bundled++;
			link->stats.sent_bundles++;
			if (!link->next_out)
				link->next_out = skb_peek_tail(outqueue);
		} else {
			__skb_queue_tail(outqueue, skb);
			if (!link->next_out)
				link->next_out = skb;
		}
		seqno++;
	}
	link->next_out_no = seqno;
	return 0;
}

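/* Convenience helpers for single-buffer transmission: skb2list() wraps one
 * skb in a one-element queue so the list-based xmit routines can be reused
 * for the single-buffer case.
 */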
static void skb2list(struct sk_buff *skb, struct sk_buff_head *list)
{
	__skb_queue_head_init(list);
	__skb_queue_tail(list, skb);
}

static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
{
	struct sk_buff_head head;

	skb2list(skb, &head);
	return __tipc_link_xmit(link, &head);
}

int tipc_link_xmit_skb(struct sk_buff *skb, u32 dnode, u32 selector)
{
	struct sk_buff_head head;

	skb2list(skb, &head);
	return tipc_link_xmit(&head, dnode, selector);
}

/**
 * tipc_link_xmit() is the general link level function for message sending
 * @list: chain of buffers containing message
 * @dsz: amount of user data to be sent
 * @dnode: address of destination node
 * @selector: a number used for deterministic link selection
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
 */
int tipc_link_xmit(struct sk_buff_head *list, u32 dnode, u32 selector)
{
	struct tipc_link *link = NULL;
	struct tipc_node *node;
	int rc = -EHOSTUNREACH;

	node = tipc_node_find(dnode);
	if (node) {
		tipc_node_lock(node);
		link = node->active_links[selector & 1];
		if (link)
			rc = __tipc_link_xmit(link, list);
		tipc_node_unlock(node);
	}

	if (link)
		return rc;

	if (likely(in_own_node(dnode))) {
		/* As a node local message chain never contains more than one
		 * buffer, we just need to dequeue one SKB buffer from the
		 * head list.
		 */
		return tipc_sk_rcv(__skb_dequeue(list));
	}
	__skb_queue_purge(list);

	return rc;
}

/*
 * tipc_link_sync_xmit - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 *
 * Called with node locked
 */
static void tipc_link_sync_xmit(struct tipc_link *link)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;

	skb = tipc_buf_acquire(INT_H_SIZE);
	if (!skb)
		return;

	msg = buf_msg(skb);
	tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, link->addr);
	msg_set_last_bcast(msg, link->owner->bclink.acked);
	__tipc_link_xmit_skb(link, skb);
}

/*
 * tipc_link_sync_rcv - synchronize broadcast link endpoints.
 * Receive the sequence number where we should start receiving and
 * acking broadcast packets from a newly added peer node, and open
 * up for reception of such packets.
 *
 * Called with node locked
 */
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
	n->bclink.recv_permitted = true;
	kfree_skb(buf);
}

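/* tipc_skb_queue_next - return the buffer following @skb in @list,
 * or NULL if @skb is the last one
 */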
struct sk_buff *tipc_skb_queue_next(const struct sk_buff_head *list,
				    const struct sk_buff *skb)
{
	if (skb_queue_is_last(list, skb))
		return NULL;
	return skb->next;
}

/*
 * tipc_link_push_packets - push unsent packets to bearer
 *
 * Push out the unsent messages of a link where congestion
 * has abated. Called with node locked.
 */
void tipc_link_push_packets(struct tipc_link *l_ptr)
{
	struct sk_buff_head *outqueue = &l_ptr->outqueue;
	struct sk_buff *skb = l_ptr->next_out;
	struct tipc_msg *msg;
	u32 next, first;

	skb_queue_walk_from(outqueue, skb) {
		msg = buf_msg(skb);
		next = msg_seqno(msg);
		first = buf_seqno(skb_peek(outqueue));

		if (mod(next - first) < l_ptr->queue_limit[0]) {
			msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
			msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
			if (msg_user(msg) == MSG_BUNDLER)
				TIPC_SKB_CB(skb)->bundling = false;
			tipc_bearer_send(l_ptr->bearer_id, skb,
					 &l_ptr->media_addr);
			l_ptr->next_out = tipc_skb_queue_next(outqueue, skb);
		} else {
			break;
		}
	}
}

void tipc_link_reset_all(struct tipc_node *node)
{
	char addr_string[16];
	u32 i;

	tipc_node_lock(node);

	pr_warn("Resetting all links to %s\n",
		tipc_addr_string_fill(addr_string, node->addr));

	for (i = 0; i < MAX_BEARERS; i++) {
		if (node->links[i]) {
			link_print(node->links[i], "Resetting link\n");
			tipc_link_reset(node->links[i]);
		}
	}

	tipc_node_unlock(node);
}

static void link_retransmit_failure(struct tipc_link *l_ptr,
				    struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);

	if (l_ptr->addr) {
		/* Handle failure on standard link */
		link_print(l_ptr, "Resetting link\n");
		tipc_link_reset(l_ptr);

	} else {
		/* Handle failure on broadcast link */
		struct tipc_node *n_ptr;
		char addr_string[16];

		pr_info("Msg seq number: %u,  ", msg_seqno(msg));
		pr_cont("Outstanding acks: %lu\n",
			(unsigned long) TIPC_SKB_CB(buf)->handle);

		n_ptr = tipc_bclink_retransmit_to();
		tipc_node_lock(n_ptr);

		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_info("Broadcast link info for %s\n", addr_string);
		pr_info("Reception permitted: %d,  Acked: %u\n",
			n_ptr->bclink.recv_permitted,
			n_ptr->bclink.acked);
		pr_info("Last in: %u,  Oos state: %u,  Last sent: %u\n",
			n_ptr->bclink.last_in,
			n_ptr->bclink.oos_state,
			n_ptr->bclink.last_sent);

		tipc_node_unlock(n_ptr);

		tipc_bclink_set_flags(TIPC_BCLINK_RESET);
		l_ptr->stale_count = 0;
	}
}

void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
			  u32 retransmits)
{
	struct tipc_msg *msg;

	if (!skb)
		return;

	msg = buf_msg(skb);

	/* Detect repeated retransmit failures */
	if (l_ptr->last_retransmitted == msg_seqno(msg)) {
		if (++l_ptr->stale_count > 100) {
			link_retransmit_failure(l_ptr, skb);
			return;
		}
	} else {
		l_ptr->last_retransmitted = msg_seqno(msg);
		l_ptr->stale_count = 1;
	}

	skb_queue_walk_from(&l_ptr->outqueue, skb) {
		if (!retransmits || skb == l_ptr->next_out)
			break;
		msg = buf_msg(skb);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->bearer_id, skb, &l_ptr->media_addr);
		retransmits--;
		l_ptr->stats.retransmitted++;
	}
}

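/* link_retrieve_defq - splice the deferred queue onto the input list once
 * the buffer at its head is the next in-sequence packet
 */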
static void link_retrieve_defq(struct tipc_link *link,
			       struct sk_buff_head *list)
{
	u32 seq_no;

	if (skb_queue_empty(&link->deferred_queue))
		return;

	seq_no = buf_seqno(skb_peek(&link->deferred_queue));
	if (seq_no == mod(link->next_in_no))
		skb_queue_splice_tail_init(&link->deferred_queue, list);
}

/**
 * link_recv_buf_validate - validate basic format of received message
 *
 * This routine ensures a TIPC message has an acceptable header, and at least
 * as much data as the header indicates it should.  The routine also ensures
 * that the entire message header is stored in the main fragment of the message
 * buffer, to simplify future access to message header fields.
 *
 * Note: Having extra info present in the message header or data areas is OK.
 * TIPC will ignore the excess, under the assumption that it is optional info
 * introduced by a later release of the protocol.
 */
static int link_recv_buf_validate(struct sk_buff *buf)
{
	static u32 min_data_hdr_size[8] = {
		SHORT_H_SIZE, MCAST_H_SIZE, NAMED_H_SIZE, BASIC_H_SIZE,
		MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE
		};

	struct tipc_msg *msg;
	u32 tipc_hdr[2];
	u32 size;
	u32 hdr_size;
	u32 min_hdr_size;

	/* If this packet comes from the defer queue, the skb has already
	 * been validated
	 */
	if (unlikely(TIPC_SKB_CB(buf)->deferred))
		return 1;

	if (unlikely(buf->len < MIN_H_SIZE))
		return 0;

	msg = skb_header_pointer(buf, 0, sizeof(tipc_hdr), tipc_hdr);
	if (msg == NULL)
		return 0;

	if (unlikely(msg_version(msg) != TIPC_VERSION))
		return 0;

	size = msg_size(msg);
	hdr_size = msg_hdr_sz(msg);
	min_hdr_size = msg_isdata(msg) ?
		min_data_hdr_size[msg_type(msg)] : INT_H_SIZE;

	if (unlikely((hdr_size < min_hdr_size) ||
		     (size < hdr_size) ||
		     (buf->len < size) ||
		     (size - hdr_size > TIPC_MAX_USER_MSG_SIZE)))
		return 0;

	return pskb_may_pull(buf, hdr_size);
}

/**
 * tipc_rcv - process TIPC packets/messages arriving from off-node
 * @skb: TIPC packet
 * @b_ptr: pointer to bearer message arrived on
 *
 * Invoked with no locks held.  Bearer pointer must point to a valid bearer
 * structure (i.e. cannot be NULL), but bearer can be inactive.
 */
void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr)
{
	struct sk_buff_head head;
	struct tipc_node *n_ptr;
	struct tipc_link *l_ptr;
	struct sk_buff *skb1, *tmp;
	struct tipc_msg *msg;
	u32 seq_no;
	u32 ackd;
	u32 released;

	skb2list(skb, &head);

	while ((skb = __skb_dequeue(&head))) {
		/* Ensure message is well-formed */
		if (unlikely(!link_recv_buf_validate(skb)))
			goto discard;

		/* Ensure message data is a single contiguous unit */
		if (unlikely(skb_linearize(skb)))
			goto discard;

		/* Handle arrival of a non-unicast link message */
		msg = buf_msg(skb);

		if (unlikely(msg_non_seq(msg))) {
			if (msg_user(msg) ==  LINK_CONFIG)
				tipc_disc_rcv(skb, b_ptr);
			else
				tipc_bclink_rcv(skb);
			continue;
		}

		/* Discard unicast link messages destined for another node */
		if (unlikely(!msg_short(msg) &&
			     (msg_destnode(msg) != tipc_own_addr)))
			goto discard;

		/* Locate neighboring node that sent message */
		n_ptr = tipc_node_find(msg_prevnode(msg));
		if (unlikely(!n_ptr))
			goto discard;
		tipc_node_lock(n_ptr);

		/* Locate unicast link endpoint that should handle message */
		l_ptr = n_ptr->links[b_ptr->identity];
		if (unlikely(!l_ptr))
			goto unlock_discard;

		/* Verify that communication with node is currently allowed */
		if ((n_ptr->action_flags & TIPC_WAIT_PEER_LINKS_DOWN) &&
		    msg_user(msg) == LINK_PROTOCOL &&
		    (msg_type(msg) == RESET_MSG ||
		    msg_type(msg) == ACTIVATE_MSG) &&
		    !msg_redundant_link(msg))
			n_ptr->action_flags &= ~TIPC_WAIT_PEER_LINKS_DOWN;

		if (tipc_node_blocked(n_ptr))
			goto unlock_discard;

		/* Validate message sequence number info */
		seq_no = msg_seqno(msg);
		ackd = msg_ack(msg);

		/* Release acked messages */
		if (n_ptr->bclink.recv_permitted)
			tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));

		released = 0;
		skb_queue_walk_safe(&l_ptr->outqueue, skb1, tmp) {
			if (skb1 == l_ptr->next_out ||
			    more(buf_seqno(skb1), ackd))
				break;
			 __skb_unlink(skb1, &l_ptr->outqueue);
			 kfree_skb(skb1);
			 released = 1;
		}

		/* Try sending any messages link endpoint has pending */
		if (unlikely(l_ptr->next_out))
			tipc_link_push_packets(l_ptr);

		if (released && !skb_queue_empty(&l_ptr->waiting_sks)) {
			link_prepare_wakeup(l_ptr);
			l_ptr->owner->action_flags |= TIPC_WAKEUP_USERS;
		}

		/* Process the incoming packet */
		if (unlikely(!link_working_working(l_ptr))) {
			if (msg_user(msg) == LINK_PROTOCOL) {
				tipc_link_proto_rcv(l_ptr, skb);
				link_retrieve_defq(l_ptr, &head);
				tipc_node_unlock(n_ptr);
				continue;
			}

			/* Traffic message. Conditionally activate link */
			link_state_event(l_ptr, TRAFFIC_MSG_EVT);

			if (link_working_working(l_ptr)) {
				/* Re-insert buffer in front of queue */
				__skb_queue_head(&head, skb);
				tipc_node_unlock(n_ptr);
				continue;
			}
			goto unlock_discard;
		}

		/* Link is now in state WORKING_WORKING */
		if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
			link_handle_out_of_seq_msg(l_ptr, skb);
			link_retrieve_defq(l_ptr, &head);
			tipc_node_unlock(n_ptr);
			continue;
		}
		l_ptr->next_in_no++;
		if (unlikely(!skb_queue_empty(&l_ptr->deferred_queue)))
			link_retrieve_defq(l_ptr, &head);

		if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
			l_ptr->stats.sent_acks++;
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
		}

		if (tipc_link_prepare_input(l_ptr, &skb)) {
			tipc_node_unlock(n_ptr);
			continue;
		}
		tipc_node_unlock(n_ptr);

		if (tipc_link_input(l_ptr, skb) != 0)
			goto discard;
		continue;
unlock_discard:
		tipc_node_unlock(n_ptr);
discard:
		kfree_skb(skb);
	}
}

/**
 * tipc_link_prepare_input - process TIPC link messages
 *
 * returns nonzero if the message was consumed
 *
 * Node lock must be held
 */
static int tipc_link_prepare_input(struct tipc_link *l, struct sk_buff **buf)
{
	struct tipc_node *n;
	struct tipc_msg *msg;
	int res = -EINVAL;

	n = l->owner;
	msg = buf_msg(*buf);
	switch (msg_user(msg)) {
	case CHANGEOVER_PROTOCOL:
		if (tipc_link_tunnel_rcv(n, buf))
			res = 0;
		break;
	case MSG_FRAGMENTER:
		l->stats.recv_fragments++;
		if (tipc_buf_append(&l->reasm_buf, buf)) {
			l->stats.recv_fragmented++;
			res = 0;
		} else if (!l->reasm_buf) {
			tipc_link_reset(l);
		}
		break;
	case MSG_BUNDLER:
		l->stats.recv_bundles++;
		l->stats.recv_bundled += msg_msgcnt(msg);
		res = 0;
		break;
	case NAME_DISTRIBUTOR:
		n->bclink.recv_permitted = true;
		res = 0;
		break;
	case BCAST_PROTOCOL:
		tipc_link_sync_rcv(n, *buf);
		break;
	default:
		res = 0;
	}
	return res;
}

/**
 * tipc_link_input - Deliver message to higher layers
 */
static int tipc_link_input(struct tipc_link *l, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	int res = 0;

	switch (msg_user(msg)) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
	case CONN_MANAGER:
		tipc_sk_rcv(buf);
		break;
	case NAME_DISTRIBUTOR:
		tipc_named_rcv(buf);
		break;
	case MSG_BUNDLER:
		tipc_link_bundle_rcv(buf);
		break;
	default:
		res = -EINVAL;
	}
	return res;
}

/**
 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
 *
 * Returns increase in queue length (i.e. 0 or 1)
 */
u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
{
	struct sk_buff *skb1;
	u32 seq_no = buf_seqno(skb);

	/* Empty queue ? */
	if (skb_queue_empty(list)) {
		__skb_queue_tail(list, skb);
		return 1;
	}

	/* Last ? */
	if (less(buf_seqno(skb_peek_tail(list)), seq_no)) {
		__skb_queue_tail(list, skb);
		return 1;
	}

	/* Locate insertion point in queue, then insert; discard if duplicate */
	skb_queue_walk(list, skb1) {
		u32 curr_seqno = buf_seqno(skb1);

		if (seq_no == curr_seqno) {
			kfree_skb(skb);
			return 0;
		}

		if (less(seq_no, curr_seqno))
			break;
	}

	__skb_queue_before(list, skb1, skb);
	return 1;
}

/*
 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
 */
static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
				       struct sk_buff *buf)
{
	u32 seq_no = buf_seqno(buf);

	if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
		tipc_link_proto_rcv(l_ptr, buf);
		return;
	}

	/* Record OOS packet arrival (force mismatch on next timeout) */
	l_ptr->checkpoint--;

	/*
	 * Discard packet if a duplicate; otherwise add it to deferred queue
	 * and notify peer of gap as per protocol specification
	 */
	if (less(seq_no, mod(l_ptr->next_in_no))) {
		l_ptr->stats.duplicates++;
		kfree_skb(buf);
		return;
	}

	if (tipc_link_defer_pkt(&l_ptr->deferred_queue, buf)) {
		l_ptr->stats.deferred_recv++;
		TIPC_SKB_CB(buf)->deferred = true;
		if ((skb_queue_len(&l_ptr->deferred_queue) % 16) == 1)
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
	} else {
		l_ptr->stats.duplicates++;
	}
}

/*
 * Send protocol message to the other endpoint.
 */
void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
			  u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
{
	struct sk_buff *buf = NULL;
	struct tipc_msg *msg = l_ptr->pmsg;
	u32 msg_size = sizeof(l_ptr->proto_msg);
	int r_flag;

	/* Don't send protocol message during link changeover */
	if (l_ptr->exp_msg_count)
		return;

	/* Abort non-RESET send if communication with node is prohibited */
	if ((tipc_node_blocked(l_ptr->owner)) && (msg_typ != RESET_MSG))
		return;

	/* Create protocol message with "out-of-sequence" sequence number */
	msg_set_type(msg, msg_typ);
	msg_set_net_plane(msg, l_ptr->net_plane);
	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
	msg_set_last_bcast(msg, tipc_bclink_get_last_sent());

	if (msg_typ == STATE_MSG) {
		u32 next_sent = mod(l_ptr->next_out_no);

		if (!tipc_link_is_up(l_ptr))
			return;
		if (l_ptr->next_out)
			next_sent = buf_seqno(l_ptr->next_out);
		msg_set_next_sent(msg, next_sent);
		if (!skb_queue_empty(&l_ptr->deferred_queue)) {
			u32 rec = buf_seqno(skb_peek(&l_ptr->deferred_queue));
			gap = mod(rec - mod(l_ptr->next_in_no));
		}
		msg_set_seq_gap(msg, gap);
		if (gap)
			l_ptr->stats.sent_nacks++;
		msg_set_link_tolerance(msg, tolerance);
		msg_set_linkprio(msg, priority);
		msg_set_max_pkt(msg, ack_mtu);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
		msg_set_probe(msg, probe_msg != 0);
		if (probe_msg) {
			u32 mtu = l_ptr->max_pkt;

			if ((mtu < l_ptr->max_pkt_target) &&
			    link_working_working(l_ptr) &&
			    l_ptr->fsm_msg_cnt) {
				msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
				if (l_ptr->max_pkt_probes == 10) {
					l_ptr->max_pkt_target = (msg_size - 4);
					l_ptr->max_pkt_probes = 0;
					msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
				}
				l_ptr->max_pkt_probes++;
			}

			l_ptr->stats.sent_probes++;
		}
		l_ptr->stats.sent_states++;
	} else {		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
		msg_set_seq_gap(msg, 0);
		msg_set_next_sent(msg, 1);
		msg_set_probe(msg, 0);
		msg_set_link_tolerance(msg, l_ptr->tolerance);
		msg_set_linkprio(msg, l_ptr->priority);
		msg_set_max_pkt(msg, l_ptr->max_pkt_target);
	}

	r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
	msg_set_redundant_link(msg, r_flag);
	msg_set_linkprio(msg, l_ptr->priority);
	msg_set_size(msg, msg_size);

	msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));

	buf = tipc_buf_acquire(msg_size);
	if (!buf)
		return;

	skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
	buf->priority = TC_PRIO_CONTROL;

	tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
	l_ptr->unacked_window = 0;
	kfree_skb(buf);
}

/*
 * Receive protocol message :
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest address rules
 */
static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
{
	u32 rec_gap = 0;
	u32 max_pkt_info;
	u32 max_pkt_ack;
	u32 msg_tol;
	struct tipc_msg *msg = buf_msg(buf);

	/* Discard protocol message during link changeover */
	if (l_ptr->exp_msg_count)
		goto exit;

	if (l_ptr->net_plane != msg_net_plane(msg))
		if (tipc_own_addr > msg_prevnode(msg))
			l_ptr->net_plane = msg_net_plane(msg);

	switch (msg_type(msg)) {

	case RESET_MSG:
		if (!link_working_unknown(l_ptr) &&
		    (l_ptr->peer_session != INVALID_SESSION)) {
			if (less_eq(msg_session(msg), l_ptr->peer_session))
				break; /* duplicate or old reset: ignore */
		}

		if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
				link_working_unknown(l_ptr))) {
			/*
			 * peer has lost contact -- don't allow peer's links
			 * to reactivate before we recognize loss & clean up
			 */
			l_ptr->owner->action_flags |= TIPC_WAIT_OWN_LINKS_DOWN;
		}

		link_state_event(l_ptr, RESET_MSG);

		/* fall thru' */
	case ACTIVATE_MSG:
		/* Update link settings according other endpoint's values */
		strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));

		msg_tol = msg_link_tolerance(msg);
		if (msg_tol > l_ptr->tolerance)
			link_set_supervision_props(l_ptr, msg_tol);

		if (msg_linkprio(msg) > l_ptr->priority)
			l_ptr->priority = msg_linkprio(msg);

		max_pkt_info = msg_max_pkt(msg);
		if (max_pkt_info) {
			if (max_pkt_info < l_ptr->max_pkt_target)
				l_ptr->max_pkt_target = max_pkt_info;
			if (l_ptr->max_pkt > l_ptr->max_pkt_target)
				l_ptr->max_pkt = l_ptr->max_pkt_target;
		} else {
			l_ptr->max_pkt = l_ptr->max_pkt_target;
		}

		/* Synchronize broadcast link info, if not done previously */
		if (!tipc_node_is_up(l_ptr->owner)) {
			l_ptr->owner->bclink.last_sent =
				l_ptr->owner->bclink.last_in =
				msg_last_bcast(msg);
			l_ptr->owner->bclink.oos_state = 0;
		}

		l_ptr->peer_session = msg_session(msg);
		l_ptr->peer_bearer_id = msg_bearer_id(msg);

		if (msg_type(msg) == ACTIVATE_MSG)
			link_state_event(l_ptr, ACTIVATE_MSG);
		break;
	case STATE_MSG:

		msg_tol = msg_link_tolerance(msg);
		if (msg_tol)
			link_set_supervision_props(l_ptr, msg_tol);

		if (msg_linkprio(msg) &&
		    (msg_linkprio(msg) != l_ptr->priority)) {
			pr_warn("%s<%s>, priority change %u->%u\n",
				link_rst_msg, l_ptr->name, l_ptr->priority,
				msg_linkprio(msg));
			l_ptr->priority = msg_linkprio(msg);
			tipc_link_reset(l_ptr); /* Enforce change to take effect */
			break;
		}

		/* Record reception; force mismatch at next timeout: */
		l_ptr->checkpoint--;

		link_state_event(l_ptr, TRAFFIC_MSG_EVT);
		l_ptr->stats.recv_states++;
		if (link_reset_unknown(l_ptr))
			break;

		if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
			rec_gap = mod(msg_next_sent(msg) -
				      mod(l_ptr->next_in_no));
		}

		max_pkt_ack = msg_max_pkt(msg);
		if (max_pkt_ack > l_ptr->max_pkt) {
			l_ptr->max_pkt = max_pkt_ack;
			l_ptr->max_pkt_probes = 0;
		}

		max_pkt_ack = 0;
		if (msg_probe(msg)) {
			l_ptr->stats.recv_probes++;
			if (msg_size(msg) > sizeof(l_ptr->proto_msg))
				max_pkt_ack = msg_size(msg);
		}

		/* Protocol message before retransmits, reduce loss risk */
		if (l_ptr->owner->bclink.recv_permitted)
			tipc_bclink_update_link_state(l_ptr->owner,
						      msg_last_bcast(msg));

		if (rec_gap || (msg_probe(msg))) {
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, rec_gap, 0,
					     0, max_pkt_ack);
		}
		if (msg_seq_gap(msg)) {
			l_ptr->stats.recv_nacks++;
			tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->outqueue),
					     msg_seq_gap(msg));
		}
		break;
	}
exit:
	kfree_skb(buf);
}


/* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
 * a different bearer. Owner node is locked.
 */
static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
				  struct tipc_msg *tunnel_hdr,
				  struct tipc_msg *msg,
				  u32 selector)
{
	struct tipc_link *tunnel;
	struct sk_buff *skb;
	u32 length = msg_size(msg);

	tunnel = l_ptr->owner->active_links[selector & 1];
	if (!tipc_link_is_up(tunnel)) {
		pr_warn("%stunnel link no longer available\n", link_co_err);
		return;
	}
	msg_set_size(tunnel_hdr, length + INT_H_SIZE);
	skb = tipc_buf_acquire(length + INT_H_SIZE);
	if (!skb) {
		pr_warn("%sunable to send tunnel msg\n", link_co_err);
		return;
	}
	skb_copy_to_linear_data(skb, tunnel_hdr, INT_H_SIZE);
	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, msg, length);
	__tipc_link_xmit_skb(tunnel, skb);
}


/* tipc_link_failover_send_queue(): A link has gone down, but a second
 * link is still active. We can do failover. Tunnel the failing link's
 * whole send queue via the remaining link. This way, we don't lose
 * any packets, and sequence order is preserved for subsequent traffic
 * sent over the remaining link. Owner node is locked.
 */
void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
{
	u32 msgcount = skb_queue_len(&l_ptr->outqueue);
	struct tipc_link *tunnel = l_ptr->owner->active_links[0];
	struct tipc_msg tunnel_hdr;
	struct sk_buff *skb;
	int split_bundles;

	if (!tunnel)
		return;

	tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
		 ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
	msg_set_msgcnt(&tunnel_hdr, msgcount);

	if (skb_queue_empty(&l_ptr->outqueue)) {
		skb = tipc_buf_acquire(INT_H_SIZE);
		if (skb) {
			skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE);
			msg_set_size(&tunnel_hdr, INT_H_SIZE);
			__tipc_link_xmit_skb(tunnel, skb);
		} else {
			pr_warn("%sunable to send changeover msg\n",
				link_co_err);
		}
		return;
	}

	split_bundles = (l_ptr->owner->active_links[0] !=
			 l_ptr->owner->active_links[1]);

	skb_queue_walk(&l_ptr->outqueue, skb) {
		struct tipc_msg *msg = buf_msg(skb);

		if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
			struct tipc_msg *m = msg_get_wrapped(msg);
1665
			unchar *pos = (unchar *)m;
P
Per Liden 已提交
1666

1667
			msgcount = msg_msgcnt(msg);
P
Per Liden 已提交
1668
			while (msgcount--) {
1669
				msg_set_seqno(m, msg_seqno(msg));
1670 1671
				tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, m,
						      msg_link_selector(m));
P
Per Liden 已提交
1672 1673 1674 1675
				pos += align(msg_size(m));
				m = (struct tipc_msg *)pos;
			}
		} else {
1676 1677
			tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
					      msg_link_selector(msg));
P
Per Liden 已提交
1678 1679 1680 1681
		}
	}
}

/* tipc_link_dup_queue_xmit(): A second link has become active. Tunnel a
 * duplicate of the first link's send queue via the new link. This way, we
 * are guaranteed that currently queued packets from a socket are delivered
 * before future traffic from the same socket, even if this is using the
 * new link. The last arriving copy of each duplicate packet is dropped at
 * the receiving end by the regular protocol check, so packet cardinality
 * and sequence order is preserved per sender/receiver socket pair.
 * Owner node is locked.
 */
void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr,
			      struct tipc_link *tunnel)
{
	struct sk_buff *skb;
	struct tipc_msg tunnel_hdr;

	tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
		 DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_msgcnt(&tunnel_hdr, skb_queue_len(&l_ptr->outqueue));
	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
	skb_queue_walk(&l_ptr->outqueue, skb) {
		struct sk_buff *outskb;
		struct tipc_msg *msg = buf_msg(skb);
		u32 length = msg_size(msg);

		if (msg_user(msg) == MSG_BUNDLER)
			msg_set_type(msg, CLOSED_MSG);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));	/* Update */
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
		outskb = tipc_buf_acquire(length + INT_H_SIZE);
		if (outskb == NULL) {
			pr_warn("%sunable to send duplicate msg\n",
				link_co_err);
			return;
		}
		skb_copy_to_linear_data(outskb, &tunnel_hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(outskb, INT_H_SIZE, skb->data,
					       length);
		__tipc_link_xmit_skb(tunnel, outskb);
		if (!tipc_link_is_up(l_ptr))
			return;
	}
}

/**
 * buf_extract - extracts embedded TIPC message from another message
 * @skb: encapsulating message buffer
 * @from_pos: offset to extract from
 *
 * Returns a new message buffer containing an embedded message.  The
 * encapsulating message itself is left unchanged.
 */
static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
{
	struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
	u32 size = msg_size(msg);
	struct sk_buff *eb;

	eb = tipc_buf_acquire(size);
	if (eb)
		skb_copy_to_linear_data(eb, msg, size);
	return eb;
}

/* tipc_link_dup_rcv(): Receive a tunnelled DUPLICATE_MSG packet.
 * Owner node is locked.
 */
static void tipc_link_dup_rcv(struct tipc_link *l_ptr,
			      struct sk_buff *t_buf)
{
	struct sk_buff *buf;

	if (!tipc_link_is_up(l_ptr))
		return;

	buf = buf_extract(t_buf, INT_H_SIZE);
	if (buf == NULL) {
		pr_warn("%sfailed to extract inner dup pkt\n", link_co_err);
		return;
	}

	/* Add buffer to deferred queue, if applicable: */
	link_handle_out_of_seq_msg(l_ptr, buf);
}

/*  tipc_link_failover_rcv(): Receive a tunnelled ORIGINAL_MSG packet
 *  Owner node is locked.
 */
static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr,
					      struct sk_buff *t_buf)
{
	struct tipc_msg *t_msg = buf_msg(t_buf);
	struct sk_buff *buf = NULL;
	struct tipc_msg *msg;

	if (tipc_link_is_up(l_ptr))
		tipc_link_reset(l_ptr);

	/* First failover packet? */
	if (l_ptr->exp_msg_count == START_CHANGEOVER)
		l_ptr->exp_msg_count = msg_msgcnt(t_msg);

	/* Should there be an inner packet? */
	if (l_ptr->exp_msg_count) {
		l_ptr->exp_msg_count--;
		buf = buf_extract(t_buf, INT_H_SIZE);
		if (buf == NULL) {
			pr_warn("%sno inner failover pkt\n", link_co_err);
			goto exit;
		}
		msg = buf_msg(buf);

		if (less(msg_seqno(msg), l_ptr->reset_checkpoint)) {
			kfree_skb(buf);
			buf = NULL;
			goto exit;
		}
		if (msg_user(msg) == MSG_FRAGMENTER) {
			l_ptr->stats.recv_fragments++;
			tipc_buf_append(&l_ptr->reasm_buf, &buf);
		}
	}
exit:
	if ((l_ptr->exp_msg_count == 0) && (l_ptr->flags & LINK_STOPPED)) {
		tipc_node_detach_link(l_ptr->owner, l_ptr);
		kfree(l_ptr);
	}
	return buf;
}

/*  tipc_link_tunnel_rcv(): Receive a tunnelled packet, sent
 *  via other link as result of a failover (ORIGINAL_MSG) or
 *  a new active link (DUPLICATE_MSG). Failover packets are
 *  returned to the active link for delivery upwards.
 *  Owner node is locked.
 */
static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
				struct sk_buff **buf)
{
	struct sk_buff *t_buf = *buf;
	struct tipc_link *l_ptr;
	struct tipc_msg *t_msg = buf_msg(t_buf);
	u32 bearer_id = msg_bearer_id(t_msg);

	*buf = NULL;

	if (bearer_id >= MAX_BEARERS)
		goto exit;

	l_ptr = n_ptr->links[bearer_id];
	if (!l_ptr)
		goto exit;

	if (msg_type(t_msg) == DUPLICATE_MSG)
		tipc_link_dup_rcv(l_ptr, t_buf);
	else if (msg_type(t_msg) == ORIGINAL_MSG)
		*buf = tipc_link_failover_rcv(l_ptr, t_buf);
	else
		pr_warn("%sunknown tunnel pkt received\n", link_co_err);
exit:
	kfree_skb(t_buf);
	return *buf != NULL;
}

/*
 *  Bundler functionality:
 */
void tipc_link_bundle_rcv(struct sk_buff *buf)
{
	u32 msgcount = msg_msgcnt(buf_msg(buf));
	u32 pos = INT_H_SIZE;
	struct sk_buff *obuf;
	struct tipc_msg *omsg;

	while (msgcount--) {
		obuf = buf_extract(buf, pos);
		if (obuf == NULL) {
			pr_warn("Link unable to unbundle message(s)\n");
			break;
		}
		omsg = buf_msg(obuf);
		pos += align(msg_size(omsg));
		if (msg_isdata(omsg)) {
			if (unlikely(msg_type(omsg) == TIPC_MCAST_MSG))
				tipc_sk_mcast_rcv(obuf);
			else
				tipc_sk_rcv(obuf);
		} else if (msg_user(omsg) == CONN_MANAGER) {
			tipc_sk_rcv(obuf);
		} else if (msg_user(omsg) == NAME_DISTRIBUTOR) {
			tipc_named_rcv(obuf);
		} else {
			pr_warn("Illegal bundled msg: %u\n", msg_user(omsg));
			kfree_skb(obuf);
		}
	}
	kfree_skb(buf);
}

static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance)
{
	if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
		return;

	l_ptr->tolerance = tolerance;
	l_ptr->continuity_interval =
		((tolerance / 4) > 500) ? 500 : tolerance / 4;
	l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
}
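
/* Worked example (illustrative, not part of the original source): with
 * tolerance = 1000 ms, continuity_interval becomes min(1000 / 4, 500) = 250 ms
 * and abort_limit becomes 1000 / (250 / 4) = 16 (integer division), i.e.
 * roughly the number of unanswered continuity probes tolerated before the
 * link gives up.
 */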

void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
{
	/* Data messages from this node, inclusive FIRST_FRAGM */
	l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window;
	l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4;
	l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5;
	l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6;
	/* Transiting data messages, inclusive FIRST_FRAGM */
	l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300;
	l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600;
	l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900;
	l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200;
	l_ptr->queue_limit[CONN_MANAGER] = 1200;
	l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
	l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
	/* FRAGMENT and LAST_FRAGMENT packets */
	l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
}
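
/* Worked example (illustrative, not part of the original source): for
 * window = 50 the per-importance limits set above become 50, 64, 80 and 96
 * packets (integer division: (50 / 3) * 4 = 64, and so on), while the
 * transit and protocol queue limits keep the fixed values assigned here.
 */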

/* tipc_link_find_owner - locate owner node of link by link's name
 * @link_name: pointer to link name string
 * @bearer_id: pointer to index in 'node->links' array where the link was found.
 *
 * Returns pointer to node owning the link, or NULL if no matching link is found.
 */
static struct tipc_node *tipc_link_find_owner(const char *link_name,
					      unsigned int *bearer_id)
{
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;
	struct tipc_node *found_node = NULL;
	int i;

	*bearer_id = 0;
	rcu_read_lock();
	list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
		tipc_node_lock(n_ptr);
		for (i = 0; i < MAX_BEARERS; i++) {
			l_ptr = n_ptr->links[i];
			if (l_ptr && !strcmp(l_ptr->name, link_name)) {
				*bearer_id = i;
				found_node = n_ptr;
				break;
			}
		}
		tipc_node_unlock(n_ptr);
		if (found_node)
			break;
	}
	rcu_read_unlock();

	return found_node;
}

/**
 * link_value_is_valid -- validate proposed link tolerance/priority/window
 *
 * @cmd: value type (TIPC_CMD_SET_LINK_*)
 * @new_value: the new value
 *
 * Returns 1 if value is within range, 0 if not.
 */
static int link_value_is_valid(u16 cmd, u32 new_value)
{
	switch (cmd) {
	case TIPC_CMD_SET_LINK_TOL:
		return (new_value >= TIPC_MIN_LINK_TOL) &&
			(new_value <= TIPC_MAX_LINK_TOL);
	case TIPC_CMD_SET_LINK_PRI:
		return (new_value <= TIPC_MAX_LINK_PRI);
	case TIPC_CMD_SET_LINK_WINDOW:
		return (new_value >= TIPC_MIN_LINK_WIN) &&
			(new_value <= TIPC_MAX_LINK_WIN);
	}
	return 0;
}
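
/* Minimal usage sketch (illustrative only, not part of the original source;
 * example_validate_link_value() is a hypothetical helper, not a TIPC API):
 * reject an out-of-range value before touching any link, bearer or media.
 */
static inline int example_validate_link_value(u16 cmd, u32 new_value)
{
	if (!link_value_is_valid(cmd, new_value))
		return -EINVAL;	/* outside the range allowed for 'cmd' */
	return 0;
}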

/**
 * link_cmd_set_value - change priority/tolerance/window for link/bearer/media
 * @name: ptr to link, bearer, or media name
 * @new_value: new value of link, bearer, or media setting
 * @cmd: which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*)
 *
 * Caller must hold RTNL lock to ensure link/bearer/media is not deleted.
 *
 * Returns 0 if value updated and negative value on error.
 */
static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
{
	struct tipc_node *node;
	struct tipc_link *l_ptr;
	struct tipc_bearer *b_ptr;
	struct tipc_media *m_ptr;
	int bearer_id;
	int res = 0;

	node = tipc_link_find_owner(name, &bearer_id);
	if (node) {
		tipc_node_lock(node);
		l_ptr = node->links[bearer_id];

		if (l_ptr) {
			switch (cmd) {
			case TIPC_CMD_SET_LINK_TOL:
				link_set_supervision_props(l_ptr, new_value);
				tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0,
						     new_value, 0, 0);
				break;
			case TIPC_CMD_SET_LINK_PRI:
				l_ptr->priority = new_value;
				tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0,
						     0, new_value, 0);
				break;
			case TIPC_CMD_SET_LINK_WINDOW:
				tipc_link_set_queue_limits(l_ptr, new_value);
				break;
			default:
				res = -EINVAL;
				break;
			}
		}
		tipc_node_unlock(node);
		return res;
	}

	b_ptr = tipc_bearer_find(name);
	if (b_ptr) {
		switch (cmd) {
		case TIPC_CMD_SET_LINK_TOL:
			b_ptr->tolerance = new_value;
			break;
		case TIPC_CMD_SET_LINK_PRI:
			b_ptr->priority = new_value;
			break;
		case TIPC_CMD_SET_LINK_WINDOW:
			b_ptr->window = new_value;
			break;
		default:
			res = -EINVAL;
			break;
		}
		return res;
	}

	m_ptr = tipc_media_find(name);
	if (!m_ptr)
		return -ENODEV;
	switch (cmd) {
	case TIPC_CMD_SET_LINK_TOL:
		m_ptr->tolerance = new_value;
		break;
	case TIPC_CMD_SET_LINK_PRI:
		m_ptr->priority = new_value;
		break;
	case TIPC_CMD_SET_LINK_WINDOW:
		m_ptr->window = new_value;
		break;
	default:
		res = -EINVAL;
		break;
	}
	return res;
}

struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space,
				     u16 cmd)
{
	struct tipc_link_config *args;
	u32 new_value;
	int res;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	args = (struct tipc_link_config *)TLV_DATA(req_tlv_area);
	new_value = ntohl(args->value);

	if (!link_value_is_valid(cmd, new_value))
		return tipc_cfg_reply_error_string(
			"cannot change, value invalid");

	if (!strcmp(args->name, tipc_bclink_name)) {
		if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
		    (tipc_bclink_set_queue_limits(new_value) == 0))
			return tipc_cfg_reply_none();
		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
						   " (cannot change setting on broadcast link)");
	}

	res = link_cmd_set_value(args->name, new_value, cmd);
	if (res)
		return tipc_cfg_reply_error_string("cannot change link setting");

	return tipc_cfg_reply_none();
}

/**
 * link_reset_statistics - reset link statistics
 * @l_ptr: pointer to link
 */
static void link_reset_statistics(struct tipc_link *l_ptr)
{
	memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
	l_ptr->stats.sent_info = l_ptr->next_out_no;
	l_ptr->stats.recv_info = l_ptr->next_in_no;
}

struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
{
	char *link_name;
	struct tipc_link *l_ptr;
	struct tipc_node *node;
	unsigned int bearer_id;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	link_name = (char *)TLV_DATA(req_tlv_area);
	if (!strcmp(link_name, tipc_bclink_name)) {
		if (tipc_bclink_reset_stats())
			return tipc_cfg_reply_error_string("link not found");
		return tipc_cfg_reply_none();
	}
	node = tipc_link_find_owner(link_name, &bearer_id);
	if (!node)
		return tipc_cfg_reply_error_string("link not found");

	tipc_node_lock(node);
	l_ptr = node->links[bearer_id];
	if (!l_ptr) {
		tipc_node_unlock(node);
		return tipc_cfg_reply_error_string("link not found");
	}
	link_reset_statistics(l_ptr);
	tipc_node_unlock(node);
	return tipc_cfg_reply_none();
}

/**
 * percent - convert count to a percentage of total (rounding up or down)
 */
static u32 percent(u32 count, u32 total)
{
	return (count * 100 + (total / 2)) / total;
}
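
/* Worked example (illustrative, not part of the original source): with
 * total = 3, percent(1, 3) = (100 + 1) / 3 = 33 and
 * percent(2, 3) = (200 + 1) / 3 = 67, so shares are rounded to the nearest
 * whole percent instead of being truncated.
 */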

/**
 * tipc_link_stats - print link statistics
 * @name: link name
 * @buf: print buffer area
 * @buf_size: size of print buffer area
 *
 * Returns length of print buffer data string (or 0 if error)
 */
static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
{
	struct tipc_link *l;
	struct tipc_stats *s;
	struct tipc_node *node;
	char *status;
	u32 profile_total = 0;
	unsigned int bearer_id;
	int ret;

	if (!strcmp(name, tipc_bclink_name))
		return tipc_bclink_stats(buf, buf_size);

	node = tipc_link_find_owner(name, &bearer_id);
	if (!node)
		return 0;

	tipc_node_lock(node);

	l = node->links[bearer_id];
	if (!l) {
		tipc_node_unlock(node);
		return 0;
	}

	s = &l->stats;

	if (tipc_link_is_active(l))
		status = "ACTIVE";
	else if (tipc_link_is_up(l))
		status = "STANDBY";
	else
		status = "DEFUNCT";

	ret = tipc_snprintf(buf, buf_size, "Link <%s>\n"
			    "  %s  MTU:%u  Priority:%u  Tolerance:%u ms"
			    "  Window:%u packets\n",
			    l->name, status, l->max_pkt, l->priority,
			    l->tolerance, l->queue_limit[0]);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  RX packets:%u fragments:%u/%u bundles:%u/%u\n",
			     l->next_in_no - s->recv_info, s->recv_fragments,
			     s->recv_fragmented, s->recv_bundles,
			     s->recv_bundled);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  TX packets:%u fragments:%u/%u bundles:%u/%u\n",
			     l->next_out_no - s->sent_info, s->sent_fragments,
			     s->sent_fragmented, s->sent_bundles,
			     s->sent_bundled);

	profile_total = s->msg_length_counts;
	if (!profile_total)
		profile_total = 1;

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  TX profile sample:%u packets  average:%u octets\n"
			     "  0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
			     "-16384:%u%% -32768:%u%% -66000:%u%%\n",
			     s->msg_length_counts,
			     s->msg_lengths_total / profile_total,
			     percent(s->msg_length_profile[0], profile_total),
			     percent(s->msg_length_profile[1], profile_total),
			     percent(s->msg_length_profile[2], profile_total),
			     percent(s->msg_length_profile[3], profile_total),
			     percent(s->msg_length_profile[4], profile_total),
			     percent(s->msg_length_profile[5], profile_total),
			     percent(s->msg_length_profile[6], profile_total));

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  RX states:%u probes:%u naks:%u defs:%u"
			     " dups:%u\n", s->recv_states, s->recv_probes,
			     s->recv_nacks, s->deferred_recv, s->duplicates);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  TX states:%u probes:%u naks:%u acks:%u"
			     " dups:%u\n", s->sent_states, s->sent_probes,
			     s->sent_nacks, s->sent_acks, s->retransmitted);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  Congestion link:%u  Send queue"
			     " max:%u avg:%u\n", s->link_congs,
			     s->max_queue_sz, s->queue_sz_counts ?
			     (s->accu_queue_sz / s->queue_sz_counts) : 0);

	tipc_node_unlock(node);
	return ret;
}

struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
{
	struct sk_buff *buf;
	struct tlv_desc *rep_tlv;
	int str_len;
	int pb_len;
	char *pb;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
	if (!buf)
		return NULL;

	rep_tlv = (struct tlv_desc *)buf->data;
	pb = TLV_DATA(rep_tlv);
	pb_len = ULTRA_STRING_MAX_LEN;
	str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area),
				  pb, pb_len);
	if (!str_len) {
		kfree_skb(buf);
		return tipc_cfg_reply_error_string("link not found");
	}
	str_len += 1;	/* for "\0" */
	skb_put(buf, TLV_SPACE(str_len));
	TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);

	return buf;
}

/**
 * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination
 * @dest: network address of destination node
 * @selector: used to select from set of active links
 *
 * If no active link can be found, uses default maximum packet size.
 */
u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
{
	struct tipc_node *n_ptr;
	struct tipc_link *l_ptr;
	u32 res = MAX_PKT_DEFAULT;

	if (dest == tipc_own_addr)
		return MAX_MSG_SIZE;

	n_ptr = tipc_node_find(dest);
	if (n_ptr) {
		tipc_node_lock(n_ptr);
		l_ptr = n_ptr->active_links[selector & 1];
		if (l_ptr)
			res = l_ptr->max_pkt;
		tipc_node_unlock(n_ptr);
	}
	return res;
}
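
/* Minimal usage sketch (illustrative only, not part of the original source;
 * example_needs_fragmentation() is a hypothetical helper, not a TIPC API):
 * a sender compares its message size against the destination's link MTU to
 * decide whether the message must be fragmented.
 */
static inline bool example_needs_fragmentation(u32 dnode, u32 selector,
					       u32 msg_sz)
{
	return msg_sz > tipc_link_get_max_pkt(dnode, selector);
}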

static void link_print(struct tipc_link *l_ptr, const char *str)
{
	struct tipc_bearer *b_ptr;

	rcu_read_lock();
	b_ptr = rcu_dereference_rtnl(bearer_list[l_ptr->bearer_id]);
	if (b_ptr)
		pr_info("%s Link %x<%s>:", str, l_ptr->addr, b_ptr->name);
	rcu_read_unlock();

	if (link_working_unknown(l_ptr))
		pr_cont(":WU\n");
	else if (link_reset_reset(l_ptr))
		pr_cont(":RR\n");
	else if (link_reset_unknown(l_ptr))
		pr_cont(":RU\n");
	else if (link_working_working(l_ptr))
		pr_cont(":WW\n");
	else
		pr_cont("\n");
}

/* Parse and validate nested (link) properties valid for media, bearer and link
 */
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
	int err;

	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
			       tipc_nl_prop_policy);
	if (err)
		return err;

	if (props[TIPC_NLA_PROP_PRIO]) {
		u32 prio;

		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (prio > TIPC_MAX_LINK_PRI)
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_TOL]) {
		u32 tol;

		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_WIN]) {
		u32 win;

		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
			return -EINVAL;
	}

	return 0;
}
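
/* Illustrative example (not part of the original source): a nest such as
 *   TIPC_NLA_LINK_PROP
 *     TIPC_NLA_PROP_TOL = 1500
 *     TIPC_NLA_PROP_WIN = 50
 * passes this validation, whereas TIPC_NLA_PROP_PRIO = 100 would be rejected
 * because it exceeds TIPC_MAX_LINK_PRI.
 */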

int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	int res = 0;
	int bearer_id;
	char *name;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	node = tipc_link_find_owner(name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_lock(node);

	link = node->links[bearer_id];
	if (!link) {
		res = -EINVAL;
		goto out;
	}

	if (attrs[TIPC_NLA_LINK_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
					      props);
		if (err) {
			res = err;
			goto out;
		}

		if (props[TIPC_NLA_PROP_TOL]) {
			u32 tol;

			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
			link_set_supervision_props(link, tol);
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0, 0);
		}
		if (props[TIPC_NLA_PROP_PRIO]) {
			u32 prio;

			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
			link->priority = prio;
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio, 0);
		}
		if (props[TIPC_NLA_PROP_WIN]) {
			u32 win;

			win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
			tipc_link_set_queue_limits(link, win);
		}
	}

out:
	tipc_node_unlock(node);

	return res;
}

static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, s->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, s->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i <  ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}

/* Caller should hold appropriate locks to protect the link */
static int __tipc_nl_add_link(struct tipc_nl_msg *msg, struct tipc_link *link)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
			tipc_cluster_mask(tipc_own_addr)))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->max_pkt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->next_in_no))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->next_out_no))
		goto attr_msg_full;

	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
			goto attr_msg_full;
	if (tipc_link_is_active(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
			goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
			link->queue_limit[TIPC_LOW_IMPORTANCE]))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);
	if (err)
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

/* Caller should hold node lock  */
static int __tipc_nl_add_node_links(struct tipc_nl_msg *msg,
				    struct tipc_node *node,
				    u32 *prev_link)
{
	u32 i;
	int err;

	for (i = *prev_link; i < MAX_BEARERS; i++) {
		*prev_link = i;

		if (!node->links[i])
			continue;

		err = __tipc_nl_add_link(msg, node->links[i]);
		if (err)
			return err;
	}
	*prev_link = 0;

	return 0;
}

int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tipc_node *node;
	struct tipc_nl_msg msg;
	u32 prev_node = cb->args[0];
	u32 prev_link = cb->args[1];
	int done = cb->args[2];
	int err;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();

	if (prev_node) {
		node = tipc_node_find(prev_node);
		if (!node) {
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistence check to fail in the netlink callback
			 * handler. Resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			goto out;
		}

		list_for_each_entry_continue_rcu(node, &tipc_node_list, list) {
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(&msg, node, &prev_link);
			tipc_node_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	} else {
		err = tipc_nl_add_bc_link(&msg);
		if (err)
			goto out;

		list_for_each_entry_rcu(node, &tipc_node_list, list) {
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(&msg, node, &prev_link);
			tipc_node_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	}
	done = 1;
out:
	rcu_read_unlock();

	cb->args[0] = prev_node;
	cb->args[1] = prev_link;
	cb->args[2] = done;

	return skb->len;
}

int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *ans_skb;
	struct tipc_nl_msg msg;
	struct tipc_link *link;
	struct tipc_node *node;
	char *name;
	int bearer_id;
	int err;

	if (!info->attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);
	node = tipc_link_find_owner(name, &bearer_id);
	if (!node)
		return -EINVAL;

	ans_skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!ans_skb)
		return -ENOMEM;

	msg.skb = ans_skb;
	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	tipc_node_lock(node);
	link = node->links[bearer_id];
	if (!link) {
		err = -EINVAL;
		goto err_out;
	}

	err = __tipc_nl_add_link(&msg, link);
	if (err)
		goto err_out;

	tipc_node_unlock(node);

	return genlmsg_reply(ans_skb, info);

err_out:
	tipc_node_unlock(node);
	nlmsg_free(ans_skb);

	return err;
}

int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	char *link_name;
	unsigned int bearer_id;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(link_name, tipc_bclink_name) == 0) {
		err = tipc_bclink_reset_stats();
		if (err)
			return err;
		return 0;
	}

	node = tipc_link_find_owner(link_name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_lock(node);

	link = node->links[bearer_id];
	if (!link) {
		tipc_node_unlock(node);
		return -EINVAL;
	}

	link_reset_statistics(link);

	tipc_node_unlock(node);

	return 0;
}