link.c 59.7 KB
Newer Older
P
Per Liden 已提交
1 2
/*
 * net/tipc/link.c: TIPC link code
3
 *
4
 * Copyright (c) 1996-2007, 2012-2015, Ericsson AB
5
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
P
Per Liden 已提交
6 7
 * All rights reserved.
 *
P
Per Liden 已提交
8
 * Redistribution and use in source and binary forms, with or without
P
Per Liden 已提交
9 10
 * modification, are permitted provided that the following conditions are met:
 *
P
Per Liden 已提交
11 12 13 14 15 16 17 18
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
P
Per Liden 已提交
19
 *
P
Per Liden 已提交
20 21 22 23 24 25 26 27 28 29 30 31 32 33
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
P
Per Liden 已提交
34 35 36 37
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
38
#include "subscr.h"
P
Per Liden 已提交
39
#include "link.h"
40
#include "bcast.h"
41
#include "socket.h"
P
Per Liden 已提交
42 43
#include "name_distr.h"
#include "discover.h"
44
#include "netlink.h"
P
Per Liden 已提交
45

46 47
#include <linux/pkt_sched.h>

48 49 50 51 52 53
/*
 * Error message prefixes
 */
static const char *link_co_err = "Link changeover error, ";
static const char *link_rst_msg = "Resetting link ";
static const char *link_unk_evt = "Unknown link event ";
P
Per Liden 已提交
54

55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70
/* Netlink attribute policy for link-level attributes.
 * TIPC_NLA_LINK_NAME is length-bounded to TIPC_MAX_LINK_NAME so userspace
 * cannot feed an oversized link name into the parser.
 */
static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
	[TIPC_NLA_LINK_UNSPEC]		= { .type = NLA_UNSPEC },
	[TIPC_NLA_LINK_NAME] = {
		.type = NLA_STRING,
		.len = TIPC_MAX_LINK_NAME
	},
	[TIPC_NLA_LINK_MTU]		= { .type = NLA_U32 },
	[TIPC_NLA_LINK_BROADCAST]	= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_UP]		= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_ACTIVE]		= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_PROP]		= { .type = NLA_NESTED },
	[TIPC_NLA_LINK_STATS]		= { .type = NLA_NESTED },
	[TIPC_NLA_LINK_RX]		= { .type = NLA_U32 },
	[TIPC_NLA_LINK_TX]		= { .type = NLA_U32 }
};

71 72 73 74 75 76 77 78
/* Properties valid for media, bearer and link */
static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
	[TIPC_NLA_PROP_UNSPEC]		= { .type = NLA_UNSPEC },
	[TIPC_NLA_PROP_PRIO]		= { .type = NLA_U32 },
	[TIPC_NLA_PROP_TOL]		= { .type = NLA_U32 },
	[TIPC_NLA_PROP_WIN]		= { .type = NLA_U32 }
};

79 80 81 82 83
/*
 * Out-of-range value for link session numbers
 */
#define INVALID_SESSION 0x10000

84 85
/*
 * Link state events:
P
Per Liden 已提交
86 87 88
 */
#define  STARTING_EVT    856384768	/* link processing trigger */
#define  TRAFFIC_MSG_EVT 560815u	/* rx'd ??? */
89
#define  SILENCE_EVT     560817u	/* timer dicovered silence from peer */
P
Per Liden 已提交
90

91
/*
92
 * State value stored in 'failover_pkts'
P
Per Liden 已提交
93
 */
94
#define FIRST_FAILOVER 0xffffu
P
Per Liden 已提交
95

96 97 98 99
static void link_handle_out_of_seq_msg(struct tipc_link *link,
				       struct sk_buff *skb);
static void tipc_link_proto_rcv(struct tipc_link *link,
				struct sk_buff *skb);
100
static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol);
101 102 103
static void link_state_event(struct tipc_link *l_ptr, u32 event);
static void link_reset_statistics(struct tipc_link *l_ptr);
static void link_print(struct tipc_link *l_ptr, const char *str);
104 105
static void tipc_link_sync_xmit(struct tipc_link *l);
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
106 107
static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
108
static bool tipc_link_failover_rcv(struct tipc_link *l, struct sk_buff **skb);
109
static void link_set_timer(struct tipc_link *link, unsigned long time);
P
Per Liden 已提交
110
/*
S
Sam Ravnborg 已提交
111
 *  Simple link routines
P
Per Liden 已提交
112
 */
S
Sam Ravnborg 已提交
113
/* align - round @i up to the next multiple of 4 */
static unsigned int align(unsigned int i)
{
	unsigned int rem = i & 3u;

	return rem ? i + (4u - rem) : i;
}

118 119 120 121 122 123 124 125 126 127 128 129 130 131 132
/* tipc_link_release - kref release callback; frees the link structure */
static void tipc_link_release(struct kref *kref)
{
	kfree(container_of(kref, struct tipc_link, ref));
}

/* tipc_link_get - take a reference on the link */
static void tipc_link_get(struct tipc_link *l_ptr)
{
	kref_get(&l_ptr->ref);
}

/* tipc_link_put - drop a reference; frees the link when it hits zero */
static void tipc_link_put(struct tipc_link *l_ptr)
{
	kref_put(&l_ptr->ref, tipc_link_release);
}

133 134
static struct tipc_link *tipc_parallel_link(struct tipc_link *l)
{
135 136 137 138 139
	struct tipc_node *n = l->owner;

	if (node_active_link(n, 0) != l)
		return node_active_link(n, 0);
	return node_active_link(n, 1);
140 141
}

P
Per Liden 已提交
142
/*
S
Sam Ravnborg 已提交
143
 *  Simple non-static link routines (i.e. referenced outside this file)
P
Per Liden 已提交
144
 */
145
/* tipc_link_is_up - return non-zero if the link is in a working state
 *
 * A NULL link is treated as down.
 */
int tipc_link_is_up(struct tipc_link *l_ptr)
{
	int up;

	if (!l_ptr)
		return 0;
	up = link_working_working(l_ptr) || link_working_unknown(l_ptr);
	return up;
}

152
int tipc_link_is_active(struct tipc_link *l)
P
Per Liden 已提交
153
{
154 155 156
	struct tipc_node *n = l->owner;

	return (node_active_link(n, 0) == l) || (node_active_link(n, 1) == l);
P
Per Liden 已提交
157 158 159 160 161 162
}

/**
 * link_timeout - handle expiration of link timer
 * @l_ptr: pointer to link
 */
163
static void link_timeout(unsigned long data)
P
Per Liden 已提交
164
{
165
	struct tipc_link *l_ptr = (struct tipc_link *)data;
166 167
	struct sk_buff *skb;

168
	tipc_node_lock(l_ptr->owner);
P
Per Liden 已提交
169 170

	/* update counters used in statistical profiling of send traffic */
J
Jon Paul Maloy 已提交
171
	l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->transmq);
P
Per Liden 已提交
172 173
	l_ptr->stats.queue_sz_counts++;

J
Jon Paul Maloy 已提交
174
	skb = skb_peek(&l_ptr->transmq);
175 176
	if (skb) {
		struct tipc_msg *msg = buf_msg(skb);
P
Per Liden 已提交
177 178
		u32 length = msg_size(msg);

179 180
		if ((msg_user(msg) == MSG_FRAGMENTER) &&
		    (msg_type(msg) == FIRST_FRAGMENT)) {
P
Per Liden 已提交
181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203
			length = msg_size(msg_get_wrapped(msg));
		}
		if (length) {
			l_ptr->stats.msg_lengths_total += length;
			l_ptr->stats.msg_length_counts++;
			if (length <= 64)
				l_ptr->stats.msg_length_profile[0]++;
			else if (length <= 256)
				l_ptr->stats.msg_length_profile[1]++;
			else if (length <= 1024)
				l_ptr->stats.msg_length_profile[2]++;
			else if (length <= 4096)
				l_ptr->stats.msg_length_profile[3]++;
			else if (length <= 16384)
				l_ptr->stats.msg_length_profile[4]++;
			else if (length <= 32768)
				l_ptr->stats.msg_length_profile[5]++;
			else
				l_ptr->stats.msg_length_profile[6]++;
		}
	}

	/* do all other link processing performed on a periodic basis */
204 205 206
	if (l_ptr->silent_intv_cnt || tipc_bclink_acks_missing(l_ptr->owner))
		link_state_event(l_ptr, SILENCE_EVT);
	l_ptr->silent_intv_cnt++;
J
Jon Paul Maloy 已提交
207
	if (skb_queue_len(&l_ptr->backlogq))
208
		tipc_link_push_packets(l_ptr);
209
	link_set_timer(l_ptr, l_ptr->keepalive_intv);
210
	tipc_node_unlock(l_ptr->owner);
211
	tipc_link_put(l_ptr);
P
Per Liden 已提交
212 213
}

214
static void link_set_timer(struct tipc_link *link, unsigned long time)
P
Per Liden 已提交
215
{
216 217
	if (!mod_timer(&link->timer, jiffies + time))
		tipc_link_get(link);
P
Per Liden 已提交
218 219 220
}

/**
 * tipc_link_create - create a new link
 * @n_ptr: pointer to associated node
 * @b_ptr: pointer to associated bearer
 * @media_addr: media address to use when sending messages over link
 * @inputq: queue where received user messages are delivered
 * @namedq: queue where received name-table messages are delivered
 *
 * Returns pointer to link, or NULL on error (limit reached, duplicate
 * link on this bearer, or allocation failure).
 */
struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
				   struct tipc_bearer *b_ptr,
				   const struct tipc_media_addr *media_addr,
				   struct sk_buff_head *inputq,
				   struct sk_buff_head *namedq)
{
	struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
	struct tipc_link *l_ptr;
	struct tipc_msg *msg;
	char *if_name;
	char addr_string[16];
	u32 peer = n_ptr->addr;

	/* Refuse more links than the node can hold */
	if (n_ptr->link_cnt >= MAX_BEARERS) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Cannot establish %uth link to %s. Max %u allowed.\n",
		       n_ptr->link_cnt, addr_string, MAX_BEARERS);
		return NULL;
	}

	/* Only one link per bearer to a given peer */
	if (n_ptr->links[b_ptr->identity].link) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish second link on <%s> to %s\n",
		       b_ptr->name, addr_string);
		return NULL;
	}

	l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
	if (!l_ptr) {
		pr_warn("Link creation failed, no memory\n");
		return NULL;
	}
	kref_init(&l_ptr->ref);
	l_ptr->addr = peer;
	if_name = strchr(b_ptr->name, ':') + 1;
	sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(tn->own_addr), tipc_cluster(tn->own_addr),
		tipc_node(tn->own_addr),
		if_name,
		tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
		/* note: peer i/f name is updated by reset/activate message */
	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
	l_ptr->owner = n_ptr;
	l_ptr->peer_session = INVALID_SESSION;
	l_ptr->bearer_id = b_ptr->identity;
	link_set_supervision_props(l_ptr, b_ptr->tolerance);
	l_ptr->state = RESET_UNKNOWN;

	/* Pre-build the link protocol (RESET/STATE) message template */
	l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
	msg = l_ptr->pmsg;
	tipc_msg_init(tn->own_addr, msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE,
		      l_ptr->addr);
	msg_set_size(msg, sizeof(l_ptr->proto_msg));
	msg_set_session(msg, (tn->random & 0xffff));
	msg_set_bearer_id(msg, b_ptr->identity);
	strcpy((char *)msg_data(msg), if_name);
	l_ptr->net_plane = b_ptr->net_plane;
	l_ptr->advertised_mtu = b_ptr->mtu;
	l_ptr->mtu = l_ptr->advertised_mtu;
	l_ptr->priority = b_ptr->priority;
	tipc_link_set_queue_limits(l_ptr, b_ptr->window);
	l_ptr->snd_nxt = 1;
	__skb_queue_head_init(&l_ptr->transmq);
	__skb_queue_head_init(&l_ptr->backlogq);
	__skb_queue_head_init(&l_ptr->deferdq);
	skb_queue_head_init(&l_ptr->wakeupq);
	l_ptr->inputq = inputq;
	l_ptr->namedq = namedq;
	skb_queue_head_init(l_ptr->inputq);
	link_reset_statistics(l_ptr);
	tipc_node_attach_link(n_ptr, l_ptr);
	/* Timer callback receives the link pointer as 'data' */
	setup_timer(&l_ptr->timer, link_timeout, (unsigned long)l_ptr);
	link_state_event(l_ptr, STARTING_EVT);

	return l_ptr;
}

305
/**
 * tipc_link_delete - Delete a link
 * @l: link to be deleted
 *
 * Resets the link, cancels its timer (dropping the timer's reference
 * when it was pending), detaches it from its node, and drops the final
 * reference; the memory is freed once all references are gone.
 */
void tipc_link_delete(struct tipc_link *l)
{
	tipc_link_reset(l);
	if (del_timer(&l->timer))
		tipc_link_put(l);
	l->flags |= LINK_STOPPED;
	/* Delete link now, or when timer is finished: */
	tipc_link_reset_fragments(l);
	tipc_node_detach_link(l->owner, l);
	tipc_link_put(l);
}

321
/* tipc_link_delete_list - delete the link on @bearer_id for every node
 * in the namespace
 *
 * Iterates the node list under RCU, taking each node's lock around the
 * per-link deletion.
 */
void tipc_link_delete_list(struct net *net, unsigned int bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *link;
	struct tipc_node *node;

	rcu_read_lock();
	list_for_each_entry_rcu(node, &tn->node_list, list) {
		tipc_node_lock(node);
		link = node->links[bearer_id].link;
		if (link)
			tipc_link_delete(link);
		tipc_node_unlock(node);
	}
	rcu_read_unlock();
}
P
Per Liden 已提交
337 338

/**
339
 * link_schedule_user - schedule a message sender for wakeup after congestion
340
 * @link: congested link
341
 * @list: message that was attempted sent
342
 * Create pseudo msg to send back to user when congestion abates
343
 * Only consumes message if there is an error
P
Per Liden 已提交
344
 */
345
/* link_schedule_user - schedule a message sender for wakeup after congestion
 * @link: congested link
 * @list: message that was attempted sent
 *
 * Creates a SOCK_WAKEUP pseudo message, queued on the link's wakeup
 * queue, to notify the sender when congestion abates.
 * Returns -ELINKCONG (chain kept) or -ENOBUFS (chain purged).
 */
static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
{
	struct tipc_msg *msg = buf_msg(skb_peek(list));
	int imp = msg_importance(msg);
	u32 oport = msg_origport(msg);
	u32 addr = link_own_addr(link);
	struct sk_buff *skb;

	/* This really cannot happen...  */
	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
		pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
		tipc_link_reset(link);
		goto err;
	}
	/* Non-blocking sender: */
	if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
		return -ELINKCONG;

	/* Create and schedule wakeup pseudo message */
	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
			      addr, addr, oport, 0, 0);
	if (!skb)
		goto err;
	/* Remember chain length/importance for link_prepare_wakeup() */
	TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
	TIPC_SKB_CB(skb)->chain_imp = imp;
	skb_queue_tail(&link->wakeupq, skb);
	link->stats.link_congs++;
	return -ELINKCONG;
err:
	__skb_queue_purge(list);
	return -ENOBUFS;
}

378 379 380 381 382 383
/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @l: congested link
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to node wait queue for wakeup
 */
void link_prepare_wakeup(struct tipc_link *l)
{
	/* pnd[] accumulates pending chain sizes per importance level */
	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
	int imp, lim;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		lim = l->window + l->backlog[imp].limit;
		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
		/* Stop once this importance level would overflow its limit */
		if ((pnd[imp] + l->backlog[imp].len) >= lim)
			break;
		skb_unlink(skb, &l->wakeupq);
		skb_queue_tail(l->inputq, skb);
		l->owner->inputq = l->inputq;
		l->owner->action_flags |= TIPC_MSG_EVT;
	}
}

/**
 * tipc_link_reset_fragments - purge link's inbound message fragments queue
 * @l_ptr: pointer to link
 */
void tipc_link_reset_fragments(struct tipc_link *l_ptr)
{
	/* kfree_skb(NULL) is a no-op, so no guard needed */
	kfree_skb(l_ptr->reasm_buf);
	l_ptr->reasm_buf = NULL;
}

413
void tipc_link_purge_backlog(struct tipc_link *l)
414 415 416 417 418 419 420 421 422
{
	__skb_queue_purge(&l->backlogq);
	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
}

423
/**
 * tipc_link_purge_queues - purge all pkt queues associated with link
 * @l_ptr: pointer to link
 *
 * Drops deferred, in-flight and backlogged packets, plus any partially
 * reassembled inbound message.
 */
void tipc_link_purge_queues(struct tipc_link *l_ptr)
{
	__skb_queue_purge(&l_ptr->deferdq);
	__skb_queue_purge(&l_ptr->transmq);
	tipc_link_purge_backlog(l_ptr);
	tipc_link_reset_fragments(l_ptr);
}

435
/* tipc_link_reset - reset a link to its initial (RESET_UNKNOWN) state
 *
 * Bumps the session number, drops MTU back to the advertised value and
 * purges all queues except the input queue. When an active link to a
 * still-reachable peer goes down, failover state is handed over to the
 * parallel link (including any partially reassembled buffer).
 * NOTE(review): presumably called with the owner node locked, like the
 * other state-changing paths here — confirm against callers.
 */
void tipc_link_reset(struct tipc_link *l_ptr)
{
	u32 prev_state = l_ptr->state;
	int was_active_link = tipc_link_is_active(l_ptr);
	struct tipc_node *owner = l_ptr->owner;
	struct tipc_link *pl = tipc_parallel_link(l_ptr);

	msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));

	/* Link is down, accept any session */
	l_ptr->peer_session = INVALID_SESSION;

	/* Prepare for renewed mtu size negotiation */
	l_ptr->mtu = l_ptr->advertised_mtu;

	l_ptr->state = RESET_UNKNOWN;

	/* Already reset: nothing more to tear down */
	if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
		return;

	tipc_node_link_down(l_ptr->owner, l_ptr->bearer_id);
	tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr);

	if (was_active_link && tipc_node_is_up(l_ptr->owner) && (pl != l_ptr)) {
		/* Hand failover state to the parallel link; it takes
		 * ownership of the reassembly buffer.
		 */
		l_ptr->flags |= LINK_FAILINGOVER;
		l_ptr->failover_checkpt = l_ptr->rcv_nxt;
		pl->failover_pkts = FIRST_FAILOVER;
		pl->failover_checkpt = l_ptr->rcv_nxt;
		pl->failover_skb = l_ptr->reasm_buf;
	} else {
		kfree_skb(l_ptr->reasm_buf);
	}
	/* Clean up all queues, except inputq: */
	__skb_queue_purge(&l_ptr->transmq);
	__skb_queue_purge(&l_ptr->deferdq);
	if (!owner->inputq)
		owner->inputq = l_ptr->inputq;
	skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq);
	if (!skb_queue_empty(owner->inputq))
		owner->action_flags |= TIPC_MSG_EVT;
	tipc_link_purge_backlog(l_ptr);
	l_ptr->reasm_buf = NULL;
	l_ptr->rcv_unacked = 0;
	l_ptr->snd_nxt = 1;
	l_ptr->silent_intv_cnt = 0;
	l_ptr->stale_count = 0;
	link_reset_statistics(l_ptr);
}

484
/* link_activate - bring a link into working state
 *
 * Resets receive sequencing, notifies the node layer the link is up,
 * and registers the peer as a bearer destination.
 */
static void link_activate(struct tipc_link *link)
{
	struct tipc_node *node = link->owner;

	link->rcv_nxt = 1;
	link->stats.recv_info = 1;
	link->silent_intv_cnt = 0;
	tipc_node_link_up(node, link->bearer_id);
	tipc_bearer_add_dest(node->net, link->bearer_id, link->addr);
}

/**
 * link_state_event - link finite state machine
 * @l_ptr: pointer to link
 * @event: state machine event to process
 *
 * States: WORKING_WORKING, WORKING_UNKNOWN, RESET_UNKNOWN, RESET_RESET.
 * Events: traffic/protocol messages, STARTING_EVT (timer start) and
 * SILENCE_EVT (timer saw no traffic from the peer).
 */
static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
{
	struct tipc_link *other;
	unsigned long timer_intv = l_ptr->keepalive_intv;

	if (l_ptr->flags & LINK_STOPPED)
		return;

	if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
		return;		/* Not yet. */

	/* No events admitted while the link is failing over */
	if (l_ptr->flags & LINK_FAILINGOVER)
		return;

	switch (l_ptr->state) {
	case WORKING_WORKING:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			l_ptr->silent_intv_cnt = 0;
			break;
		case SILENCE_EVT:
			if (!l_ptr->silent_intv_cnt) {
				if (tipc_bclink_acks_missing(l_ptr->owner))
					tipc_link_proto_xmit(l_ptr, STATE_MSG,
							     0, 0, 0, 0);
				break;
			}
			/* Peer has been silent: start probing */
			l_ptr->state = WORKING_UNKNOWN;
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
			break;
		case RESET_MSG:
			pr_debug("%s<%s>, requested by peer\n",
				 link_rst_msg, l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     0, 0, 0, 0);
			break;
		default:
			pr_debug("%s%u in WW state\n", link_unk_evt, event);
		}
		break;
	case WORKING_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			l_ptr->state = WORKING_WORKING;
			l_ptr->silent_intv_cnt = 0;
			break;
		case RESET_MSG:
			pr_debug("%s<%s>, requested by peer while probing\n",
				 link_rst_msg, l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     0, 0, 0, 0);
			break;
		case SILENCE_EVT:
			if (!l_ptr->silent_intv_cnt) {
				l_ptr->state = WORKING_WORKING;
				if (tipc_bclink_acks_missing(l_ptr->owner))
					tipc_link_proto_xmit(l_ptr, STATE_MSG,
							     0, 0, 0, 0);
			} else if (l_ptr->silent_intv_cnt <
				   l_ptr->abort_limit) {
				/* Keep probing until the abort limit */
				tipc_link_proto_xmit(l_ptr, STATE_MSG,
						     1, 0, 0, 0);
			} else {	/* Link has failed */
				pr_debug("%s<%s>, peer not responding\n",
					 link_rst_msg, l_ptr->name);
				tipc_link_reset(l_ptr);
				l_ptr->state = RESET_UNKNOWN;
				tipc_link_proto_xmit(l_ptr, RESET_MSG,
						     0, 0, 0, 0);
			}
			break;
		default:
			pr_err("%s%u in WU state\n", link_unk_evt, event);
		}
		break;
	case RESET_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
			break;
		case ACTIVATE_MSG:
			/* Defer while the other link is still probing */
			other = node_active_link(l_ptr->owner, 0);
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			link_activate(l_ptr);
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
			if (l_ptr->owner->working_links == 1)
				tipc_link_sync_xmit(l_ptr);
			break;
		case RESET_MSG:
			l_ptr->state = RESET_RESET;
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     1, 0, 0, 0);
			break;
		case STARTING_EVT:
			l_ptr->flags |= LINK_STARTED;
			link_set_timer(l_ptr, timer_intv);
			break;
		case SILENCE_EVT:
			tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0);
			break;
		default:
			pr_err("%s%u in RU state\n", link_unk_evt, event);
		}
		break;
	case RESET_RESET:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			/* Defer while the other link is still probing */
			other = node_active_link(l_ptr->owner, 0);
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			link_activate(l_ptr);
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
			if (l_ptr->owner->working_links == 1)
				tipc_link_sync_xmit(l_ptr);
			break;
		case RESET_MSG:
			break;
		case SILENCE_EVT:
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     0, 0, 0, 0);
			break;
		default:
			pr_err("%s%u in RR state\n", link_unk_evt, event);
		}
		break;
	default:
		pr_err("Unknown link state %u/%u\n", l_ptr->state, event);
	}
}

639
/**
 * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
 * @net: the applicable net namespace
 * @link: link to use
 * @list: chain of buffers containing message
 *
 * Consumes the buffer chain, except when returning -ELINKCONG,
 * since the caller then may want to make more send attempts.
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int __tipc_link_xmit(struct net *net, struct tipc_link *link,
		     struct sk_buff_head *list)
{
	struct tipc_msg *msg = buf_msg(skb_peek(list));
	unsigned int maxwin = link->window;
	unsigned int i, imp = msg_importance(msg);
	uint mtu = link->mtu;
	u16 ack = mod(link->rcv_nxt - 1);
	u16 seqno = link->snd_nxt;
	u16 bc_last_in = link->owner->bclink.last_in;
	struct tipc_media_addr *addr = &link->media_addr;
	struct sk_buff_head *transmq = &link->transmq;
	struct sk_buff_head *backlogq = &link->backlogq;
	struct sk_buff *skb, *bskb;

	/* Match msg importance against this and all higher backlog limits: */
	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
		if (unlikely(link->backlog[i].len >= link->backlog[i].limit))
			return link_schedule_user(link, list);
	}
	if (unlikely(msg_size(msg) > mtu)) {
		__skb_queue_purge(list);
		return -EMSGSIZE;
	}
	/* Prepare each packet for sending, and add to relevant queue: */
	while (skb_queue_len(list)) {
		skb = skb_peek(list);
		msg = buf_msg(skb);
		msg_set_seqno(msg, seqno);
		msg_set_ack(msg, ack);
		msg_set_bcast_ack(msg, bc_last_in);

		/* Send directly while the transmit window has room */
		if (likely(skb_queue_len(transmq) < maxwin)) {
			__skb_dequeue(list);
			__skb_queue_tail(transmq, skb);
			tipc_bearer_send(net, link->bearer_id, skb, addr);
			link->rcv_unacked = 0;
			seqno++;
			continue;
		}
		/* Window full: try bundling into the last backlog buffer */
		if (tipc_msg_bundle(skb_peek_tail(backlogq), msg, mtu)) {
			kfree_skb(__skb_dequeue(list));
			link->stats.sent_bundled++;
			continue;
		}
		/* Otherwise try starting a fresh bundle */
		if (tipc_msg_make_bundle(&bskb, msg, mtu, link->addr)) {
			kfree_skb(__skb_dequeue(list));
			__skb_queue_tail(backlogq, bskb);
			link->backlog[msg_importance(buf_msg(bskb))].len++;
			link->stats.sent_bundled++;
			link->stats.sent_bundles++;
			continue;
		}
		/* Unbundlable: move the whole remaining chain to backlog */
		link->backlog[imp].len += skb_queue_len(list);
		skb_queue_splice_tail_init(list, backlogq);
	}
	link->snd_nxt = seqno;
	return 0;
}

709 710
/* skb2list - initialize @list and place the single buffer @skb on it */
static void skb2list(struct sk_buff *skb, struct sk_buff_head *list)
{
	skb_queue_head_init(list);
	__skb_queue_tail(list, skb);
}

/* __tipc_link_xmit_skb - send a single buffer on a known, locked link
 *
 * Wraps the buffer in a one-element chain and forwards to
 * __tipc_link_xmit(); same return/consumption semantics.
 */
static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
{
	struct sk_buff_head head;

	skb2list(skb, &head);
	return __tipc_link_xmit(link->owner->net, link, &head);
}

723 724 725 726 727 728 729 730
/* tipc_link_xmit_skb(): send single buffer to destination
 * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
 * messages, which will not be rejected
 * The only exception is datagram messages rerouted after secondary
 * lookup, which are rare and safe to dispose of anyway.
 * TODO: Return real return value, and let callers use
 * tipc_wait_for_sendpkt() where applicable
 */
int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
		       u32 selector)
{
	struct sk_buff_head head;
	int rc;

	skb2list(skb, &head);
	rc = tipc_link_xmit(net, &head, dnode, selector);
	/* On congestion the chain is kept by the xmit path; drop it here */
	if (rc == -ELINKCONG)
		kfree_skb(skb);
	return 0;
}

744
/**
 * tipc_link_xmit() is the general link level function for message sending
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * @dnode: address of destination node
 * @selector: a number used for deterministic link selection
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
 */
int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dnode,
		   u32 selector)
{
	struct tipc_link *link = NULL;
	struct tipc_node *node;
	int rc = -EHOSTUNREACH;

	node = tipc_node_find(net, dnode);
	if (node) {
		tipc_node_lock(node);
		/* Pick one of the two active links deterministically */
		link = node_active_link(node, selector & 1);
		if (link)
			rc = __tipc_link_xmit(net, link, list);
		tipc_node_unlock(node);
		tipc_node_put(node);
	}
	if (link)
		return rc;

	/* No link: deliver locally when the destination is this node */
	if (likely(in_own_node(net, dnode))) {
		tipc_sk_rcv(net, list);
		return 0;
	}

	__skb_queue_purge(list);
	return rc;
}

782
/*
 * tipc_link_sync_xmit - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 *
 * Called with node locked
 */
static void tipc_link_sync_xmit(struct tipc_link *link)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;

	skb = tipc_buf_acquire(INT_H_SIZE);
	if (!skb)
		return;		/* best effort: silently skip on OOM */

	msg = buf_msg(skb);
	tipc_msg_init(link_own_addr(link), msg, BCAST_PROTOCOL, STATE_MSG,
		      INT_H_SIZE, link->addr);
	msg_set_last_bcast(msg, link->owner->bclink.acked);
	__tipc_link_xmit_skb(link, skb);
}

/*
 * tipc_link_sync_rcv - synchronize broadcast link endpoints.
 * Receive the sequence number where we should start receiving and
 * acking broadcast packets from a newly added peer node, and open
 * up for reception of such packets.
 *
 * Called with node locked
 */
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
	n->bclink.recv_permitted = true;
	kfree_skb(buf);	/* sync message fully consumed here */
}

823
/*
 * tipc_link_push_packets - push unsent packets to bearer
 *
 * Push out the unsent messages of a link where congestion
 * has abated. Node is locked.
 *
 * Called with node locked
 */
void tipc_link_push_packets(struct tipc_link *link)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	u16 seqno = link->snd_nxt;
	u16 ack = mod(link->rcv_nxt - 1);

	/* Move backlog packets into the transmit window while room lasts */
	while (skb_queue_len(&link->transmq) < link->window) {
		skb = __skb_dequeue(&link->backlogq);
		if (!skb)
			break;
		msg = buf_msg(skb);
		link->backlog[msg_importance(msg)].len--;
		msg_set_ack(msg, ack);
		msg_set_seqno(msg, seqno);
		seqno = mod(seqno + 1);
		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
		link->rcv_unacked = 0;
		__skb_queue_tail(&link->transmq, skb);
		tipc_bearer_send(link->owner->net, link->bearer_id,
				 skb, &link->media_addr);
	}
	link->snd_nxt = seqno;
}

856
/* tipc_link_reset_all - reset every link attached to @node
 *
 * Takes the node lock, logs the event, and resets each existing link.
 */
void tipc_link_reset_all(struct tipc_node *node)
{
	char addr_string[16];
	u32 i;

	tipc_node_lock(node);

	pr_warn("Resetting all links to %s\n",
		tipc_addr_string_fill(addr_string, node->addr));

	for (i = 0; i < MAX_BEARERS; i++) {
		if (node->links[i].link) {
			link_print(node->links[i].link, "Resetting link\n");
			tipc_link_reset(node->links[i].link);
		}
	}

	tipc_node_unlock(node);
}

876
/* link_retransmit_failure - handle repeated retransmit failures
 * @l_ptr: failing link (addr == 0 denotes the broadcast link)
 * @buf: first unacknowledged buffer
 *
 * A unicast link is simply reset; for the broadcast link, diagnostic
 * state is logged and a broadcast reset is flagged on the lagging peer.
 */
static void link_retransmit_failure(struct tipc_link *l_ptr,
				    struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct net *net = l_ptr->owner->net;

	pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);

	if (l_ptr->addr) {
		/* Handle failure on standard link */
		link_print(l_ptr, "Resetting link\n");
		tipc_link_reset(l_ptr);

	} else {
		/* Handle failure on broadcast link */
		struct tipc_node *n_ptr;
		char addr_string[16];

		pr_info("Msg seq number: %u,  ", msg_seqno(msg));
		pr_cont("Outstanding acks: %lu\n",
			(unsigned long) TIPC_SKB_CB(buf)->handle);

		/* Identify the peer that is furthest behind on acks */
		n_ptr = tipc_bclink_retransmit_to(net);

		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_info("Broadcast link info for %s\n", addr_string);
		pr_info("Reception permitted: %d,  Acked: %u\n",
			n_ptr->bclink.recv_permitted,
			n_ptr->bclink.acked);
		pr_info("Last in: %u,  Oos state: %u,  Last sent: %u\n",
			n_ptr->bclink.last_in,
			n_ptr->bclink.oos_state,
			n_ptr->bclink.last_sent);

		n_ptr->action_flags |= TIPC_BCAST_RESET;
		l_ptr->stale_count = 0;
	}
}

915
/* tipc_link_retransmit - retransmit up to @retransmits packets from @skb on
 * @l_ptr: link to retransmit on
 * @skb: first buffer in the transmit queue to resend (may be NULL)
 * @retransmits: maximum number of packets to resend
 *
 * Node lock must be held. Detects when the same packet has been
 * retransmitted too many times (100) and escalates to
 * link_retransmit_failure() instead of resending again.
 */
void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
			  u32 retransmits)
{
	struct tipc_msg *msg;

	if (!skb)
		return;

	msg = buf_msg(skb);

	/* Detect repeated retransmit failures */
	if (l_ptr->last_retransm == msg_seqno(msg)) {
		if (++l_ptr->stale_count > 100) {
			link_retransmit_failure(l_ptr, skb);
			return;
		}
	} else {
		/* New head-of-queue packet: restart the stale counter */
		l_ptr->last_retransm = msg_seqno(msg);
		l_ptr->stale_count = 1;
	}

	/* Resend from @skb onward, refreshing ack fields in each header */
	skb_queue_walk_from(&l_ptr->transmq, skb) {
		if (!retransmits)
			break;
		msg = buf_msg(skb);
		msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
				 &l_ptr->media_addr);
		retransmits--;
		l_ptr->stats.retransmitted++;
	}
}

949 950 951 952 953 954 955 956 957 958 959 960 961 962
/* link_synch(): check if all packets arrived before the synch
 *               point have been consumed
 * Returns true if the parallel links are synched, otherwise false
 *
 * Node lock must be held. Clears LINK_SYNCHING once the parallel link
 * has both received and delivered everything up to the synch point.
 */
static bool link_synch(struct tipc_link *l)
{
	unsigned int post_synch;
	struct tipc_link *pl;

	/* No distinct parallel link means there is nothing to wait for */
	pl  = tipc_parallel_link(l);
	if (pl == l)
		goto synched;

	/* Was last pre-synch packet added to input queue ? */
	if (less_eq(pl->rcv_nxt, l->synch_point))
		return false;

	/* Is it still in the input queue ? */
	post_synch = mod(pl->rcv_nxt - l->synch_point) - 1;
	if (skb_queue_len(pl->inputq) > post_synch)
		return false;
synched:
	l->flags &= ~LINK_SYNCHING;
	return true;
}

975 976
static void link_retrieve_defq(struct tipc_link *link,
			       struct sk_buff_head *list)
P
Per Liden 已提交
977
{
978
	u16 seq_no;
P
Per Liden 已提交
979

J
Jon Paul Maloy 已提交
980
	if (skb_queue_empty(&link->deferdq))
981 982
		return;

J
Jon Paul Maloy 已提交
983
	seq_no = buf_seqno(skb_peek(&link->deferdq));
984
	if (seq_no == link->rcv_nxt)
J
Jon Paul Maloy 已提交
985
		skb_queue_splice_tail_init(&link->deferdq, list);
P
Per Liden 已提交
986 987
}

988
/**
 * tipc_rcv - process TIPC packets/messages arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet
 * @b_ptr: pointer to bearer message arrived on
 *
 * Invoked with no locks held.  Bearer pointer must point to a valid bearer
 * structure (i.e. cannot be NULL), but bearer can be inactive.
 */
void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct sk_buff_head head;
	struct tipc_node *n_ptr;
	struct tipc_link *l_ptr;
	struct sk_buff *skb1, *tmp;
	struct tipc_msg *msg;
	u16 seq_no;
	u16 ackd;
	u32 released;

	/* Seed the work list with the arriving buffer; in-sequence deferred
	 * packets retrieved below are appended and processed in the same loop
	 */
	skb2list(skb, &head);

	while ((skb = __skb_dequeue(&head))) {
		/* Ensure message is well-formed */
		if (unlikely(!tipc_msg_validate(skb)))
			goto discard;

		/* Handle arrival of a non-unicast link message */
		msg = buf_msg(skb);
		if (unlikely(msg_non_seq(msg))) {
			if (msg_user(msg) ==  LINK_CONFIG)
				tipc_disc_rcv(net, skb, b_ptr);
			else
				tipc_bclink_rcv(net, skb);
			continue;
		}

		/* Discard unicast link messages destined for another node */
		if (unlikely(!msg_short(msg) &&
			     (msg_destnode(msg) != tn->own_addr)))
			goto discard;

		/* Locate neighboring node that sent message */
		n_ptr = tipc_node_find(net, msg_prevnode(msg));
		if (unlikely(!n_ptr))
			goto discard;

		tipc_node_lock(n_ptr);
		/* Locate unicast link endpoint that should handle message */
		l_ptr = n_ptr->links[b_ptr->identity].link;
		if (unlikely(!l_ptr))
			goto unlock;

		/* Verify that communication with node is currently allowed */
		if ((n_ptr->action_flags & TIPC_WAIT_PEER_LINKS_DOWN) &&
		    msg_user(msg) == LINK_PROTOCOL &&
		    (msg_type(msg) == RESET_MSG ||
		    msg_type(msg) == ACTIVATE_MSG) &&
		    !msg_redundant_link(msg))
			n_ptr->action_flags &= ~TIPC_WAIT_PEER_LINKS_DOWN;

		if (tipc_node_blocked(n_ptr))
			goto unlock;

		/* Validate message sequence number info */
		seq_no = msg_seqno(msg);
		ackd = msg_ack(msg);

		/* Release acked messages */
		if (unlikely(n_ptr->bclink.acked != msg_bcast_ack(msg)))
			tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));

		/* Drop everything in the transmit queue up to the peer's ack */
		released = 0;
		skb_queue_walk_safe(&l_ptr->transmq, skb1, tmp) {
			if (more(buf_seqno(skb1), ackd))
				break;
			 __skb_unlink(skb1, &l_ptr->transmq);
			 kfree_skb(skb1);
			 released = 1;
		}

		/* Try sending any messages link endpoint has pending */
		if (unlikely(skb_queue_len(&l_ptr->backlogq)))
			tipc_link_push_packets(l_ptr);

		/* Freed transmit slots may unblock waiting senders */
		if (released && !skb_queue_empty(&l_ptr->wakeupq))
			link_prepare_wakeup(l_ptr);

		/* Process the incoming packet */
		if (unlikely(!link_working_working(l_ptr))) {
			if (msg_user(msg) == LINK_PROTOCOL) {
				tipc_link_proto_rcv(l_ptr, skb);
				link_retrieve_defq(l_ptr, &head);
				skb = NULL;
				goto unlock;
			}

			/* Traffic message. Conditionally activate link */
			link_state_event(l_ptr, TRAFFIC_MSG_EVT);

			if (link_working_working(l_ptr)) {
				/* Re-insert buffer in front of queue */
				__skb_queue_head(&head, skb);
				skb = NULL;
				goto unlock;
			}
			goto unlock;
		}

		/* Link is now in state WORKING_WORKING */
		if (unlikely(seq_no != l_ptr->rcv_nxt)) {
			link_handle_out_of_seq_msg(l_ptr, skb);
			link_retrieve_defq(l_ptr, &head);
			skb = NULL;
			goto unlock;
		}
		/* In-sequence traffic received: link is demonstrably alive */
		l_ptr->silent_intv_cnt = 0;

		/* Synchronize with parallel link if applicable */
		if (unlikely((l_ptr->flags & LINK_SYNCHING) && !msg_dup(msg))) {
			if (!link_synch(l_ptr))
				goto unlock;
		}
		l_ptr->rcv_nxt++;
		if (unlikely(!skb_queue_empty(&l_ptr->deferdq)))
			link_retrieve_defq(l_ptr, &head);
		/* Ack periodically so the peer can release its transmitq */
		if (unlikely(++l_ptr->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
			l_ptr->stats.sent_acks++;
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0);
		}
		tipc_link_input(l_ptr, skb);
		skb = NULL;
unlock:
		tipc_node_unlock(n_ptr);
		tipc_node_put(n_ptr);
discard:
		if (unlikely(skb))
			kfree_skb(skb);
	}
}

1130
/* tipc_data_input - deliver data and name distr msgs to upper layer
1131
 *
1132
 * Consumes buffer if message is of right type
1133 1134
 * Node lock must be held
 */
1135
static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
1136
{
1137 1138 1139
	struct tipc_node *node = link->owner;
	struct tipc_msg *msg = buf_msg(skb);
	u32 dport = msg_destport(msg);
1140 1141

	switch (msg_user(msg)) {
1142 1143 1144 1145 1146
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
	case CONN_MANAGER:
1147 1148
		if (tipc_skb_queue_tail(link->inputq, skb, dport)) {
			node->inputq = link->inputq;
1149
			node->action_flags |= TIPC_MSG_EVT;
1150
		}
1151
		return true;
1152
	case NAME_DISTRIBUTOR:
1153
		node->bclink.recv_permitted = true;
1154 1155 1156
		node->namedq = link->namedq;
		skb_queue_tail(link->namedq, skb);
		if (skb_queue_len(link->namedq) == 1)
1157 1158 1159
			node->action_flags |= TIPC_NAMED_MSG_EVT;
		return true;
	case MSG_BUNDLER:
1160
	case TUNNEL_PROTOCOL:
1161
	case MSG_FRAGMENTER:
1162
	case BCAST_PROTOCOL:
1163
		return false;
1164
	default:
1165 1166 1167 1168
		pr_warn("Dropping received illegal msg type\n");
		kfree_skb(skb);
		return false;
	};
1169
}
1170 1171 1172 1173 1174

/* tipc_link_input - process packet that has passed link protocol check
 * @link: link the packet arrived on
 * @skb: packet buffer
 *
 * Consumes buffer
 * Node lock must be held
 *
 * Handles the message classes tipc_data_input() declined: tunneled
 * (synch/failover) packets, bundles, fragments and broadcast protocol.
 */
static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
{
	struct tipc_node *node = link->owner;
	struct tipc_msg *msg = buf_msg(skb);
	struct sk_buff *iskb;
	int pos = 0;

	/* Fast path: ordinary data/name-distr message, delivered directly */
	if (likely(tipc_data_input(link, skb)))
		return;

	switch (msg_user(msg)) {
	case TUNNEL_PROTOCOL:
		if (msg_dup(msg)) {
			/* SYNCH_MSG duplicate: start synching, drop the copy */
			link->flags |= LINK_SYNCHING;
			link->synch_point = msg_seqno(msg_get_wrapped(msg));
			kfree_skb(skb);
			break;
		}
		if (!tipc_link_failover_rcv(link, &skb))
			break;
		if (msg_user(buf_msg(skb)) != MSG_BUNDLER) {
			tipc_data_input(link, skb);
			break;
		}
		/* Extracted packet is a bundle: fall through to unbundle */
	case MSG_BUNDLER:
		link->stats.recv_bundles++;
		link->stats.recv_bundled += msg_msgcnt(msg);

		while (tipc_msg_extract(skb, &iskb, &pos))
			tipc_data_input(link, iskb);
		break;
	case MSG_FRAGMENTER:
		link->stats.recv_fragments++;
		if (tipc_buf_append(&link->reasm_buf, &skb)) {
			/* Reassembly complete: deliver the full message */
			link->stats.recv_fragmented++;
			tipc_data_input(link, skb);
		} else if (!link->reasm_buf) {
			/* Reassembly failure: only a reset recovers */
			tipc_link_reset(link);
		}
		break;
	case BCAST_PROTOCOL:
		tipc_link_sync_rcv(node, skb);
		break;
	default:
		break;
	};
}

1224
/**
1225 1226 1227
 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
 *
 * Returns increase in queue length (i.e. 0 or 1)
P
Per Liden 已提交
1228
 */
1229
u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
P
Per Liden 已提交
1230
{
1231
	struct sk_buff *skb1;
1232
	u16 seq_no = buf_seqno(skb);
P
Per Liden 已提交
1233 1234

	/* Empty queue ? */
1235 1236
	if (skb_queue_empty(list)) {
		__skb_queue_tail(list, skb);
P
Per Liden 已提交
1237 1238 1239 1240
		return 1;
	}

	/* Last ? */
1241 1242
	if (less(buf_seqno(skb_peek_tail(list)), seq_no)) {
		__skb_queue_tail(list, skb);
P
Per Liden 已提交
1243 1244 1245
		return 1;
	}

1246
	/* Locate insertion point in queue, then insert; discard if duplicate */
1247
	skb_queue_walk(list, skb1) {
1248
		u16 curr_seqno = buf_seqno(skb1);
P
Per Liden 已提交
1249

1250
		if (seq_no == curr_seqno) {
1251
			kfree_skb(skb);
1252
			return 0;
P
Per Liden 已提交
1253
		}
1254 1255

		if (less(seq_no, curr_seqno))
P
Per Liden 已提交
1256
			break;
1257
	}
P
Per Liden 已提交
1258

1259
	__skb_queue_before(list, skb1, skb);
1260
	return 1;
P
Per Liden 已提交
1261 1262
}

1263
/*
 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
 *
 * Node lock must be held. Protocol messages are processed immediately;
 * data packets are either dropped as duplicates or placed on the
 * deferred queue, with a periodic NACK sent to the peer.
 */
static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
				       struct sk_buff *buf)
{
	u32 seq_no = buf_seqno(buf);

	/* Protocol messages are not sequenced: handle right away */
	if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
		tipc_link_proto_rcv(l_ptr, buf);
		return;
	}

	/* Record OOS packet arrival */
	l_ptr->silent_intv_cnt = 0;

	/*
	 * Discard packet if a duplicate; otherwise add it to deferred queue
	 * and notify peer of gap as per protocol specification
	 */
	if (less(seq_no, l_ptr->rcv_nxt)) {
		l_ptr->stats.duplicates++;
		kfree_skb(buf);
		return;
	}

	if (tipc_link_defer_pkt(&l_ptr->deferdq, buf)) {
		l_ptr->stats.deferred_recv++;
		/* NACK once per TIPC_MIN_LINK_WIN deferred packets */
		if ((skb_queue_len(&l_ptr->deferdq) % TIPC_MIN_LINK_WIN) == 1)
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0);
	} else {
		l_ptr->stats.duplicates++;
	}
}

/*
 * Send protocol message to the other endpoint.
 *
 * @l_ptr: link to send on
 * @msg_typ: RESET_MSG, ACTIVATE_MSG or STATE_MSG
 * @probe_msg: non-zero if this STATE_MSG is a probe
 * @gap: sequence gap to report (STATE_MSG only; may be recomputed from
 *       the deferred queue)
 * @tolerance: link tolerance to advertise (STATE_MSG only)
 * @priority: link priority to advertise (STATE_MSG only)
 *
 * Node lock must be held. The message is built in the link's static
 * protocol message template, copied into a fresh buffer and sent
 * directly on the bearer, bypassing the transmit queue.
 */
void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
			  u32 gap, u32 tolerance, u32 priority)
{
	struct sk_buff *buf = NULL;
	struct tipc_msg *msg = l_ptr->pmsg;
	u32 msg_size = sizeof(l_ptr->proto_msg);
	int r_flag;
	u16 last_rcv;

	/* Don't send protocol message during link failover */
	if (l_ptr->flags & LINK_FAILINGOVER)
		return;

	/* Abort non-RESET send if communication with node is prohibited */
	if ((tipc_node_blocked(l_ptr->owner)) && (msg_typ != RESET_MSG))
		return;

	/* Create protocol message with "out-of-sequence" sequence number */
	msg_set_type(msg, msg_typ);
	msg_set_net_plane(msg, l_ptr->net_plane);
	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
	msg_set_last_bcast(msg, tipc_bclink_get_last_sent(l_ptr->owner->net));

	if (msg_typ == STATE_MSG) {
		u16 next_sent = l_ptr->snd_nxt;

		if (!tipc_link_is_up(l_ptr))
			return;
		msg_set_next_sent(msg, next_sent);
		/* Deferred packets present: report the actual gap instead */
		if (!skb_queue_empty(&l_ptr->deferdq)) {
			last_rcv = buf_seqno(skb_peek(&l_ptr->deferdq));
			gap = mod(last_rcv - l_ptr->rcv_nxt);
		}
		msg_set_seq_gap(msg, gap);
		if (gap)
			l_ptr->stats.sent_nacks++;
		msg_set_link_tolerance(msg, tolerance);
		msg_set_linkprio(msg, priority);
		msg_set_max_pkt(msg, l_ptr->mtu);
		msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
		msg_set_probe(msg, probe_msg != 0);
		if (probe_msg)
			l_ptr->stats.sent_probes++;
		l_ptr->stats.sent_states++;
	} else {		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_ack(msg, mod(l_ptr->failover_checkpt - 1));
		msg_set_seq_gap(msg, 0);
		msg_set_next_sent(msg, 1);
		msg_set_probe(msg, 0);
		msg_set_link_tolerance(msg, l_ptr->tolerance);
		msg_set_linkprio(msg, l_ptr->priority);
		msg_set_max_pkt(msg, l_ptr->advertised_mtu);
	}

	/* Tell the peer whether it still has a redundant path to us */
	r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
	msg_set_redundant_link(msg, r_flag);
	msg_set_linkprio(msg, l_ptr->priority);
	msg_set_size(msg, msg_size);

	/* Seqno halfway around the circle: clearly out-of-sequence */
	msg_set_seqno(msg, mod(l_ptr->snd_nxt + (0xffff / 2)));

	buf = tipc_buf_acquire(msg_size);
	if (!buf)
		return;

	skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
	buf->priority = TC_PRIO_CONTROL;
	tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, buf,
			 &l_ptr->media_addr);
	l_ptr->rcv_unacked = 0;
	kfree_skb(buf);
}

/*
 * Receive protocol message :
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest address rules
 *
 * Node lock must be held. Consumes @buf.
 */
static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
				struct sk_buff *buf)
{
	u32 rec_gap = 0;
	u32 msg_tol;
	struct tipc_msg *msg = buf_msg(buf);

	if (l_ptr->flags & LINK_FAILINGOVER)
		goto exit;

	/* Lowest-addressed node decides the network plane */
	if (l_ptr->net_plane != msg_net_plane(msg))
		if (link_own_addr(l_ptr) > msg_prevnode(msg))
			l_ptr->net_plane = msg_net_plane(msg);

	switch (msg_type(msg)) {

	case RESET_MSG:
		if (!link_working_unknown(l_ptr) &&
		    (l_ptr->peer_session != INVALID_SESSION)) {
			if (less_eq(msg_session(msg), l_ptr->peer_session))
				break; /* duplicate or old reset: ignore */
		}

		if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
				link_working_unknown(l_ptr))) {
			/*
			 * peer has lost contact -- don't allow peer's links
			 * to reactivate before we recognize loss & clean up
			 */
			l_ptr->owner->action_flags |= TIPC_WAIT_OWN_LINKS_DOWN;
		}

		link_state_event(l_ptr, RESET_MSG);

		/* fall thru' */
	case ACTIVATE_MSG:
		/* Update link settings according other endpoint's values */
		strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));

		msg_tol = msg_link_tolerance(msg);
		if (msg_tol > l_ptr->tolerance)
			link_set_supervision_props(l_ptr, msg_tol);

		if (msg_linkprio(msg) > l_ptr->priority)
			l_ptr->priority = msg_linkprio(msg);

		/* Use the smaller of the two endpoints' max packet sizes */
		if (l_ptr->mtu > msg_max_pkt(msg))
			l_ptr->mtu = msg_max_pkt(msg);

		/* Synchronize broadcast link info, if not done previously */
		if (!tipc_node_is_up(l_ptr->owner)) {
			l_ptr->owner->bclink.last_sent =
				l_ptr->owner->bclink.last_in =
				msg_last_bcast(msg);
			l_ptr->owner->bclink.oos_state = 0;
		}

		l_ptr->peer_session = msg_session(msg);
		l_ptr->peer_bearer_id = msg_bearer_id(msg);

		if (msg_type(msg) == ACTIVATE_MSG)
			link_state_event(l_ptr, ACTIVATE_MSG);
		break;
	case STATE_MSG:

		msg_tol = msg_link_tolerance(msg);
		if (msg_tol)
			link_set_supervision_props(l_ptr, msg_tol);

		if (msg_linkprio(msg) &&
		    (msg_linkprio(msg) != l_ptr->priority)) {
			pr_debug("%s<%s>, priority change %u->%u\n",
				 link_rst_msg, l_ptr->name,
				 l_ptr->priority, msg_linkprio(msg));
			l_ptr->priority = msg_linkprio(msg);
			tipc_link_reset(l_ptr); /* Enforce change to take effect */
			break;
		}

		/* Record reception; force mismatch at next timeout: */
		l_ptr->silent_intv_cnt = 0;

		link_state_event(l_ptr, TRAFFIC_MSG_EVT);
		l_ptr->stats.recv_states++;
		if (link_reset_unknown(l_ptr))
			break;

		/* Compute how far the peer is ahead of what we received */
		if (less_eq(l_ptr->rcv_nxt, msg_next_sent(msg)))
			rec_gap = mod(msg_next_sent(msg) - l_ptr->rcv_nxt);

		if (msg_probe(msg))
			l_ptr->stats.recv_probes++;

		/* Protocol message before retransmits, reduce loss risk */
		if (l_ptr->owner->bclink.recv_permitted)
			tipc_bclink_update_link_state(l_ptr->owner,
						      msg_last_bcast(msg));

		if (rec_gap || (msg_probe(msg))) {
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0,
					     rec_gap, 0, 0);
		}
		if (msg_seq_gap(msg)) {
			l_ptr->stats.recv_nacks++;
			tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->transmq),
					     msg_seq_gap(msg));
		}
		break;
	}
exit:
	kfree_skb(buf);
}


1493 1494
/* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
 * a different bearer. Owner node is locked.
P
Per Liden 已提交
1495
 */
1496 1497 1498 1499
static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
				  struct tipc_msg *tunnel_hdr,
				  struct tipc_msg *msg,
				  u32 selector)
P
Per Liden 已提交
1500
{
1501
	struct tipc_link *tunnel;
1502
	struct sk_buff *skb;
P
Per Liden 已提交
1503 1504
	u32 length = msg_size(msg);

1505
	tunnel = node_active_link(l_ptr->owner, selector & 1);
1506
	if (!tipc_link_is_up(tunnel)) {
1507
		pr_warn("%stunnel link no longer available\n", link_co_err);
P
Per Liden 已提交
1508
		return;
1509
	}
P
Per Liden 已提交
1510
	msg_set_size(tunnel_hdr, length + INT_H_SIZE);
1511 1512
	skb = tipc_buf_acquire(length + INT_H_SIZE);
	if (!skb) {
1513
		pr_warn("%sunable to send tunnel msg\n", link_co_err);
P
Per Liden 已提交
1514
		return;
1515
	}
1516 1517 1518
	skb_copy_to_linear_data(skb, tunnel_hdr, INT_H_SIZE);
	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, msg, length);
	__tipc_link_xmit_skb(tunnel, skb);
P
Per Liden 已提交
1519 1520 1521
}


1522 1523 1524 1525 1526
/* tipc_link_failover_send_queue(): A link has gone down, but a second
 * link is still active. We can do failover. Tunnel the failing link's
 * whole send queue via the remaining link. This way, we don't lose
 * any packets, and sequence order is preserved for subsequent traffic
 * sent over the remaining link. Owner node is locked.
P
Per Liden 已提交
1527
 */
1528
void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
P
Per Liden 已提交
1529
{
J
Jon Paul Maloy 已提交
1530
	int msgcount;
1531
	struct tipc_link *tunnel = node_active_link(l_ptr->owner, 0);
P
Per Liden 已提交
1532
	struct tipc_msg tunnel_hdr;
1533
	struct sk_buff *skb;
1534
	int split_bundles;
P
Per Liden 已提交
1535 1536 1537 1538

	if (!tunnel)
		return;

1539 1540
	tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, TUNNEL_PROTOCOL,
		      FAILOVER_MSG, INT_H_SIZE, l_ptr->addr);
1541 1542 1543 1544 1545

	skb_queue_walk(&l_ptr->backlogq, skb) {
		msg_set_seqno(buf_msg(skb), l_ptr->snd_nxt);
		l_ptr->snd_nxt = mod(l_ptr->snd_nxt + 1);
	}
J
Jon Paul Maloy 已提交
1546
	skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq);
1547
	tipc_link_purge_backlog(l_ptr);
J
Jon Paul Maloy 已提交
1548
	msgcount = skb_queue_len(&l_ptr->transmq);
P
Per Liden 已提交
1549 1550
	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
	msg_set_msgcnt(&tunnel_hdr, msgcount);
1551

J
Jon Paul Maloy 已提交
1552
	if (skb_queue_empty(&l_ptr->transmq)) {
1553 1554 1555
		skb = tipc_buf_acquire(INT_H_SIZE);
		if (skb) {
			skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE);
P
Per Liden 已提交
1556
			msg_set_size(&tunnel_hdr, INT_H_SIZE);
1557
			__tipc_link_xmit_skb(tunnel, skb);
P
Per Liden 已提交
1558
		} else {
1559 1560
			pr_warn("%sunable to send changeover msg\n",
				link_co_err);
P
Per Liden 已提交
1561 1562 1563
		}
		return;
	}
1564

1565 1566
	split_bundles = (node_active_link(l_ptr->owner, 0) !=
			 node_active_link(l_ptr->owner, 0));
1567

J
Jon Paul Maloy 已提交
1568
	skb_queue_walk(&l_ptr->transmq, skb) {
1569
		struct tipc_msg *msg = buf_msg(skb);
P
Per Liden 已提交
1570 1571 1572

		if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
			struct tipc_msg *m = msg_get_wrapped(msg);
1573
			unchar *pos = (unchar *)m;
P
Per Liden 已提交
1574

1575
			msgcount = msg_msgcnt(msg);
P
Per Liden 已提交
1576
			while (msgcount--) {
1577
				msg_set_seqno(m, msg_seqno(msg));
1578 1579
				tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, m,
						      msg_link_selector(m));
P
Per Liden 已提交
1580 1581 1582 1583
				pos += align(msg_size(m));
				m = (struct tipc_msg *)pos;
			}
		} else {
1584 1585
			tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
					      msg_link_selector(msg));
P
Per Liden 已提交
1586 1587 1588 1589
		}
	}
}

1590
/* tipc_link_dup_queue_xmit(): A second link has become active. Tunnel a
 * duplicate of the first link's send queue via the new link. This way, we
 * are guaranteed that currently queued packets from a socket are delivered
 * before future traffic from the same socket, even if this is using the
 * new link. The last arriving copy of each duplicate packet is dropped at
 * the receiving end by the regular protocol check, so packet cardinality
 * and sequence order is preserved per sender/receiver socket pair.
 * Owner node is locked.
 */
void tipc_link_dup_queue_xmit(struct tipc_link *link,
			      struct tipc_link *tnl)
{
	struct sk_buff *skb;
	struct tipc_msg tnl_hdr;
	struct sk_buff_head *queue = &link->transmq;
	int mcnt;
	u16 seqno;

	tipc_msg_init(link_own_addr(link), &tnl_hdr, TUNNEL_PROTOCOL,
		      SYNCH_MSG, INT_H_SIZE, link->addr);
	/* Peer must know the total count to detect end of synchronization */
	mcnt = skb_queue_len(&link->transmq) + skb_queue_len(&link->backlogq);
	msg_set_msgcnt(&tnl_hdr, mcnt);
	msg_set_bearer_id(&tnl_hdr, link->peer_bearer_id);

tunnel_queue:
	/* First pass tunnels transmq; second pass (via goto) the backlogq */
	skb_queue_walk(queue, skb) {
		struct sk_buff *outskb;
		struct tipc_msg *msg = buf_msg(skb);
		u32 len = msg_size(msg);

		msg_set_ack(msg, mod(link->rcv_nxt - 1));
		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
		msg_set_size(&tnl_hdr, len + INT_H_SIZE);
		outskb = tipc_buf_acquire(len + INT_H_SIZE);
		if (outskb == NULL) {
			pr_warn("%sunable to send duplicate msg\n",
				link_co_err);
			return;
		}
		skb_copy_to_linear_data(outskb, &tnl_hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(outskb, INT_H_SIZE,
					       skb->data, len);
		__tipc_link_xmit_skb(tnl, outskb);
		if (!tipc_link_is_up(link))
			return;
	}
	if (queue == &link->backlogq)
		return;
	/* Give backlog packets their future sequence numbers before copying */
	seqno = link->snd_nxt;
	skb_queue_walk(&link->backlogq, skb) {
		msg_set_seqno(buf_msg(skb), seqno);
		seqno = mod(seqno + 1);
	}
	queue = &link->backlogq;
	goto tunnel_queue;
}

1647
/*  tipc_link_failover_rcv(): Receive a tunnelled FAILOVER_MSG packet
 *  Owner node is locked.
 *
 *  On success *skb is replaced by the extracted inner packet (or NULL).
 *  Returns true (non-NULL *skb) if an inner packet is ready for further
 *  processing by the caller, false otherwise.
 */
static bool tipc_link_failover_rcv(struct tipc_link *link,
				   struct sk_buff **skb)
{
	struct tipc_msg *msg = buf_msg(*skb);
	struct sk_buff *iskb = NULL;
	struct tipc_link *pl = NULL;
	int bearer_id = msg_bearer_id(msg);
	int pos = 0;

	if (msg_type(msg) != FAILOVER_MSG) {
		pr_warn("%sunknown tunnel pkt received\n", link_co_err);
		goto exit;
	}
	if (bearer_id >= MAX_BEARERS)
		goto exit;

	/* A link cannot fail over to itself */
	if (bearer_id == link->bearer_id)
		goto exit;

	pl = link->owner->links[bearer_id].link;
	if (pl && tipc_link_is_up(pl))
		tipc_link_reset(pl);

	/* First failover packet carries the total expected count */
	if (link->failover_pkts == FIRST_FAILOVER)
		link->failover_pkts = msg_msgcnt(msg);

	/* Should we expect an inner packet? */
	if (!link->failover_pkts)
		goto exit;

	if (!tipc_msg_extract(*skb, &iskb, &pos)) {
		pr_warn("%sno inner failover pkt\n", link_co_err);
		*skb = NULL;
		goto exit;
	}
	link->failover_pkts--;
	*skb = NULL;

	/* Was this packet already delivered? */
	if (less(buf_seqno(iskb), link->failover_checkpt)) {
		kfree_skb(iskb);
		iskb = NULL;
		goto exit;
	}
	if (msg_user(buf_msg(iskb)) == MSG_FRAGMENTER) {
		link->stats.recv_fragments++;
		tipc_buf_append(&link->failover_skb, &iskb);
	}
exit:
	/* All failover packets received: parallel link leaves failover mode */
	if (!link->failover_pkts && pl)
		pl->flags &= ~LINK_FAILINGOVER;
	kfree_skb(*skb);
	*skb = iskb;
	return *skb;
}

1706
/* link_set_supervision_props - apply a new link tolerance
 * @l_ptr: link to update
 * @tol: tolerance in ms; ignored if outside [TIPC_MIN_LINK_TOL,
 *       TIPC_MAX_LINK_TOL]
 *
 * Derives the keepalive interval (tol/4, capped at 500 ms) and the
 * abort limit from the tolerance.
 */
static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol)
{
	unsigned long intv;

	if (tol < TIPC_MIN_LINK_TOL || tol > TIPC_MAX_LINK_TOL)
		return;

	intv = tol / 4;
	if (intv > 500)
		intv = 500;

	l_ptr->tolerance = tol;
	l_ptr->keepalive_intv = msecs_to_jiffies(intv);
	l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->keepalive_intv));
}

1718
/* tipc_link_set_queue_limits - set send window and per-importance backlog
 * @l: link to configure
 * @win: new send window size
 *
 * Backlog limits scale with the window per importance level; system
 * messages get a fixed bulk limit derived from the link MTU.
 */
void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
{
	int bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);
	struct tipc_link_backlog *bl = l->backlog;

	l->window = win;
	bl[TIPC_LOW_IMPORTANCE].limit      = win / 2;
	bl[TIPC_MEDIUM_IMPORTANCE].limit   = win;
	bl[TIPC_HIGH_IMPORTANCE].limit     = win / 2 * 3;
	bl[TIPC_CRITICAL_IMPORTANCE].limit = win * 2;
	bl[TIPC_SYSTEM_IMPORTANCE].limit   = bulk;
}

1730
/* tipc_link_find_owner - locate owner node of link by link's name
1731
 * @net: the applicable net namespace
1732 1733
 * @name: pointer to link name string
 * @bearer_id: pointer to index in 'node->links' array where the link was found.
1734
 *
1735
 * Returns pointer to node owning the link, or 0 if no matching link is found.
P
Per Liden 已提交
1736
 */
1737 1738
static struct tipc_node *tipc_link_find_owner(struct net *net,
					      const char *link_name,
1739
					      unsigned int *bearer_id)
P
Per Liden 已提交
1740
{
1741
	struct tipc_net *tn = net_generic(net, tipc_net_id);
1742
	struct tipc_link *l_ptr;
1743
	struct tipc_node *n_ptr;
1744
	struct tipc_node *found_node = NULL;
1745
	int i;
P
Per Liden 已提交
1746

1747
	*bearer_id = 0;
1748
	rcu_read_lock();
1749
	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
1750
		tipc_node_lock(n_ptr);
1751
		for (i = 0; i < MAX_BEARERS; i++) {
1752
			l_ptr = n_ptr->links[i].link;
1753 1754 1755 1756 1757
			if (l_ptr && !strcmp(l_ptr->name, link_name)) {
				*bearer_id = i;
				found_node = n_ptr;
				break;
			}
1758
		}
1759
		tipc_node_unlock(n_ptr);
1760 1761
		if (found_node)
			break;
1762
	}
1763 1764
	rcu_read_unlock();

1765
	return found_node;
P
Per Liden 已提交
1766 1767 1768 1769 1770 1771
}

/**
 * link_reset_statistics - reset link statistics
 * @l_ptr: pointer to link
 */
1772
static void link_reset_statistics(struct tipc_link *l_ptr)
P
Per Liden 已提交
1773 1774
{
	memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
1775 1776
	l_ptr->stats.sent_info = l_ptr->snd_nxt;
	l_ptr->stats.recv_info = l_ptr->rcv_nxt;
P
Per Liden 已提交
1777 1778
}

1779
static void link_print(struct tipc_link *l_ptr, const char *str)
P
Per Liden 已提交
1780
{
1781
	struct tipc_net *tn = net_generic(l_ptr->owner->net, tipc_net_id);
1782 1783 1784
	struct tipc_bearer *b_ptr;

	rcu_read_lock();
1785
	b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
1786 1787 1788
	if (b_ptr)
		pr_info("%s Link %x<%s>:", str, l_ptr->addr, b_ptr->name);
	rcu_read_unlock();
1789

P
Per Liden 已提交
1790
	if (link_working_unknown(l_ptr))
1791
		pr_cont(":WU\n");
1792
	else if (link_reset_reset(l_ptr))
1793
		pr_cont(":RR\n");
1794
	else if (link_reset_unknown(l_ptr))
1795
		pr_cont(":RU\n");
1796
	else if (link_working_working(l_ptr))
1797 1798 1799
		pr_cont(":WW\n");
	else
		pr_cont("\n");
P
Per Liden 已提交
1800
}
1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838

/* Parse and validate nested (link) properties valid for media, bearer and link
 *
 * @prop: nested TIPC_NLA_PROP_* attribute to parse
 * @props: output array, indexed by TIPC_NLA_PROP_*
 *
 * Returns 0 on success, a negative errno on parse failure or if any
 * present property is outside its legal range.
 */
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
	int err;

	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
			       tipc_nl_prop_policy);
	if (err)
		return err;

	/* Each property is optional; range-check only those present */
	if (props[TIPC_NLA_PROP_PRIO]) {
		u32 prio;

		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (prio > TIPC_MAX_LINK_PRI)
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_TOL]) {
		u32 tol;

		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_WIN]) {
		u32 win;

		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
			return -EINVAL;
	}

	return 0;
}
1839

1840 1841 1842 1843 1844 1845 1846 1847 1848
/* tipc_nl_link_set - netlink handler: set link properties by name
 * @skb: netlink request skb
 * @info: genetlink request info carrying TIPC_NLA_LINK attributes
 *
 * Looks up the link by its name attribute and applies any of the
 * tolerance/priority/window properties present in the request.
 * The broadcast link is delegated to tipc_nl_bc_link_set().
 * Returns 0 on success or a negative errno.
 */
int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	int res = 0;
	int bearer_id;
	char *name;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	/* The broadcast link has its own property handler */
	if (strcmp(name, tipc_bclink_name) == 0)
		return tipc_nl_bc_link_set(net, attrs);

	node = tipc_link_find_owner(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	/* Node lock protects the link pointer and property updates */
	tipc_node_lock(node);

	link = node->links[bearer_id].link;
	if (!link) {
		res = -EINVAL;
		goto out;
	}

	if (attrs[TIPC_NLA_LINK_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
					      props);
		if (err) {
			res = err;
			goto out;
		}

		if (props[TIPC_NLA_PROP_TOL]) {
			u32 tol;

			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
			link_set_supervision_props(link, tol);
			/* Advertise the new tolerance to the peer */
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0);
		}
		if (props[TIPC_NLA_PROP_PRIO]) {
			u32 prio;

			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
			link->priority = prio;
			/* Advertise the new priority to the peer */
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio);
		}
		if (props[TIPC_NLA_PROP_WIN]) {
			u32 win;

			win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
			/* Window is local only - no protocol message needed */
			tipc_link_set_queue_limits(link, win);
		}
	}

out:
	tipc_node_unlock(node);

	return res;
}
1917 1918

/* __tipc_nl_add_stats - append a nested TIPC_NLA_LINK_STATS block to @skb
 * @skb: netlink message under construction
 * @s: link statistics to serialize
 *
 * Maps each counter to its netlink attribute via a local table and
 * emits them all as u32s. Returns 0 on success, -EMSGSIZE if the
 * message ran out of room (the partial nest is cancelled).
 */
static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	/* Key/value pair: netlink attribute type -> counter value */
	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, s->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, s->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		/* Avoid a zero divisor for the profile total */
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		/* Average queue size; guard against division by zero */
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i <  ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}

/* Caller should hold appropriate locks to protect the link */
1984
static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
1985
			      struct tipc_link *link, int nlflags)
1986 1987 1988 1989 1990
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
1991
	struct tipc_net *tn = net_generic(net, tipc_net_id);
1992

1993
	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
1994
			  nlflags, TIPC_NL_LINK_GET);
1995 1996 1997 1998 1999 2000 2001 2002 2003 2004
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
2005
			tipc_cluster_mask(tn->own_addr)))
2006
		goto attr_msg_full;
2007
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
2008
		goto attr_msg_full;
2009
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt))
2010
		goto attr_msg_full;
2011
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt))
2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028
		goto attr_msg_full;

	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
			goto attr_msg_full;
	if (tipc_link_is_active(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
			goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
2029
			link->window))
2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);
	if (err)
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

/* Caller should hold node lock  */
2055 2056
static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
				    struct tipc_node *node, u32 *prev_link)
2057 2058 2059 2060 2061 2062 2063
{
	u32 i;
	int err;

	for (i = *prev_link; i < MAX_BEARERS; i++) {
		*prev_link = i;

2064
		if (!node->links[i].link)
2065 2066
			continue;

2067 2068
		err = __tipc_nl_add_link(net, msg,
					 node->links[i].link, NLM_F_MULTI);
2069 2070 2071 2072 2073 2074 2075 2076 2077 2078
		if (err)
			return err;
	}
	*prev_link = 0;

	return 0;
}

/* tipc_nl_link_dump - netlink dump handler: list all links
 * @skb: reply skb being filled
 * @cb: netlink callback carrying resume state in cb->args
 *
 * Resume state: args[0] = last node address, args[1] = next bearer
 * index within that node, args[2] = dump-complete flag. The broadcast
 * link is emitted first on a fresh dump. Returns skb->len per dump
 * protocol convention.
 */
int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node;
	struct tipc_nl_msg msg;
	u32 prev_node = cb->args[0];
	u32 prev_link = cb->args[1];
	int done = cb->args[2];
	int err;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (prev_node) {
		/* Resuming: re-find the node we stopped at */
		node = tipc_node_find(net, prev_node);
		if (!node) {
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistence check to fail in the netlink callback
			 * handler. Resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			goto out;
		}
		/* Drop the find-reference; RCU keeps the node valid here */
		tipc_node_put(node);

		list_for_each_entry_continue_rcu(node, &tn->node_list,
						 list) {
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	} else {
		/* Fresh dump: broadcast link first, then every node */
		err = tipc_nl_add_bc_link(net, &msg);
		if (err)
			goto out;

		list_for_each_entry_rcu(node, &tn->node_list, list) {
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	}
	done = 1;
out:
	rcu_read_unlock();

	/* Persist resume state for the next dump invocation */
	cb->args[0] = prev_node;
	cb->args[1] = prev_link;
	cb->args[2] = done;

	return skb->len;
}

int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
{
2150
	struct net *net = genl_info_net(info);
2151 2152 2153 2154
	struct tipc_nl_msg msg;
	char *name;
	int err;

2155 2156 2157
	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

2158 2159 2160 2161
	if (!info->attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;
	name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);

2162 2163
	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg.skb)
2164 2165
		return -ENOMEM;

2166 2167 2168 2169 2170 2171 2172 2173 2174 2175
	if (strcmp(name, tipc_bclink_name) == 0) {
		err = tipc_nl_add_bc_link(net, &msg);
		if (err) {
			nlmsg_free(msg.skb);
			return err;
		}
	} else {
		int bearer_id;
		struct tipc_node *node;
		struct tipc_link *link;
2176

2177 2178 2179
		node = tipc_link_find_owner(net, name, &bearer_id);
		if (!node)
			return -EINVAL;
2180

2181
		tipc_node_lock(node);
2182
		link = node->links[bearer_id].link;
2183 2184 2185 2186 2187
		if (!link) {
			tipc_node_unlock(node);
			nlmsg_free(msg.skb);
			return -EINVAL;
		}
2188

2189 2190 2191 2192 2193 2194 2195
		err = __tipc_nl_add_link(net, &msg, link, 0);
		tipc_node_unlock(node);
		if (err) {
			nlmsg_free(msg.skb);
			return err;
		}
	}
2196

2197
	return genlmsg_reply(msg.skb, info);
2198
}
2199 2200 2201 2202 2203 2204 2205 2206 2207

int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	char *link_name;
	unsigned int bearer_id;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
2208
	struct net *net = sock_net(skb->sk);
2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 2224

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(link_name, tipc_bclink_name) == 0) {
2225
		err = tipc_bclink_reset_stats(net);
2226 2227 2228 2229 2230
		if (err)
			return err;
		return 0;
	}

2231
	node = tipc_link_find_owner(net, link_name, &bearer_id);
2232 2233 2234 2235 2236
	if (!node)
		return -EINVAL;

	tipc_node_lock(node);

2237
	link = node->links[bearer_id].link;
2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248
	if (!link) {
		tipc_node_unlock(node);
		return -EINVAL;
	}

	link_reset_statistics(link);

	tipc_node_unlock(node);

	return 0;
}