/*
 * net/tipc/msg.c: TIPC message header routines
 *
 * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

37
#include <net/sock.h>
P
Per Liden 已提交
38 39
#include "core.h"
#include "msg.h"
40 41
#include "addr.h"
#include "name_table.h"
P
Per Liden 已提交
42

43
#define MAX_FORWARD_SIZE 1024
44 45
#define BUF_HEADROOM (LL_MAX_HEADER + 48)
#define BUF_TAILROOM 16
46

47
static unsigned int align(unsigned int i)
48
{
49
	return (i + 3) & ~3u;
50 51
}

Y
Ying Xue 已提交
52 53 54 55 56 57 58 59 60
/**
 * tipc_buf_acquire - creates a TIPC message buffer
 * @size: message size (including TIPC header)
 *
 * Returns a new buffer with data pointers set to the specified size.
 *
 * NOTE: Headroom is reserved to allow prepending of a data link header.
 *       There may also be unrequested tailroom present at the buffer's end.
 */
61
struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
Y
Ying Xue 已提交
62 63 64 65
{
	struct sk_buff *skb;
	unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;

66
	skb = alloc_skb_fclone(buf_size, gfp);
Y
Ying Xue 已提交
67 68 69 70 71 72 73 74
	if (skb) {
		skb_reserve(skb, BUF_HEADROOM);
		skb_put(skb, size);
		skb->next = NULL;
	}
	return skb;
}

75 76
void tipc_msg_init(u32 own_node, struct tipc_msg *m, u32 user, u32 type,
		   u32 hsize, u32 dnode)
77 78 79 80 81 82
{
	memset(m, 0, hsize);
	msg_set_version(m);
	msg_set_user(m, user);
	msg_set_hdr_sz(m, hsize);
	msg_set_size(m, hsize);
83
	msg_set_prevnode(m, own_node);
84
	msg_set_type(m, type);
85
	if (hsize > SHORT_H_SIZE) {
86 87
		msg_set_orignode(m, own_node);
		msg_set_destnode(m, dnode);
88 89 90
	}
}

91
struct sk_buff *tipc_msg_create(uint user, uint type,
92 93
				uint hdr_sz, uint data_sz, u32 dnode,
				u32 onode, u32 dport, u32 oport, int errcode)
94 95 96 97
{
	struct tipc_msg *msg;
	struct sk_buff *buf;

98
	buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC);
99 100 101 102
	if (unlikely(!buf))
		return NULL;

	msg = buf_msg(buf);
103
	tipc_msg_init(onode, msg, user, type, hdr_sz, dnode);
104 105 106 107 108 109 110 111 112
	msg_set_size(msg, hdr_sz + data_sz);
	msg_set_origport(msg, oport);
	msg_set_destport(msg, dport);
	msg_set_errcode(msg, errcode);
	if (hdr_sz > SHORT_H_SIZE) {
		msg_set_orignode(msg, onode);
		msg_set_destnode(msg, dnode);
	}
	return buf;
113 114
}

115
/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
116 117 118
 * @*headbuf: in:  NULL for first frag, otherwise value returned from prev call
 *            out: set when successful non-complete reassembly, otherwise NULL
 * @*buf:     in:  the buffer to append. Always defined
S
stephen hemminger 已提交
119
 *            out: head buf after successful complete reassembly, otherwise NULL
120
 * Returns 1 when reassembly complete, otherwise 0
121 122 123 124 125
 */
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
{
	struct sk_buff *head = *headbuf;
	struct sk_buff *frag = *buf;
126
	struct sk_buff *tail = NULL;
127 128
	struct tipc_msg *msg;
	u32 fragid;
129
	int delta;
130
	bool headstolen;
131

132 133 134 135 136 137
	if (!frag)
		goto err;

	msg = buf_msg(frag);
	fragid = msg_type(msg);
	frag->next = NULL;
138 139 140
	skb_pull(frag, msg_hdr_sz(msg));

	if (fragid == FIRST_FRAGMENT) {
141 142 143 144
		if (unlikely(head))
			goto err;
		if (unlikely(skb_unclone(frag, GFP_ATOMIC)))
			goto err;
145
		head = *headbuf = frag;
146
		*buf = NULL;
147 148 149 150 151 152 153 154
		TIPC_SKB_CB(head)->tail = NULL;
		if (skb_is_nonlinear(head)) {
			skb_walk_frags(head, tail) {
				TIPC_SKB_CB(head)->tail = tail;
			}
		} else {
			skb_frag_list_init(head);
		}
155 156
		return 0;
	}
157

158
	if (!head)
159 160
		goto err;

161 162 163
	if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
		kfree_skb_partial(frag, headstolen);
	} else {
164
		tail = TIPC_SKB_CB(head)->tail;
165 166 167 168 169 170 171 172 173
		if (!skb_has_frag_list(head))
			skb_shinfo(head)->frag_list = frag;
		else
			tail->next = frag;
		head->truesize += frag->truesize;
		head->data_len += frag->len;
		head->len += frag->len;
		TIPC_SKB_CB(head)->tail = frag;
	}
174

175
	if (fragid == LAST_FRAGMENT) {
176
		TIPC_SKB_CB(head)->validated = false;
177
		if (unlikely(!tipc_msg_validate(&head)))
178
			goto err;
179 180 181 182 183 184 185
		*buf = head;
		TIPC_SKB_CB(head)->tail = NULL;
		*headbuf = NULL;
		return 1;
	}
	*buf = NULL;
	return 0;
186
err:
187
	kfree_skb(*buf);
188 189
	kfree_skb(*headbuf);
	*buf = *headbuf = NULL;
190 191
	return 0;
}
192

193 194 195 196 197 198 199 200 201 202 203
/* tipc_msg_validate - validate basic format of received message
 *
 * This routine ensures a TIPC message has an acceptable header, and at least
 * as much data as the header indicates it should.  The routine also ensures
 * that the entire message header is stored in the main fragment of the message
 * buffer, to simplify future access to message header fields.
 *
 * Note: Having extra info present in the message header or data areas is OK.
 * TIPC will ignore the excess, under the assumption that it is optional info
 * introduced by a later release of the protocol.
 */
204
bool tipc_msg_validate(struct sk_buff **_skb)
205
{
206 207
	struct sk_buff *skb = *_skb;
	struct tipc_msg *hdr;
208 209
	int msz, hsz;

210
	/* Ensure that flow control ratio condition is satisfied */
211 212
	if (unlikely(skb->truesize / buf_roundup_len(skb) >= 4)) {
		skb = skb_copy_expand(skb, BUF_HEADROOM, 0, GFP_ATOMIC);
213 214 215 216 217 218
		if (!skb)
			return false;
		kfree_skb(*_skb);
		*_skb = skb;
	}

219 220 221 222 223 224 225 226 227 228 229
	if (unlikely(TIPC_SKB_CB(skb)->validated))
		return true;
	if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))
		return false;

	hsz = msg_hdr_sz(buf_msg(skb));
	if (unlikely(hsz < MIN_H_SIZE) || (hsz > MAX_H_SIZE))
		return false;
	if (unlikely(!pskb_may_pull(skb, hsz)))
		return false;

230 231
	hdr = buf_msg(skb);
	if (unlikely(msg_version(hdr) != TIPC_VERSION))
232 233
		return false;

234
	msz = msg_size(hdr);
235 236 237 238 239 240 241 242 243 244
	if (unlikely(msz < hsz))
		return false;
	if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE))
		return false;
	if (unlikely(skb->len < msz))
		return false;

	TIPC_SKB_CB(skb)->validated = true;
	return true;
}
245 246

/**
247
 * tipc_msg_build - create buffer chain containing specified header and data
248
 * @mhdr: Message header, to be prepended to data
249
 * @m: User message
250 251
 * @dsz: Total length of user data
 * @pktmax: Max packet size that can be used
252 253
 * @list: Buffer or chain of buffers to be returned to caller
 *
254 255 256
 * Note that the recursive call we are making here is safe, since it can
 * logically go only one further level down.
 *
257 258
 * Returns message data size or errno: -ENOMEM, -EFAULT
 */
259 260
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
		   int dsz, int pktmax, struct sk_buff_head *list)
261 262
{
	int mhsz = msg_hdr_sz(mhdr);
263
	struct tipc_msg pkthdr;
264 265
	int msz = mhsz + dsz;
	int pktrem = pktmax;
266
	struct sk_buff *skb;
267 268
	int drem = dsz;
	int pktno = 1;
269
	char *pktpos;
270
	int pktsz;
271
	int rc;
272

273 274 275 276
	msg_set_size(mhdr, msz);

	/* No fragmentation needed? */
	if (likely(msz <= pktmax)) {
277
		skb = tipc_buf_acquire(msz, GFP_KERNEL);
278 279 280 281 282 283 284 285 286 287

		/* Fall back to smaller MTU if node local message */
		if (unlikely(!skb)) {
			if (pktmax != MAX_MSG_SIZE)
				return -ENOMEM;
			rc = tipc_msg_build(mhdr, m, offset, dsz, FB_MTU, list);
			if (rc != dsz)
				return rc;
			if (tipc_msg_assemble(list))
				return dsz;
288
			return -ENOMEM;
289
		}
290
		skb_orphan(skb);
291 292 293
		__skb_queue_tail(list, skb);
		skb_copy_to_linear_data(skb, mhdr, mhsz);
		pktpos = skb->data + mhsz;
294
		if (copy_from_iter_full(pktpos, dsz, &m->msg_iter))
295 296 297 298 299 300
			return dsz;
		rc = -EFAULT;
		goto error;
	}

	/* Prepare reusable fragment header */
301 302
	tipc_msg_init(msg_prevnode(mhdr), &pkthdr, MSG_FRAGMENTER,
		      FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr));
303 304
	msg_set_size(&pkthdr, pktmax);
	msg_set_fragm_no(&pkthdr, pktno);
305
	msg_set_importance(&pkthdr, msg_importance(mhdr));
306 307

	/* Prepare first fragment */
308
	skb = tipc_buf_acquire(pktmax, GFP_KERNEL);
309
	if (!skb)
310
		return -ENOMEM;
311
	skb_orphan(skb);
312 313 314
	__skb_queue_tail(list, skb);
	pktpos = skb->data;
	skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
315 316
	pktpos += INT_H_SIZE;
	pktrem -= INT_H_SIZE;
317
	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
318 319 320 321 322 323 324
	pktpos += mhsz;
	pktrem -= mhsz;

	do {
		if (drem < pktrem)
			pktrem = drem;

325
		if (!copy_from_iter_full(pktpos, pktrem, &m->msg_iter)) {
326 327 328 329 330 331 332 333 334 335 336 337 338
			rc = -EFAULT;
			goto error;
		}
		drem -= pktrem;

		if (!drem)
			break;

		/* Prepare new fragment: */
		if (drem < (pktmax - INT_H_SIZE))
			pktsz = drem + INT_H_SIZE;
		else
			pktsz = pktmax;
339
		skb = tipc_buf_acquire(pktsz, GFP_KERNEL);
340
		if (!skb) {
341 342 343
			rc = -ENOMEM;
			goto error;
		}
344
		skb_orphan(skb);
345
		__skb_queue_tail(list, skb);
346 347 348
		msg_set_type(&pkthdr, FRAGMENT);
		msg_set_size(&pkthdr, pktsz);
		msg_set_fragm_no(&pkthdr, ++pktno);
349 350
		skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
		pktpos = skb->data + INT_H_SIZE;
351 352 353
		pktrem = pktsz - INT_H_SIZE;

	} while (1);
354
	msg_set_type(buf_msg(skb), LAST_FRAGMENT);
355 356
	return dsz;
error:
357 358
	__skb_queue_purge(list);
	__skb_queue_head_init(list);
359 360 361
	return rc;
}

362 363
/**
 * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one
364 365
 * @skb: the buffer to append to ("bundle")
 * @msg:  message to be appended
366 367 368 369
 * @mtu:  max allowable size for the bundle buffer
 * Consumes buffer if successful
 * Returns true if bundling could be performed, otherwise false
 */
370
bool tipc_msg_bundle(struct sk_buff *skb, struct tipc_msg *msg, u32 mtu)
371
{
J
Jon Paul Maloy 已提交
372 373
	struct tipc_msg *bmsg;
	unsigned int bsz;
374
	unsigned int msz = msg_size(msg);
J
Jon Paul Maloy 已提交
375
	u32 start, pad;
376 377 378 379
	u32 max = mtu - INT_H_SIZE;

	if (likely(msg_user(msg) == MSG_FRAGMENTER))
		return false;
380
	if (!skb)
J
Jon Paul Maloy 已提交
381
		return false;
382
	bmsg = buf_msg(skb);
J
Jon Paul Maloy 已提交
383 384 385 386
	bsz = msg_size(bmsg);
	start = align(bsz);
	pad = start - bsz;

387
	if (unlikely(msg_user(msg) == TUNNEL_PROTOCOL))
388 389 390
		return false;
	if (unlikely(msg_user(msg) == BCAST_PROTOCOL))
		return false;
391
	if (unlikely(msg_user(bmsg) != MSG_BUNDLER))
392
		return false;
393
	if (unlikely(skb_tailroom(skb) < (pad + msz)))
394 395 396
		return false;
	if (unlikely(max < (start + msz)))
		return false;
397 398 399
	if ((msg_importance(msg) < TIPC_SYSTEM_IMPORTANCE) &&
	    (msg_importance(bmsg) == TIPC_SYSTEM_IMPORTANCE))
		return false;
400

401 402
	skb_put(skb, pad + msz);
	skb_copy_to_linear_data_offset(skb, start, msg, msz);
403 404 405 406 407
	msg_set_size(bmsg, start + msz);
	msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
	return true;
}

408 409
/**
 *  tipc_msg_extract(): extract bundled inner packet from buffer
410
 *  @skb: buffer to be extracted from.
411
 *  @iskb: extracted inner buffer, to be returned
412 413
 *  @pos: position in outer message of msg to be extracted.
 *        Returns position of next msg
414 415 416 417 418
 *  Consumes outer buffer when last packet extracted
 *  Returns true when when there is an extracted buffer, otherwise false
 */
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
{
419 420
	struct tipc_msg *hdr, *ihdr;
	int imsz;
421

422
	*iskb = NULL;
423
	if (unlikely(skb_linearize(skb)))
424 425
		goto none;

426 427
	hdr = buf_msg(skb);
	if (unlikely(*pos > (msg_data_sz(hdr) - MIN_H_SIZE)))
428 429
		goto none;

430 431 432 433
	ihdr = (struct tipc_msg *)(msg_data(hdr) + *pos);
	imsz = msg_size(ihdr);

	if ((*pos + imsz) > msg_data_sz(hdr))
434
		goto none;
435 436 437 438 439 440

	*iskb = tipc_buf_acquire(imsz, GFP_ATOMIC);
	if (!*iskb)
		goto none;

	skb_copy_to_linear_data(*iskb, ihdr, imsz);
441
	if (unlikely(!tipc_msg_validate(iskb)))
442
		goto none;
443

444 445 446 447
	*pos += align(imsz);
	return true;
none:
	kfree_skb(skb);
448
	kfree_skb(*iskb);
449 450 451 452
	*iskb = NULL;
	return false;
}

453 454
/**
 * tipc_msg_make_bundle(): Create bundle buf and append message to its tail
455 456 457
 * @list: the buffer chain, where head is the buffer to replace/append
 * @skb: buffer to be created, appended to and returned in case of success
 * @msg: message to be appended
458
 * @mtu: max allowable size for the bundle buffer, inclusive header
459
 * @dnode: destination node for message. (Not always present in header)
S
stephen hemminger 已提交
460
 * Returns true if success, otherwise false
461
 */
462 463
bool tipc_msg_make_bundle(struct sk_buff **skb,  struct tipc_msg *msg,
			  u32 mtu, u32 dnode)
464
{
465
	struct sk_buff *_skb;
466 467 468 469 470 471
	struct tipc_msg *bmsg;
	u32 msz = msg_size(msg);
	u32 max = mtu - INT_H_SIZE;

	if (msg_user(msg) == MSG_FRAGMENTER)
		return false;
472
	if (msg_user(msg) == TUNNEL_PROTOCOL)
473 474 475 476 477 478
		return false;
	if (msg_user(msg) == BCAST_PROTOCOL)
		return false;
	if (msz > (max / 2))
		return false;

479
	_skb = tipc_buf_acquire(max, GFP_ATOMIC);
480
	if (!_skb)
481 482
		return false;

483 484
	skb_trim(_skb, INT_H_SIZE);
	bmsg = buf_msg(_skb);
485 486
	tipc_msg_init(msg_prevnode(msg), bmsg, MSG_BUNDLER, 0,
		      INT_H_SIZE, dnode);
487
	msg_set_importance(bmsg, msg_importance(msg));
488 489 490
	msg_set_seqno(bmsg, msg_seqno(msg));
	msg_set_ack(bmsg, msg_ack(msg));
	msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
491 492
	tipc_msg_bundle(_skb, msg, mtu);
	*skb = _skb;
J
Jon Paul Maloy 已提交
493
	return true;
494
}
495 496 497

/**
 * tipc_msg_reverse(): swap source and destination addresses and add error code
498 499 500 501
 * @own_node: originating node id for reversed message
 * @skb:  buffer containing message to be reversed; may be replaced.
 * @err:  error code to be set in message, if any
 * Consumes buffer at failure
502 503
 * Returns true if success, otherwise false
 */
504
bool tipc_msg_reverse(u32 own_node,  struct sk_buff **skb, int err)
505
{
506
	struct sk_buff *_skb = *skb;
507
	struct tipc_msg *hdr;
508
	struct tipc_msg ohdr;
509
	int dlen;
510

511
	if (skb_linearize(_skb))
512
		goto exit;
513
	hdr = buf_msg(_skb);
514
	dlen = min_t(uint, msg_data_sz(hdr), MAX_FORWARD_SIZE);
515
	if (msg_dest_droppable(hdr))
516
		goto exit;
517
	if (msg_errcode(hdr))
518
		goto exit;
519 520 521 522 523 524

	/* Take a copy of original header before altering message */
	memcpy(&ohdr, hdr, msg_hdr_sz(hdr));

	/* Never return SHORT header; expand by replacing buffer if necessary */
	if (msg_short(hdr)) {
525
		*skb = tipc_buf_acquire(BASIC_H_SIZE + dlen, GFP_ATOMIC);
526 527 528 529 530 531 532 533
		if (!*skb)
			goto exit;
		memcpy((*skb)->data + BASIC_H_SIZE, msg_data(hdr), dlen);
		kfree_skb(_skb);
		_skb = *skb;
		hdr = buf_msg(_skb);
		memcpy(hdr, &ohdr, BASIC_H_SIZE);
		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
534
	}
535 536 537

	/* Now reverse the concerned fields */
	msg_set_errcode(hdr, err);
538
	msg_set_non_seq(hdr, 0);
539 540 541 542 543 544 545 546
	msg_set_origport(hdr, msg_destport(&ohdr));
	msg_set_destport(hdr, msg_origport(&ohdr));
	msg_set_destnode(hdr, msg_prevnode(&ohdr));
	msg_set_prevnode(hdr, own_node);
	msg_set_orignode(hdr, own_node);
	msg_set_size(hdr, msg_hdr_sz(hdr) + dlen);
	skb_trim(_skb, msg_size(hdr));
	skb_orphan(_skb);
547 548
	return true;
exit:
549 550
	kfree_skb(_skb);
	*skb = NULL;
551 552
	return false;
}
553 554

/**
555 556
 * tipc_msg_lookup_dest(): try to find new destination for named message
 * @skb: the buffer containing the message.
557
 * @err: error code to be used by caller if lookup fails
558
 * Does not consume buffer
559
 * Returns true if a destination is found, false otherwise
560
 */
561
bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
562
{
563
	struct tipc_msg *msg = buf_msg(skb);
564 565
	u32 dport, dnode;
	u32 onode = tipc_own_addr(net);
566

567 568 569 570
	if (!msg_isdata(msg))
		return false;
	if (!msg_named(msg))
		return false;
571 572
	if (msg_errcode(msg))
		return false;
573
	*err = TIPC_ERR_NO_NAME;
574 575
	if (skb_linearize(skb))
		return false;
576
	msg = buf_msg(skb);
577
	if (msg_reroute_cnt(msg))
578
		return false;
J
Jon Maloy 已提交
579
	dnode = tipc_scope2node(net, msg_lookup_scope(msg));
580
	dport = tipc_nametbl_translate(net, msg_nametype(msg),
581
				       msg_nameinst(msg), &dnode);
582
	if (!dport)
583
		return false;
584
	msg_incr_reroute_cnt(msg);
585 586 587
	if (dnode != onode)
		msg_set_prevnode(msg, onode);
	msg_set_destnode(msg, dnode);
588
	msg_set_destport(msg, dport);
589
	*err = TIPC_OK;
590 591 592 593

	if (!skb_cloned(skb))
		return true;

594
	return true;
595
}
596

597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620
/* tipc_msg_assemble() - assemble chain of fragments into one message
 */
bool tipc_msg_assemble(struct sk_buff_head *list)
{
	struct sk_buff *skb, *tmp = NULL;

	if (skb_queue_len(list) == 1)
		return true;

	while ((skb = __skb_dequeue(list))) {
		skb->next = NULL;
		if (tipc_buf_append(&tmp, &skb)) {
			__skb_queue_tail(list, skb);
			return true;
		}
		if (!tmp)
			break;
	}
	__skb_queue_purge(list);
	__skb_queue_head_init(list);
	pr_warn("Failed do assemble buffer\n");
	return false;
}

621 622 623
/* tipc_msg_reassemble() - clone a buffer chain of fragments and
 *                         reassemble the clones into one message
 */
624
bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq)
625
{
626
	struct sk_buff *skb, *_skb;
627
	struct sk_buff *frag = NULL;
628
	struct sk_buff *head = NULL;
629
	int hdr_len;
630 631

	/* Copy header if single buffer */
632 633
	if (skb_queue_len(list) == 1) {
		skb = skb_peek(list);
634 635 636 637 638 639
		hdr_len = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
		_skb = __pskb_copy(skb, hdr_len, GFP_ATOMIC);
		if (!_skb)
			return false;
		__skb_queue_tail(rcvq, _skb);
		return true;
640 641 642
	}

	/* Clone all fragments and reassemble */
643 644
	skb_queue_walk(list, skb) {
		frag = skb_clone(skb, GFP_ATOMIC);
645 646 647 648 649 650 651 652
		if (!frag)
			goto error;
		frag->next = NULL;
		if (tipc_buf_append(&head, &frag))
			break;
		if (!head)
			goto error;
	}
653 654
	__skb_queue_tail(rcvq, frag);
	return true;
655 656 657
error:
	pr_warn("Failed do clone local mcast rcv buffer\n");
	kfree_skb(head);
658
	return false;
659
}
660

661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677
bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
			struct sk_buff_head *cpy)
{
	struct sk_buff *skb, *_skb;

	skb_queue_walk(msg, skb) {
		_skb = pskb_copy(skb, GFP_ATOMIC);
		if (!_skb) {
			__skb_queue_purge(cpy);
			return false;
		}
		msg_set_destnode(buf_msg(_skb), dst);
		__skb_queue_tail(cpy, _skb);
	}
	return true;
}

678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707
/* tipc_skb_queue_sorted(); sort pkt into list according to sequence number
 * @list: list to be appended to
 * @seqno: sequence number of buffer to add
 * @skb: buffer to add
 */
void __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
			     struct sk_buff *skb)
{
	struct sk_buff *_skb, *tmp;

	if (skb_queue_empty(list) || less(seqno, buf_seqno(skb_peek(list)))) {
		__skb_queue_head(list, skb);
		return;
	}

	if (more(seqno, buf_seqno(skb_peek_tail(list)))) {
		__skb_queue_tail(list, skb);
		return;
	}

	skb_queue_walk_safe(list, _skb, tmp) {
		if (more(seqno, buf_seqno(_skb)))
			continue;
		if (seqno == buf_seqno(_skb))
			break;
		__skb_queue_before(list, _skb, skb);
		return;
	}
	kfree_skb(skb);
}
J
Jon Maloy 已提交
708 709 710 711 712 713 714

void tipc_skb_reject(struct net *net, int err, struct sk_buff *skb,
		     struct sk_buff_head *xmitq)
{
	if (tipc_msg_reverse(tipc_own_addr(net), &skb, err))
		__skb_queue_tail(xmitq, skb);
}