/*	6LoWPAN fragment reassembly
 *
 *
 *	Authors:
 *	Alexander Aring		<aar@pengutronix.de>
 *
 *	Based on: net/ipv6/reassembly.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "6LoWPAN: " fmt

#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/ieee802154_netdev.h>
#include <net/6lowpan.h>
#include <net/ipv6.h>
#include <net/inet_frag.h>

#include "6lowpan_i.h"

static const char lowpan_frags_cache_name[] = "lowpan-frags";

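/* Fragmentation header fields (RFC 4944): the datagram tag, the total
 * datagram size and this fragment's offset in units of eight octets.
 * They are parked in skb->cb while the fragment waits in the queue.
 */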
struct lowpan_frag_info {
	u16 d_tag;
	u16 d_size;
	u8 d_offset;
};

static struct lowpan_frag_info *lowpan_cb(struct sk_buff *skb)
{
	return (struct lowpan_frag_info *)skb->cb;
}

static struct inet_frags lowpan_frags;

static int lowpan_frag_reasm(struct lowpan_frag_queue *fq,
			     struct sk_buff *prev, struct net_device *ldev);

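/* Hash the reassembly key (tag, datagram size, link-layer source and
 * destination address) so lookups and newly created queues land in the
 * same inet_frag hash bucket.
 */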
static unsigned int lowpan_hash_frag(u16 tag, u16 d_size,
				     const struct ieee802154_addr *saddr,
				     const struct ieee802154_addr *daddr)
{
	net_get_random_once(&lowpan_frags.rnd, sizeof(lowpan_frags.rnd));
	return jhash_3words(ieee802154_addr_hash(saddr),
			    ieee802154_addr_hash(daddr),
			    (__force u32)(tag + (d_size << 16)),
			    lowpan_frags.rnd);
}

static unsigned int lowpan_hashfn(const struct inet_frag_queue *q)
{
	const struct lowpan_frag_queue *fq;

	fq = container_of(q, struct lowpan_frag_queue, q);
	return lowpan_hash_frag(fq->tag, fq->d_size, &fq->saddr, &fq->daddr);
}

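/* Full key comparison, used by inet_frag_find() to resolve hash
 * collisions between different in-flight datagrams.
 */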
static bool lowpan_frag_match(const struct inet_frag_queue *q, const void *a)
{
	const struct lowpan_frag_queue *fq;
	const struct lowpan_create_arg *arg = a;

	fq = container_of(q, struct lowpan_frag_queue, q);
	return	fq->tag == arg->tag && fq->d_size == arg->d_size &&
		ieee802154_addr_equal(&fq->saddr, arg->src) &&
		ieee802154_addr_equal(&fq->daddr, arg->dst);
}

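/* Constructor for a freshly allocated queue: copy the key out of the
 * lookup argument before the queue is inserted into the hash.
 */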
static void lowpan_frag_init(struct inet_frag_queue *q, const void *a)
{
	const struct lowpan_create_arg *arg = a;
	struct lowpan_frag_queue *fq;

	fq = container_of(q, struct lowpan_frag_queue, q);

	fq->tag = arg->tag;
	fq->d_size = arg->d_size;
	fq->saddr = *arg->src;
	fq->daddr = *arg->dst;
}

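/* Timer callback: the datagram did not complete within the configured
 * timeout, so take the queue out of the hash and drop our reference;
 * the final inet_frag_put() frees the queued fragments.
 */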
static void lowpan_frag_expire(unsigned long data)
{
	struct frag_queue *fq;
	struct net *net;

	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
	net = container_of(fq->q.net, struct net, ieee802154_lowpan.frags);

	spin_lock(&fq->q.lock);

	if (fq->q.flags & INET_FRAG_COMPLETE)
		goto out;

	inet_frag_kill(&fq->q, &lowpan_frags);
out:
	spin_unlock(&fq->q.lock);
	inet_frag_put(&fq->q, &lowpan_frags);
}

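/* Find the queue this fragment belongs to, creating it on demand.
 * Returns NULL if no queue could be found or allocated.
 */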
static inline struct lowpan_frag_queue *
fq_find(struct net *net, const struct lowpan_frag_info *frag_info,
	const struct ieee802154_addr *src,
	const struct ieee802154_addr *dst)
{
	struct inet_frag_queue *q;
	struct lowpan_create_arg arg;
	unsigned int hash;
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	arg.tag = frag_info->d_tag;
	arg.d_size = frag_info->d_size;
	arg.src = src;
	arg.dst = dst;

	hash = lowpan_hash_frag(frag_info->d_tag, frag_info->d_size, src, dst);

	q = inet_frag_find(&ieee802154_lowpan->frags,
			   &lowpan_frags, &arg, hash);
	if (IS_ERR_OR_NULL(q)) {
		inet_frag_maybe_warn_overflow(q, pr_fmt());
		return NULL;
	}
	return container_of(q, struct lowpan_frag_queue, q);
}

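/* Add one fragment to its queue: validate the offset/size against what
 * has been seen so far, link the skb into the offset-ordered fragment
 * list and, once the first and last fragments are in and fq->q.meat
 * matches the expected datagram length, kick off reassembly.
 */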
static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
			     struct sk_buff *skb, const u8 frag_type)
{
	struct sk_buff *prev, *next;
	struct net_device *ldev;
	int end, offset;

	if (fq->q.flags & INET_FRAG_COMPLETE)
		goto err;

	offset = lowpan_cb(skb)->d_offset << 3;
	end = lowpan_cb(skb)->d_size;

	/* Is this the final fragment? */
	if (offset + skb->len == end) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.flags |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.flags & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = fq->q.fragments_tail;
	if (!prev || lowpan_cb(prev)->d_offset < lowpan_cb(skb)->d_offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = fq->q.fragments; next != NULL; next = next->next) {
		if (lowpan_cb(next)->d_offset >= lowpan_cb(skb)->d_offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		fq->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	ldev = skb->dev;
	if (ldev)
		skb->dev = NULL;

	fq->q.stamp = skb->tstamp;
	if (frag_type == LOWPAN_DISPATCH_FRAG1) {
		/* Calculate uncomp. 6lowpan header to estimate full size */
		fq->q.meat += lowpan_uncompress_size(skb, NULL);
		fq->q.flags |= INET_FRAG_FIRST_IN;
	} else {
		fq->q.meat += skb->len;
	}
	add_frag_mem_limit(fq->q.net, skb->truesize);

	if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len) {
		int res;
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		res = lowpan_frag_reasm(fq, prev, ldev);
		skb->_skb_refdst = orefdst;
		return res;
	}

	return -1;
err:
	kfree_skb(skb);
	return -1;
}

/*	Check if this packet is complete.
 *
 *	It is called with locked fq, and caller must check that
 *	queue is eligible for reassembly i.e. it is not COMPLETE,
 *	the last and the first frames arrived and all the bits are here.
 *
 *	Returns 1 on successful reassembly, or -1 if the reassembled
 *	frame could not be built.
 */
static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
			     struct net_device *ldev)
{
	struct sk_buff *fp, *head = fq->q.fragments;
	int sum_truesize;

	inet_frag_kill(&fq->q, &lowpan_frags);

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		if (!fp->next)
			fq->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		consume_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (!clone)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = head->data_len - plen;
		clone->data_len = clone->len;
		head->data_len -= clone->len;
		head->len -= clone->len;
		add_frag_mem_limit(fq->q.net, clone->truesize);
	}

	WARN_ON(head == NULL);

	sum_truesize = head->truesize;
	for (fp = head->next; fp;) {
		bool headstolen;
		int delta;
		struct sk_buff *next = fp->next;

		sum_truesize += fp->truesize;
		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
			kfree_skb_partial(fp, headstolen);
		} else {
			if (!skb_shinfo(head)->frag_list)
				skb_shinfo(head)->frag_list = fp;
			head->data_len += fp->len;
			head->len += fp->len;
			head->truesize += fp->truesize;
		}
		fp = next;
	}
	sub_frag_mem_limit(fq->q.net, sum_truesize);

	head->next = NULL;
	head->dev = ldev;
	head->tstamp = fq->q.stamp;

	fq->q.fragments = NULL;
	fq->q.fragments_tail = NULL;

	return 1;
out_oom:
	net_dbg_ratelimited("lowpan_frag_reasm: no memory for reassembly\n");
	return -1;
}

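/* Pull the FRAG1/FRAGN header off the skb: 11 bits of datagram size,
 * a 16 bit tag and, for FRAGN, one octet of offset (in 8-octet units).
 */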
static int lowpan_get_frag_info(struct sk_buff *skb, const u8 frag_type,
				struct lowpan_frag_info *frag_info)
{
	bool fail;
	u8 pattern = 0, low = 0;
	__be16 d_tag = 0;

	fail = lowpan_fetch_skb(skb, &pattern, 1);
	fail |= lowpan_fetch_skb(skb, &low, 1);
	frag_info->d_size = (pattern & 7) << 8 | low;
	fail |= lowpan_fetch_skb(skb, &d_tag, 2);
	frag_info->d_tag = ntohs(d_tag);

	if (frag_type == LOWPAN_DISPATCH_FRAGN) {
		fail |= lowpan_fetch_skb(skb, &frag_info->d_offset, 1);
	} else {
		skb_reset_network_header(skb);
		frag_info->d_offset = 0;
	}

	if (unlikely(fail))
		return -EIO;

	return 0;
}

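/* Entry point for received FRAG1/FRAGN frames.  Returns 1 when this
 * fragment completed the datagram (skb now holds the reassembled
 * payload) and -1 otherwise, in which case the skb has been queued
 * or dropped and must not be touched by the caller.
 */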
int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type)
{
	struct lowpan_frag_queue *fq;
	struct net *net = dev_net(skb->dev);
	struct lowpan_frag_info *frag_info = lowpan_cb(skb);
	struct ieee802154_addr source, dest;
	int err;

	source = mac_cb(skb)->source;
	dest = mac_cb(skb)->dest;

	err = lowpan_get_frag_info(skb, frag_type, frag_info);
	if (err < 0)
		goto err;

	if (frag_info->d_size > IPV6_MIN_MTU) {
		net_warn_ratelimited("lowpan_frag_rcv: datagram size exceeds MTU\n");
		goto err;
	}

	fq = fq_find(net, frag_info, &source, &dest);
	if (fq != NULL) {
		int ret;

		spin_lock(&fq->q.lock);
		ret = lowpan_frag_queue(fq, skb, frag_type);
		spin_unlock(&fq->q.lock);

		inet_frag_put(&fq->q, &lowpan_frags);
		return ret;
	}

err:
	kfree_skb(skb);
	return -1;
}

#ifdef CONFIG_SYSCTL
static int zero;

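/* Per-namespace knobs under /proc/sys/net/ieee802154/6lowpan/: memory
 * high/low watermarks for queued fragments and the reassembly timeout.
 */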
static struct ctl_table lowpan_frags_ns_ctl_table[] = {
	{
		.procname	= "6lowpanfrag_high_thresh",
		.data		= &init_net.ieee802154_lowpan.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &init_net.ieee802154_lowpan.frags.low_thresh
	},
	{
		.procname	= "6lowpanfrag_low_thresh",
		.data		= &init_net.ieee802154_lowpan.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
		.extra2		= &init_net.ieee802154_lowpan.frags.high_thresh
	},
	{
		.procname	= "6lowpanfrag_time",
		.data		= &init_net.ieee802154_lowpan.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

/* secret interval has been deprecated */
static int lowpan_frags_secret_interval_unused;
static struct ctl_table lowpan_frags_ctl_table[] = {
	{
		.procname	= "6lowpanfrag_secret_interval",
		.data		= &lowpan_frags_secret_interval_unused,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	table = lowpan_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
				GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		table[0].data = &ieee802154_lowpan->frags.high_thresh;
		table[0].extra1 = &ieee802154_lowpan->frags.low_thresh;
		table[0].extra2 = &init_net.ieee802154_lowpan.frags.high_thresh;
		table[1].data = &ieee802154_lowpan->frags.low_thresh;
		table[1].extra2 = &ieee802154_lowpan->frags.high_thresh;
		table[2].data = &ieee802154_lowpan->frags.timeout;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
	if (hdr == NULL)
		goto err_reg;

	ieee802154_lowpan->sysctl.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit lowpan_frags_ns_sysctl_unregister(struct net *net)
{
	struct ctl_table *table;
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	table = ieee802154_lowpan->sysctl.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(ieee802154_lowpan->sysctl.frags_hdr);
	if (!net_eq(net, &init_net))
		kfree(table);
}

static struct ctl_table_header *lowpan_ctl_header;

static int __init lowpan_frags_sysctl_register(void)
{
	lowpan_ctl_header = register_net_sysctl(&init_net,
						"net/ieee802154/6lowpan",
						lowpan_frags_ctl_table);
	return lowpan_ctl_header == NULL ? -ENOMEM : 0;
}

static void lowpan_frags_sysctl_unregister(void)
{
	unregister_net_sysctl_table(lowpan_ctl_header);
}
#else
static inline int lowpan_frags_ns_sysctl_register(struct net *net)
{
	return 0;
}

static inline void lowpan_frags_ns_sysctl_unregister(struct net *net)
{
}

static inline int __init lowpan_frags_sysctl_register(void)
{
	return 0;
}

static inline void lowpan_frags_sysctl_unregister(void)
{
}
#endif

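/* Per-namespace setup: reuse the IPv6 defaults for the thresholds and
 * timeout, then expose them through the sysctl table above.
 */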
static int __net_init lowpan_frags_init_net(struct net *net)
{
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
	ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
	ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;

	inet_frags_init_net(&ieee802154_lowpan->frags);

	return lowpan_frags_ns_sysctl_register(net);
}

static void __net_exit lowpan_frags_exit_net(struct net *net)
{
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	lowpan_frags_ns_sysctl_unregister(net);
	inet_frags_exit_net(&ieee802154_lowpan->frags, &lowpan_frags);
}

static struct pernet_operations lowpan_frags_ops = {
	.init = lowpan_frags_init_net,
	.exit = lowpan_frags_exit_net,
};

int __init lowpan_net_frag_init(void)
{
	int ret;

	ret = lowpan_frags_sysctl_register();
	if (ret)
		return ret;

	ret = register_pernet_subsys(&lowpan_frags_ops);
	if (ret)
		goto err_pernet;

	lowpan_frags.hashfn = lowpan_hashfn;
	lowpan_frags.constructor = lowpan_frag_init;
	lowpan_frags.destructor = NULL;
	lowpan_frags.skb_free = NULL;
	lowpan_frags.qsize = sizeof(struct frag_queue);
	lowpan_frags.match = lowpan_frag_match;
	lowpan_frags.frag_expire = lowpan_frag_expire;
	lowpan_frags.frags_cache_name = lowpan_frags_cache_name;
	ret = inet_frags_init(&lowpan_frags);
	if (ret)
		goto err_frags;

	return ret;
err_frags:
	/* unwind the pernet registration as well, not just the sysctls */
	unregister_pernet_subsys(&lowpan_frags_ops);
err_pernet:
	lowpan_frags_sysctl_unregister();
	return ret;
}

void lowpan_net_frag_exit(void)
{
	inet_frags_fini(&lowpan_frags);
	lowpan_frags_sysctl_unregister();
	unregister_pernet_subsys(&lowpan_frags_ops);
}