/*	6LoWPAN fragment reassembly
 *
 *
 *	Authors:
 *	Alexander Aring		<aar@pengutronix.de>
 *
 *	Based on: net/ipv6/reassembly.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "6LoWPAN: " fmt

#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/ieee802154_netdev.h>
#include <net/6lowpan.h>
#include <net/ipv6.h>
#include <net/inet_frag.h>

#include "6lowpan_i.h"

static const char lowpan_frags_cache_name[] = "lowpan-frags";

static struct inet_frags lowpan_frags;

static int lowpan_frag_reasm(struct lowpan_frag_queue *fq,
			     struct sk_buff *prev, struct net_device *ldev);

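/* Fragments of one datagram are identified by the datagram tag, the
 * datagram size and the two link-layer addresses, so mix all four
 * into the hash.  The hash seed is initialized lazily on first use.
 */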
static unsigned int lowpan_hash_frag(u16 tag, u16 d_size,
				     const struct ieee802154_addr *saddr,
				     const struct ieee802154_addr *daddr)
{
	net_get_random_once(&lowpan_frags.rnd, sizeof(lowpan_frags.rnd));
	return jhash_3words(ieee802154_addr_hash(saddr),
			    ieee802154_addr_hash(daddr),
			    (__force u32)(tag + (d_size << 16)),
			    lowpan_frags.rnd);
}

static unsigned int lowpan_hashfn(const struct inet_frag_queue *q)
{
	const struct lowpan_frag_queue *fq;

	fq = container_of(q, struct lowpan_frag_queue, q);
	return lowpan_hash_frag(fq->tag, fq->d_size, &fq->saddr, &fq->daddr);
}

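/* Per RFC 4944, fragments belong to the same datagram only if the
 * datagram tag, the datagram size and both 802.15.4 addresses match.
 */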
static bool lowpan_frag_match(const struct inet_frag_queue *q, const void *a)
{
	const struct lowpan_frag_queue *fq;
	const struct lowpan_create_arg *arg = a;

	fq = container_of(q, struct lowpan_frag_queue, q);
	return	fq->tag == arg->tag && fq->d_size == arg->d_size &&
		ieee802154_addr_equal(&fq->saddr, arg->src) &&
		ieee802154_addr_equal(&fq->daddr, arg->dst);
}

static void lowpan_frag_init(struct inet_frag_queue *q, const void *a)
{
	const struct lowpan_create_arg *arg = a;
	struct lowpan_frag_queue *fq;

	fq = container_of(q, struct lowpan_frag_queue, q);

	fq->tag = arg->tag;
	fq->d_size = arg->d_size;
	fq->saddr = *arg->src;
	fq->daddr = *arg->dst;
}

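/* Timer callback: the reassembly timeout for this queue expired, so
 * kill the queue and release the fragments collected so far.
 */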
static void lowpan_frag_expire(unsigned long data)
{
	struct frag_queue *fq;
	struct net *net;

	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
	net = container_of(fq->q.net, struct net, ieee802154_lowpan.frags);

	spin_lock(&fq->q.lock);

	if (fq->q.flags & INET_FRAG_COMPLETE)
		goto out;

	inet_frag_kill(&fq->q, &lowpan_frags);
out:
	spin_unlock(&fq->q.lock);
	inet_frag_put(&fq->q, &lowpan_frags);
}

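/* Look up the reassembly queue matching this fragment, creating a new
 * one if none exists yet.  Returns NULL when the lookup fails, e.g.
 * because the fragment memory limits are exceeded.
 */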
static inline struct lowpan_frag_queue *
fq_find(struct net *net, const struct lowpan_802154_cb *cb,
	const struct ieee802154_addr *src,
	const struct ieee802154_addr *dst)
{
	struct inet_frag_queue *q;
	struct lowpan_create_arg arg;
	unsigned int hash;
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	arg.tag = cb->d_tag;
	arg.d_size = cb->d_size;
	arg.src = src;
	arg.dst = dst;

	hash = lowpan_hash_frag(cb->d_tag, cb->d_size, src, dst);

	q = inet_frag_find(&ieee802154_lowpan->frags,
			   &lowpan_frags, &arg, hash);
	if (IS_ERR_OR_NULL(q)) {
		inet_frag_maybe_warn_overflow(q, pr_fmt());
		return NULL;
	}
	return container_of(q, struct lowpan_frag_queue, q);
}

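/* Queue one fragment.  lowpan_802154_cb(skb)->d_offset carries the
 * RFC 4944 datagram_offset, counted in units of eight octets, hence
 * the shift by three to get the byte offset into the datagram.
 * Called with the queue lock held.
 */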
static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
			     struct sk_buff *skb, u8 frag_type)
{
	struct sk_buff *prev, *next;
	struct net_device *ldev;
	int end, offset;

	if (fq->q.flags & INET_FRAG_COMPLETE)
		goto err;

	offset = lowpan_802154_cb(skb)->d_offset << 3;
	end = lowpan_802154_cb(skb)->d_size;

	/* Is this the final fragment? */
	if (offset + skb->len == end) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.flags |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.flags & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = fq->q.fragments_tail;
	if (!prev ||
	    lowpan_802154_cb(prev)->d_offset <
	    lowpan_802154_cb(skb)->d_offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = fq->q.fragments; next != NULL; next = next->next) {
		if (lowpan_802154_cb(next)->d_offset >=
		    lowpan_802154_cb(skb)->d_offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		fq->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	ldev = skb->dev;
	if (ldev)
		skb->dev = NULL;

	fq->q.stamp = skb->tstamp;
	if (frag_type == LOWPAN_DISPATCH_FRAG1)
		fq->q.flags |= INET_FRAG_FIRST_IN;

	fq->q.meat += skb->len;
	add_frag_mem_limit(fq->q.net, skb->truesize);

	if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len) {
		int res;
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		res = lowpan_frag_reasm(fq, prev, ldev);
		skb->_skb_refdst = orefdst;
		return res;
	}

	return -1;
err:
	kfree_skb(skb);
	return -1;
}

/*	Check if this packet is complete.
 *
 *	It is called with locked fq, and caller must check that
 *	queue is eligible for reassembly i.e. it is not COMPLETE,
 *	the last and the first frames arrived and all the bits are here.
 *
 *	Returns 1 on success, or -1 if reassembly failed, e.g. for
 *	lack of memory.
 */
static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
			     struct net_device *ldev)
{
	struct sk_buff *fp, *head = fq->q.fragments;
	int sum_truesize;

	inet_frag_kill(&fq->q, &lowpan_frags);

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		if (!fp->next)
			fq->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		consume_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (!clone)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = head->data_len - plen;
		clone->data_len = clone->len;
		head->data_len -= clone->len;
		head->len -= clone->len;
		add_frag_mem_limit(fq->q.net, clone->truesize);
	}

	WARN_ON(head == NULL);

	sum_truesize = head->truesize;
	for (fp = head->next; fp;) {
		bool headstolen;
		int delta;
		struct sk_buff *next = fp->next;

		sum_truesize += fp->truesize;
		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
			kfree_skb_partial(fp, headstolen);
		} else {
			if (!skb_shinfo(head)->frag_list)
				skb_shinfo(head)->frag_list = fp;
			head->data_len += fp->len;
			head->len += fp->len;
			head->truesize += fp->truesize;
		}
		fp = next;
	}
	sub_frag_mem_limit(fq->q.net, sum_truesize);

	head->next = NULL;
	head->dev = ldev;
	head->tstamp = fq->q.stamp;

	fq->q.fragments = NULL;
	fq->q.fragments_tail = NULL;

	return 1;
out_oom:
	net_dbg_ratelimited("lowpan_frag_reasm: no memory for reassembly\n");
	return -1;
}

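/* Map the lowpan_rx_result of the fragment rx handlers onto the
 * NET_RX_* codes expected by the caller.
 */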
static int lowpan_frag_rx_handlers_result(struct sk_buff *skb,
					  lowpan_rx_result res)
{
	switch (res) {
	case RX_QUEUED:
		return NET_RX_SUCCESS;
	case RX_CONTINUE:
		/* nobody cared about this packet */
		net_warn_ratelimited("%s: received unknown dispatch\n",
				     __func__);

		/* fall-through */
	default:
		/* all others failure */
		return NET_RX_DROP;
	}
}

static lowpan_rx_result lowpan_frag_rx_h_iphc(struct sk_buff *skb)
{
	int ret;

	if (!lowpan_is_iphc(*skb_network_header(skb)))
		return RX_CONTINUE;

	ret = lowpan_iphc_decompress(skb);
	if (ret < 0)
		return RX_DROP;

	return RX_QUEUED;
}

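/* Run the rx handlers on the first fragment: decompress an IPHC
 * header in place or accept an uncompressed IPv6 dispatch, so the
 * reassembled datagram starts with a plain IPv6 header.
 */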
static int lowpan_invoke_frag_rx_handlers(struct sk_buff *skb)
{
	lowpan_rx_result res;

#define CALL_RXH(rxh)			\
	do {				\
		res = rxh(skb);	\
		if (res != RX_CONTINUE)	\
			goto rxh_next;	\
	} while (0)

	/* likely at first */
	CALL_RXH(lowpan_frag_rx_h_iphc);
	CALL_RXH(lowpan_rx_h_ipv6);

rxh_next:
	return lowpan_frag_rx_handlers_result(skb, res);
#undef CALL_RXH
}

#define LOWPAN_FRAG_DGRAM_SIZE_HIGH_MASK	0x07
#define LOWPAN_FRAG_DGRAM_SIZE_HIGH_SHIFT	8

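/* Parse the RFC 4944 fragmentation header into the skb control block.
 *
 * FRAG1 (first fragment) header, four octets:
 *
 *                      1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |1 1 0 0 0|    datagram_size    |         datagram_tag          |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * FRAGN (subsequent fragments) carries a fifth octet, datagram_offset,
 * expressed in units of eight octets.
 */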
static int lowpan_get_cb(struct sk_buff *skb, u8 frag_type,
			 struct lowpan_802154_cb *cb)
{
	bool fail;
	u8 high = 0, low = 0;
	__be16 d_tag = 0;

	fail = lowpan_fetch_skb(skb, &high, 1);
	fail |= lowpan_fetch_skb(skb, &low, 1);
	/* remove the dispatch value and use first three bits as high value
	 * for the datagram size
	 */
	cb->d_size = (high & LOWPAN_FRAG_DGRAM_SIZE_HIGH_MASK) <<
		LOWPAN_FRAG_DGRAM_SIZE_HIGH_SHIFT | low;
	fail |= lowpan_fetch_skb(skb, &d_tag, 2);
	cb->d_tag = ntohs(d_tag);

	if (frag_type == LOWPAN_DISPATCH_FRAGN) {
		fail |= lowpan_fetch_skb(skb, &cb->d_offset, 1);
	} else {
		skb_reset_network_header(skb);
		cb->d_offset = 0;
		/* check if datagram_size has ipv6hdr on FRAG1 */
		fail |= cb->d_size < sizeof(struct ipv6hdr);
		/* check if we can dereference the dispatch value */
		fail |= !skb->len;
	}

	if (unlikely(fail))
		return -EIO;

	return 0;
}

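/* Entry point for FRAG1/FRAGN frames.  The fragment header is parsed
 * into the skb control block, FRAG1 payloads are decompressed before
 * queueing, and datagrams larger than IPV6_MIN_MTU are rejected.
 */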
int lowpan_frag_rcv(struct sk_buff *skb, u8 frag_type)
{
	struct lowpan_frag_queue *fq;
	struct net *net = dev_net(skb->dev);
	struct lowpan_802154_cb *cb = lowpan_802154_cb(skb);
	struct ieee802154_hdr hdr;
	int err;

	if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
		goto err;

	err = lowpan_get_cb(skb, frag_type, cb);
	if (err < 0)
		goto err;

	if (frag_type == LOWPAN_DISPATCH_FRAG1) {
		err = lowpan_invoke_frag_rx_handlers(skb);
		if (err == NET_RX_DROP)
			goto err;
	}

	if (cb->d_size > IPV6_MIN_MTU) {
		net_warn_ratelimited("lowpan_frag_rcv: datagram size exceeds MTU\n");
		goto err;
	}

	fq = fq_find(net, cb, &hdr.source, &hdr.dest);
	if (fq != NULL) {
		int ret;

		spin_lock(&fq->q.lock);
		ret = lowpan_frag_queue(fq, skb, frag_type);
		spin_unlock(&fq->q.lock);

		inet_frag_put(&fq->q, &lowpan_frags);
		return ret;
	}

err:
	kfree_skb(skb);
	return -1;
}

#ifdef CONFIG_SYSCTL
static int zero;

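/* Per-netns sysctls under net/ieee802154/6lowpan/: high and low
 * memory thresholds for the fragment cache, plus the reassembly
 * timeout.
 */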
static struct ctl_table lowpan_frags_ns_ctl_table[] = {
	{
		.procname	= "6lowpanfrag_high_thresh",
		.data		= &init_net.ieee802154_lowpan.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &init_net.ieee802154_lowpan.frags.low_thresh
	},
	{
		.procname	= "6lowpanfrag_low_thresh",
		.data		= &init_net.ieee802154_lowpan.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
		.extra2		= &init_net.ieee802154_lowpan.frags.high_thresh
	},
	{
		.procname	= "6lowpanfrag_time",
		.data		= &init_net.ieee802154_lowpan.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

/* secret interval has been deprecated */
static int lowpan_frags_secret_interval_unused;

static struct ctl_table lowpan_frags_ctl_table[] = {
	{
		.procname	= "6lowpanfrag_secret_interval",
		.data		= &lowpan_frags_secret_interval_unused,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	table = lowpan_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
				GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		table[0].data = &ieee802154_lowpan->frags.high_thresh;
		table[0].extra1 = &ieee802154_lowpan->frags.low_thresh;
		table[0].extra2 = &init_net.ieee802154_lowpan.frags.high_thresh;
		table[1].data = &ieee802154_lowpan->frags.low_thresh;
		table[1].extra2 = &ieee802154_lowpan->frags.high_thresh;
		table[2].data = &ieee802154_lowpan->frags.timeout;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
	if (hdr == NULL)
		goto err_reg;

	ieee802154_lowpan->sysctl.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit lowpan_frags_ns_sysctl_unregister(struct net *net)
{
	struct ctl_table *table;
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	table = ieee802154_lowpan->sysctl.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(ieee802154_lowpan->sysctl.frags_hdr);
	if (!net_eq(net, &init_net))
		kfree(table);
}

static struct ctl_table_header *lowpan_ctl_header;

static int __init lowpan_frags_sysctl_register(void)
{
	lowpan_ctl_header = register_net_sysctl(&init_net,
						"net/ieee802154/6lowpan",
						lowpan_frags_ctl_table);
	return lowpan_ctl_header == NULL ? -ENOMEM : 0;
}

static void lowpan_frags_sysctl_unregister(void)
{
	unregister_net_sysctl_table(lowpan_ctl_header);
}
#else
static inline int lowpan_frags_ns_sysctl_register(struct net *net)
{
	return 0;
}

static inline void lowpan_frags_ns_sysctl_unregister(struct net *net)
{
}

static inline int __init lowpan_frags_sysctl_register(void)
{
	return 0;
}

static inline void lowpan_frags_sysctl_unregister(void)
{
}
#endif

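/* Per-netns setup: seed the fragment cache limits and the reassembly
 * timeout with the IPv6 defaults, then register the sysctls.
 */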
static int __net_init lowpan_frags_init_net(struct net *net)
{
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
	ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
	ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;

	inet_frags_init_net(&ieee802154_lowpan->frags);

	return lowpan_frags_ns_sysctl_register(net);
}

static void __net_exit lowpan_frags_exit_net(struct net *net)
{
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	lowpan_frags_ns_sysctl_unregister(net);
	inet_frags_exit_net(&ieee802154_lowpan->frags, &lowpan_frags);
}

static struct pernet_operations lowpan_frags_ops = {
	.init = lowpan_frags_init_net,
	.exit = lowpan_frags_exit_net,
};

int __init lowpan_net_frag_init(void)
{
	int ret;

	ret = lowpan_frags_sysctl_register();
	if (ret)
		return ret;

	ret = register_pernet_subsys(&lowpan_frags_ops);
	if (ret)
		goto err_pernet;

	lowpan_frags.hashfn = lowpan_hashfn;
	lowpan_frags.constructor = lowpan_frag_init;
	lowpan_frags.destructor = NULL;
	lowpan_frags.qsize = sizeof(struct frag_queue);
	lowpan_frags.match = lowpan_frag_match;
	lowpan_frags.frag_expire = lowpan_frag_expire;
	lowpan_frags.frags_cache_name = lowpan_frags_cache_name;
	ret = inet_frags_init(&lowpan_frags);
	if (ret)
		goto err_frags;

	return ret;
err_frags:
	/* undo the pernet registration as well, not just the sysctls */
	unregister_pernet_subsys(&lowpan_frags_ops);
err_pernet:
	lowpan_frags_sysctl_unregister();
	return ret;
}

void lowpan_net_frag_exit(void)
{
	inet_frags_fini(&lowpan_frags);
	lowpan_frags_sysctl_unregister();
	unregister_pernet_subsys(&lowpan_frags_ops);
}