/*	6LoWPAN fragment reassembly
 *
 *	Authors:
 *	Alexander Aring		<aar@pengutronix.de>
 *
 *	Based on: net/ipv6/reassembly.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "6LoWPAN: " fmt

#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/ieee802154_netdev.h>
#include <net/6lowpan.h>
#include <net/ipv6.h>
#include <net/inet_frag.h>

#include "6lowpan_i.h"

static const char lowpan_frags_cache_name[] = "lowpan-frags";

static struct inet_frags lowpan_frags;

static int lowpan_frag_reasm(struct lowpan_frag_queue *fq,
			     struct sk_buff *prev, struct net_device *ldev);

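/* Hash that keys a reassembly queue.  Fragments belong to the same
 * datagram iff they agree on datagram tag, datagram size and the
 * 802.15.4 source and destination addresses.
 */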
static unsigned int lowpan_hash_frag(u16 tag, u16 d_size,
				     const struct ieee802154_addr *saddr,
				     const struct ieee802154_addr *daddr)
{
	net_get_random_once(&lowpan_frags.rnd, sizeof(lowpan_frags.rnd));
	return jhash_3words(ieee802154_addr_hash(saddr),
			    ieee802154_addr_hash(daddr),
			    (__force u32)(tag + (d_size << 16)),
			    lowpan_frags.rnd);
}

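/* inet_frags callback: hash an existing queue by its identifying tuple. */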
static unsigned int lowpan_hashfn(const struct inet_frag_queue *q)
{
	const struct lowpan_frag_queue *fq;

	fq = container_of(q, struct lowpan_frag_queue, q);
	return lowpan_hash_frag(fq->tag, fq->d_size, &fq->saddr, &fq->daddr);
}

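/* inet_frags callback: does queue @q match the lookup key @a? */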
static bool lowpan_frag_match(const struct inet_frag_queue *q, const void *a)
{
	const struct lowpan_frag_queue *fq;
	const struct lowpan_create_arg *arg = a;

	fq = container_of(q, struct lowpan_frag_queue, q);
	return	fq->tag == arg->tag && fq->d_size == arg->d_size &&
		ieee802154_addr_equal(&fq->saddr, arg->src) &&
		ieee802154_addr_equal(&fq->daddr, arg->dst);
}

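/* inet_frags callback: initialize a freshly allocated queue from the
 * lookup key.
 */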
static void lowpan_frag_init(struct inet_frag_queue *q, const void *a)
{
	const struct lowpan_create_arg *arg = a;
	struct lowpan_frag_queue *fq;

	fq = container_of(q, struct lowpan_frag_queue, q);

	fq->tag = arg->tag;
	fq->d_size = arg->d_size;
	fq->saddr = *arg->src;
	fq->daddr = *arg->dst;
}

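/* Timer callback: the reassembly timeout expired before all fragments
 * arrived, so kill the queue and drop what was collected so far.
 */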
static void lowpan_frag_expire(struct timer_list *t)
{
	struct inet_frag_queue *frag = from_timer(frag, t, timer);
	struct frag_queue *fq;
	struct net *net;

	fq = container_of(frag, struct frag_queue, q);
	net = container_of(fq->q.net, struct net, ieee802154_lowpan.frags);

	spin_lock(&fq->q.lock);

	if (fq->q.flags & INET_FRAG_COMPLETE)
		goto out;

	inet_frag_kill(&fq->q, &lowpan_frags);
out:
	spin_unlock(&fq->q.lock);
	inet_frag_put(&fq->q, &lowpan_frags);
}

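/* Find the reassembly queue for this fragment, creating it if it does
 * not exist yet.  Returns NULL if lookup or creation failed.
 */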
static inline struct lowpan_frag_queue *
fq_find(struct net *net, const struct lowpan_802154_cb *cb,
	const struct ieee802154_addr *src,
	const struct ieee802154_addr *dst)
{
	struct inet_frag_queue *q;
	struct lowpan_create_arg arg;
	unsigned int hash;
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	arg.tag = cb->d_tag;
	arg.d_size = cb->d_size;
	arg.src = src;
	arg.dst = dst;

	hash = lowpan_hash_frag(cb->d_tag, cb->d_size, src, dst);

	q = inet_frag_find(&ieee802154_lowpan->frags,
			   &lowpan_frags, &arg, hash);
	if (IS_ERR_OR_NULL(q)) {
		inet_frag_maybe_warn_overflow(q, pr_fmt());
		return NULL;
	}
	return container_of(q, struct lowpan_frag_queue, q);
}

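/* Add one fragment to its reassembly queue, keeping the fragment list
 * ordered by datagram offset.  Once the first and the last fragment
 * have arrived and the collected payload matches the announced
 * datagram size, hand over to lowpan_frag_reasm().  Returns 1 once
 * the datagram was reassembled, -1 otherwise.
 */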
static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
			     struct sk_buff *skb, u8 frag_type)
{
	struct sk_buff *prev, *next;
	struct net_device *ldev;
	int end, offset;

	if (fq->q.flags & INET_FRAG_COMPLETE)
		goto err;

	offset = lowpan_802154_cb(skb)->d_offset << 3;
	end = lowpan_802154_cb(skb)->d_size;

	/* Is this the final fragment? */
	if (offset + skb->len == end) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.flags |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.flags & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = fq->q.fragments_tail;
	if (!prev ||
	    lowpan_802154_cb(prev)->d_offset <
	    lowpan_802154_cb(skb)->d_offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = fq->q.fragments; next != NULL; next = next->next) {
		if (lowpan_802154_cb(next)->d_offset >=
		    lowpan_802154_cb(skb)->d_offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		fq->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	ldev = skb->dev;
	if (ldev)
		skb->dev = NULL;

	fq->q.stamp = skb->tstamp;
	if (frag_type == LOWPAN_DISPATCH_FRAG1)
		fq->q.flags |= INET_FRAG_FIRST_IN;

	fq->q.meat += skb->len;
	add_frag_mem_limit(fq->q.net, skb->truesize);

	if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len) {
		int res;
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		res = lowpan_frag_reasm(fq, prev, ldev);
		skb->_skb_refdst = orefdst;
		return res;
	}

	return -1;
err:
	kfree_skb(skb);
	return -1;
}

/*	Check if this packet is complete and, if so, build the reassembled
 *	frame from the queued fragments.  Returns 1 on success and -1 on
 *	failure.
 *
 *	It is called with locked fq, and caller must check that
 *	queue is eligible for reassembly i.e. it is not COMPLETE,
 *	the last and the first frames arrived and all the bits are here.
 */
static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
			     struct net_device *ldev)
{
	struct sk_buff *fp, *head = fq->q.fragments;
	int sum_truesize;

	inet_frag_kill(&fq->q, &lowpan_frags);

	/* Make the one we just received be the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		if (!fp->next)
			fq->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		consume_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it into two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (!clone)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = head->data_len - plen;
		clone->data_len = clone->len;
		head->data_len -= clone->len;
		head->len -= clone->len;
		add_frag_mem_limit(fq->q.net, clone->truesize);
	}

	WARN_ON(head == NULL);

	sum_truesize = head->truesize;
	for (fp = head->next; fp;) {
		bool headstolen;
		int delta;
		struct sk_buff *next = fp->next;

		sum_truesize += fp->truesize;
		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
			kfree_skb_partial(fp, headstolen);
		} else {
			if (!skb_shinfo(head)->frag_list)
				skb_shinfo(head)->frag_list = fp;
			head->data_len += fp->len;
			head->len += fp->len;
			head->truesize += fp->truesize;
		}
		fp = next;
	}
	sub_frag_mem_limit(fq->q.net, sum_truesize);

	head->next = NULL;
	head->dev = ldev;
	head->tstamp = fq->q.stamp;

	fq->q.fragments = NULL;
	fq->q.fragments_tail = NULL;

	return 1;
out_oom:
	net_dbg_ratelimited("lowpan_frag_reasm: no memory for reassembly\n");
	return -1;
}

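/* The FRAG1 fragment carries the (possibly IPHC-compressed) IPv6
 * header, so it is run through the 6LoWPAN receive handlers before
 * being queued.
 */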
static int lowpan_frag_rx_handlers_result(struct sk_buff *skb,
					  lowpan_rx_result res)
{
	switch (res) {
	case RX_QUEUED:
		return NET_RX_SUCCESS;
	case RX_CONTINUE:
		/* nobody cared about this packet */
		net_warn_ratelimited("%s: received unknown dispatch\n",
				     __func__);

		/* fall-through */
	default:
		/* everything else is a failure */
		return NET_RX_DROP;
	}
}

static lowpan_rx_result lowpan_frag_rx_h_iphc(struct sk_buff *skb)
{
	int ret;

	if (!lowpan_is_iphc(*skb_network_header(skb)))
		return RX_CONTINUE;

	ret = lowpan_iphc_decompress(skb);
	if (ret < 0)
		return RX_DROP;

	return RX_QUEUED;
}

static int lowpan_invoke_frag_rx_handlers(struct sk_buff *skb)
{
	lowpan_rx_result res;

#define CALL_RXH(rxh)			\
	do {				\
		res = rxh(skb);	\
		if (res != RX_CONTINUE)	\
			goto rxh_next;	\
	} while (0)

	/* likely at first */
	CALL_RXH(lowpan_frag_rx_h_iphc);
	CALL_RXH(lowpan_rx_h_ipv6);

rxh_next:
	return lowpan_frag_rx_handlers_result(skb, res);
#undef CALL_RXH
}

#define LOWPAN_FRAG_DGRAM_SIZE_HIGH_MASK	0x07
#define LOWPAN_FRAG_DGRAM_SIZE_HIGH_SHIFT	8

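/* Parse the RFC 4944 fragmentation header into the skb control block.
 * FRAG1 is 4 bytes: a 5-bit dispatch plus an 11-bit datagram_size,
 * followed by a 16-bit datagram_tag.  FRAGN carries one extra byte,
 * the datagram offset in units of 8 octets.
 */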
static int lowpan_get_cb(struct sk_buff *skb, u8 frag_type,
			 struct lowpan_802154_cb *cb)
{
	bool fail;
	u8 high = 0, low = 0;
	__be16 d_tag = 0;

	fail = lowpan_fetch_skb(skb, &high, 1);
	fail |= lowpan_fetch_skb(skb, &low, 1);
	/* remove the dispatch value and use first three bits as high value
	 * for the datagram size
	 */
	cb->d_size = (high & LOWPAN_FRAG_DGRAM_SIZE_HIGH_MASK) <<
		LOWPAN_FRAG_DGRAM_SIZE_HIGH_SHIFT | low;
	fail |= lowpan_fetch_skb(skb, &d_tag, 2);
	cb->d_tag = ntohs(d_tag);

	if (frag_type == LOWPAN_DISPATCH_FRAGN) {
		fail |= lowpan_fetch_skb(skb, &cb->d_offset, 1);
	} else {
		skb_reset_network_header(skb);
		cb->d_offset = 0;
		/* check if datagram_size has ipv6hdr on FRAG1 */
		fail |= cb->d_size < sizeof(struct ipv6hdr);
		/* check if we can dereference the dispatch value */
		fail |= !skb->len;
	}

	if (unlikely(fail))
		return -EIO;

	return 0;
}

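/* Receive one FRAG1/FRAGN frame: parse the fragment header, find or
 * create the reassembly queue and enqueue the fragment.  Returns 1
 * once the datagram was reassembled, -1 otherwise.
 */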
int lowpan_frag_rcv(struct sk_buff *skb, u8 frag_type)
{
	struct lowpan_frag_queue *fq;
	struct net *net = dev_net(skb->dev);
	struct lowpan_802154_cb *cb = lowpan_802154_cb(skb);
	struct ieee802154_hdr hdr;
	int err;

	if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
		goto err;

	err = lowpan_get_cb(skb, frag_type, cb);
	if (err < 0)
		goto err;

	if (frag_type == LOWPAN_DISPATCH_FRAG1) {
		err = lowpan_invoke_frag_rx_handlers(skb);
		if (err == NET_RX_DROP)
			goto err;
	}

	if (cb->d_size > IPV6_MIN_MTU) {
		net_warn_ratelimited("lowpan_frag_rcv: datagram size exceeds MTU\n");
		goto err;
	}

	fq = fq_find(net, cb, &hdr.source, &hdr.dest);
	if (fq != NULL) {
		int ret;

		spin_lock(&fq->q.lock);
		ret = lowpan_frag_queue(fq, skb, frag_type);
		spin_unlock(&fq->q.lock);

		inet_frag_put(&fq->q, &lowpan_frags);
		return ret;
	}

err:
	kfree_skb(skb);
	return -1;
}

#ifdef CONFIG_SYSCTL
static int zero;

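/* Per-netns sysctls, registered under net/ieee802154/6lowpan: the
 * high/low reassembly memory thresholds in bytes, and the reassembly
 * timeout, stored in jiffies but read and written in seconds.  For
 * example (a hypothetical shell session):
 *
 *	# sysctl net.ieee802154.6lowpan.6lowpanfrag_time=60
 */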
static struct ctl_table lowpan_frags_ns_ctl_table[] = {
	{
		.procname	= "6lowpanfrag_high_thresh",
		.data		= &init_net.ieee802154_lowpan.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &init_net.ieee802154_lowpan.frags.low_thresh
	},
	{
		.procname	= "6lowpanfrag_low_thresh",
		.data		= &init_net.ieee802154_lowpan.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
		.extra2		= &init_net.ieee802154_lowpan.frags.high_thresh
	},
	{
		.procname	= "6lowpanfrag_time",
		.data		= &init_net.ieee802154_lowpan.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

/* secret interval has been deprecated */
static int lowpan_frags_secret_interval_unused;
static struct ctl_table lowpan_frags_ctl_table[] = {
	{
		.procname	= "6lowpanfrag_secret_interval",
		.data		= &lowpan_frags_secret_interval_unused,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	table = lowpan_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
				GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		table[0].data = &ieee802154_lowpan->frags.high_thresh;
		table[0].extra1 = &ieee802154_lowpan->frags.low_thresh;
		table[0].extra2 = &init_net.ieee802154_lowpan.frags.high_thresh;
		table[1].data = &ieee802154_lowpan->frags.low_thresh;
		table[1].extra2 = &ieee802154_lowpan->frags.high_thresh;
		table[2].data = &ieee802154_lowpan->frags.timeout;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
	if (hdr == NULL)
		goto err_reg;

	ieee802154_lowpan->sysctl.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit lowpan_frags_ns_sysctl_unregister(struct net *net)
{
	struct ctl_table *table;
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	table = ieee802154_lowpan->sysctl.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(ieee802154_lowpan->sysctl.frags_hdr);
	if (!net_eq(net, &init_net))
		kfree(table);
}

static struct ctl_table_header *lowpan_ctl_header;

static int __init lowpan_frags_sysctl_register(void)
{
	lowpan_ctl_header = register_net_sysctl(&init_net,
						"net/ieee802154/6lowpan",
						lowpan_frags_ctl_table);
	return lowpan_ctl_header == NULL ? -ENOMEM : 0;
}

static void lowpan_frags_sysctl_unregister(void)
{
	unregister_net_sysctl_table(lowpan_ctl_header);
}
#else
static inline int lowpan_frags_ns_sysctl_register(struct net *net)
{
	return 0;
}

static inline void lowpan_frags_ns_sysctl_unregister(struct net *net)
{
}

static inline int __init lowpan_frags_sysctl_register(void)
{
	return 0;
}

static inline void lowpan_frags_sysctl_unregister(void)
{
}
#endif

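/* Per-netns setup: seed the fragment thresholds and timeout with the
 * IPv6 defaults, then register the sysctls; unwind the inet_frags
 * state if sysctl registration fails.
 */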
static int __net_init lowpan_frags_init_net(struct net *net)
{
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);
	int res;

	ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
	ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
	ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;

	res = inet_frags_init_net(&ieee802154_lowpan->frags);
	if (res < 0)
		return res;
	res = lowpan_frags_ns_sysctl_register(net);
	if (res < 0)
		inet_frags_exit_net(&ieee802154_lowpan->frags, &lowpan_frags);
	return res;
}

static void __net_exit lowpan_frags_exit_net(struct net *net)
{
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	lowpan_frags_ns_sysctl_unregister(net);
	inet_frags_exit_net(&ieee802154_lowpan->frags, &lowpan_frags);
}

static struct pernet_operations lowpan_frags_ops = {
	.init = lowpan_frags_init_net,
	.exit = lowpan_frags_exit_net,
};

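/* Module init: register the sysctls and the pernet ops first, then
 * wire up the lowpan_frags callbacks and hand the descriptor to the
 * inet_frags core.
 */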
int __init lowpan_net_frag_init(void)
{
	int ret;

	ret = lowpan_frags_sysctl_register();
	if (ret)
		return ret;

	ret = register_pernet_subsys(&lowpan_frags_ops);
	if (ret)
		goto err_pernet;

	lowpan_frags.hashfn = lowpan_hashfn;
	lowpan_frags.constructor = lowpan_frag_init;
	lowpan_frags.destructor = NULL;
	lowpan_frags.qsize = sizeof(struct frag_queue);
	lowpan_frags.match = lowpan_frag_match;
	lowpan_frags.frag_expire = lowpan_frag_expire;
	lowpan_frags.frags_cache_name = lowpan_frags_cache_name;
	ret = inet_frags_init(&lowpan_frags);
	if (ret)
		goto err_pernet;

	return ret;
err_pernet:
	lowpan_frags_sysctl_unregister();
	return ret;
}

void lowpan_net_frag_exit(void)
{
	inet_frags_fini(&lowpan_frags);
	lowpan_frags_sysctl_unregister();
	unregister_pernet_subsys(&lowpan_frags_ops);
}