/*
 * IPv6 fragment reassembly for connection tracking
 *
 * Copyright (C)2004 USAGI/WIDE Project
 *
 * Author:
 *	Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
 *
 * Based on: net/ipv6/reassembly.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/jhash.h>

#include <net/sock.h>
#include <net/snmp.h>
#include <net/inet_frag.h>

#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
#include <linux/sysctl.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <linux/kernel.h>
#include <linux/module.h>

/* Default reassembly memory limits and per-queue timeout (runtime values
 * live in nf_init_frags and are tunable via the sysctls below). */
#define NF_CT_FRAG6_HIGH_THRESH 262144 /* == 256*1024 */
#define NF_CT_FRAG6_LOW_THRESH 196608  /* == 192*1024 */
#define NF_CT_FRAG6_TIMEOUT IPV6_FRAG_TIMEOUT

/* Per-skb control-block state for defragmentation, overlaid on skb->cb.
 * The conntrack defrag path works on a clone of each incoming skb; 'orig'
 * keeps a pointer back to the original so it can be re-injected later. */
struct nf_ct_frag6_skb_cb
{
	struct inet6_skb_parm	h;	/* must stay first: IPv6 cb layout */
	int			offset;	/* fragment offset of this skb's payload */
	struct sk_buff		*orig;	/* original (unclonied) skb, or NULL */
};

#define NFCT_FRAG6_CB(skb)	((struct nf_ct_frag6_skb_cb*)((skb)->cb))

/* One in-progress IPv6 reassembly queue. Embeds the generic
 * inet_frag_queue as its first member so container_of() conversions in
 * nf_hashfn()/fq_find() work. The (id, saddr, daddr) triple identifies
 * the datagram being reassembled. */
struct nf_ct_frag6_queue
{
	struct inet_frag_queue	q;

	__be32			id;		/* fragment id		*/
	struct in6_addr		saddr;
	struct in6_addr		daddr;

	unsigned int		csum;
	__u16			nhoffset;	/* offset of the nexthdr field to patch
						 * when the frag header is removed */
};
/* Shared inet_frag machinery: hash table ops (nf_frags) and the accounting/
 * tunables instance (nf_init_frags); both are set up in nf_ct_frag6_init(). */
static struct inet_frags nf_frags;
static struct netns_frags nf_init_frags;
#ifdef CONFIG_SYSCTL
/* Sysctl knobs mapping straight onto nf_init_frags fields.
 * The timeout entry uses proc_dointvec_jiffies since the stored value is
 * in jiffies. Fix: the first entry was missing its .ctl_name while the
 * other entries set theirs — restore NET_NF_CONNTRACK_FRAG6_TIMEOUT for
 * consistency (the table is terminated by the .ctl_name = 0 sentinel). */
struct ctl_table nf_ct_ipv6_sysctl_table[] = {
	{
		.ctl_name	= NET_NF_CONNTRACK_FRAG6_TIMEOUT,
		.procname	= "nf_conntrack_frag6_timeout",
		.data		= &nf_init_frags.timeout,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
	},
	{
		.ctl_name	= NET_NF_CONNTRACK_FRAG6_LOW_THRESH,
		.procname	= "nf_conntrack_frag6_low_thresh",
		.data		= &nf_init_frags.low_thresh,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_NF_CONNTRACK_FRAG6_HIGH_THRESH,
		.procname	= "nf_conntrack_frag6_high_thresh",
		.data		= &nf_init_frags.high_thresh,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{ .ctl_name = 0 }
};
#endif

/* Hash a fragment's (id, saddr, daddr) triple into an inet_frag hash
 * bucket. Open-coded jhash-style mixing over the 9 32-bit words of the
 * key; nf_frags.rnd is the per-boot random seed that makes bucket
 * placement unpredictable to remote senders. The mixing order is part
 * of the hash value — do not reorder. */
static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr,
			       struct in6_addr *daddr)
{
	u32 a, b, c;

	a = (__force u32)saddr->s6_addr32[0];
	b = (__force u32)saddr->s6_addr32[1];
	c = (__force u32)saddr->s6_addr32[2];

	a += JHASH_GOLDEN_RATIO;
	b += JHASH_GOLDEN_RATIO;
	c += nf_frags.rnd;
	__jhash_mix(a, b, c);

	a += (__force u32)saddr->s6_addr32[3];
	b += (__force u32)daddr->s6_addr32[0];
	c += (__force u32)daddr->s6_addr32[1];
	__jhash_mix(a, b, c);

	a += (__force u32)daddr->s6_addr32[2];
	b += (__force u32)daddr->s6_addr32[3];
	c += (__force u32)id;
	__jhash_mix(a, b, c);

	return c & (INETFRAGS_HASHSZ - 1);
}

133
static unsigned int nf_hashfn(struct inet_frag_queue *q)
134
{
135
	struct nf_ct_frag6_queue *nq;
136

137 138
	nq = container_of(q, struct nf_ct_frag6_queue, q);
	return ip6qhashfn(nq->id, &nq->saddr, &nq->daddr);
139 140
}

141 142 143 144 145 146
static void nf_skb_free(struct sk_buff *skb)
{
	if (NFCT_FRAG6_CB(skb)->orig)
		kfree_skb(NFCT_FRAG6_CB(skb)->orig);
}

/* Memory Tracking Functions. */

/* Free a queued fragment skb and give its truesize back to both the
 * optional per-caller work budget and the global defrag memory counter,
 * also dropping the reference to the original skb via nf_skb_free(). */
static inline void frag_kfree_skb(struct sk_buff *skb, unsigned int *work)
{
	if (work)
		*work -= skb->truesize;
	atomic_sub(skb->truesize, &nf_init_frags.mem);
	nf_skb_free(skb);
	kfree_skb(skb);
}

/* Destruction primitives. */

/* Drop one reference on the queue; the inet_frag core destroys it when
 * the count reaches zero. */
static __inline__ void fq_put(struct nf_ct_frag6_queue *fq)
{
	inet_frag_put(&fq->q, &nf_frags);
}

/* Kill fq entry. It is not destroyed immediately,
 * because caller (and someone more) holds reference count.
 */
static __inline__ void fq_kill(struct nf_ct_frag6_queue *fq)
{
	inet_frag_kill(&fq->q, &nf_frags);
}

/* Reclaim queue memory down to low_thresh by evicting the oldest
 * queues (LRU order inside the inet_frag core). */
static void nf_ct_frag6_evictor(void)
{
	inet_frag_evictor(&nf_init_frags, &nf_frags);
}

/* Timer callback: a reassembly queue timed out before completing.
 * 'data' is the embedded inet_frag_queue pointer set up by the core.
 * Kill the queue (unless already complete) under its lock, then drop
 * the timer's reference. */
static void nf_ct_frag6_expire(unsigned long data)
{
	struct nf_ct_frag6_queue *fq;

	fq = container_of((struct inet_frag_queue *)data,
			struct nf_ct_frag6_queue, q);

	spin_lock(&fq->q.lock);

	/* Raced with reassembly finishing: nothing to do. */
	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto out;

	fq_kill(fq);

out:
	spin_unlock(&fq->q.lock);
	fq_put(fq);	/* reference held by the expired timer */
}

/* Creation primitives. */

198 199
static __inline__ struct nf_ct_frag6_queue *
fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst)
200
{
201
	struct inet_frag_queue *q;
202
	struct ip6_create_arg arg;
203
	unsigned int hash;
204

205 206 207
	arg.id = id;
	arg.src = src;
	arg.dst = dst;
208
	hash = ip6qhashfn(id, src, dst);
209

210
	q = inet_frag_find(&nf_init_frags, &nf_frags, &arg, hash);
211
	if (q == NULL)
212 213
		goto oom;

214
	return container_of(q, struct nf_ct_frag6_queue, q);
215 216

oom:
217
	pr_debug("Can't alloc new queue\n");
218 219 220 221
	return NULL;
}


/* Insert one fragment skb into the queue 'fq', trimming away any overlap
 * with fragments already queued. Called with fq->q.lock held. Returns 0
 * on success, -1 on any error (caller then frees the clone). On success
 * the skb is linked into fq->q.fragments in offset order and accounted
 * in fq->q.meat / nf_init_frags.mem. */
static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
			     struct frag_hdr *fhdr, int nhoff)
{
	struct sk_buff *prev, *next;
	int offset, end;

	if (fq->q.last_in & INET_FRAG_COMPLETE) {
		pr_debug("Allready completed\n");
		goto err;
	}

	/* Fragment offset is in 8-byte units; low 3 bits of frag_off are flags. */
	offset = ntohs(fhdr->frag_off) & ~0x7;
	/* end = offset of the byte just past this fragment's payload. */
	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		pr_debug("offset is too large.\n");
		return -1;
	}

	/* Back the frag-header bytes out of a hardware-computed checksum
	 * since they will not be part of the reassembled packet. */
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len)) {
			pr_debug("already received last fragment\n");
			goto err;
		}
		fq->q.last_in |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case. -DaveM
			 */
			pr_debug("end of fragment not rounded to 8 bytes.\n");
			return -1;
		}
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.last_in & INET_FRAG_LAST_IN) {
				pr_debug("last packet already reached.\n");
				goto err;
			}
			fq->q.len = end;
		}
	}

	/* Zero-length non-final fragment: nothing to queue. */
	if (end == offset)
		goto err;

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data)) {
		pr_debug("queue: message is too short.\n");
		goto err;
	}
	if (pskb_trim_rcsum(skb, end - offset)) {
		pr_debug("Can't trim\n");
		goto err;
	}

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = NULL;
	for (next = fq->q.fragments; next != NULL; next = next->next) {
		if (NFCT_FRAG6_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

	/* We found where to put this one.  Check for overlap with
	 * preceding fragment, and, if needed, align things so that
	 * any overlaps are eliminated.
	 */
	if (prev) {
		int i = (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			/* Trim 'i' overlapping bytes from the front of this skb. */
			offset += i;
			if (end <= offset) {
				pr_debug("overlap\n");
				goto err;
			}
			if (!pskb_pull(skb, i)) {
				pr_debug("Can't pull\n");
				goto err;
			}
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

	/* Look for overlap with succeeding segments.
	 * If we can merge fragments, do it.
	 */
	while (next && NFCT_FRAG6_CB(next)->offset < end) {
		/* overlap is 'i' bytes */
		int i = end - NFCT_FRAG6_CB(next)->offset;

		if (i < next->len) {
			/* Eat head of the next overlapped fragment
			 * and leave the loop. The next ones cannot overlap.
			 */
			pr_debug("Eat head of the overlapped parts.: %d", i);
			if (!pskb_pull(next, i))
				goto err;

			/* next fragment */
			NFCT_FRAG6_CB(next)->offset += i;
			fq->q.meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* Old fragment is completely overridden with
			 * new one drop it.
			 */
			next = next->next;

			if (prev)
				prev->next = next;
			else
				fq->q.fragments = next;

			fq->q.meat -= free_it->len;
			frag_kfree_skb(free_it, NULL);
		}
	}

	NFCT_FRAG6_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	skb->dev = NULL;
	fq->q.stamp = skb->tstamp;
	fq->q.meat += skb->len;
	atomic_add(skb->truesize, &nf_init_frags.mem);

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->q.last_in |= INET_FRAG_FIRST_IN;
	}
	/* Refresh this queue's position in the global LRU list. */
	write_lock(&nf_frags.lock);
	list_move_tail(&fq->q.lru_list, &nf_init_frags.lru_list);
	write_unlock(&nf_frags.lock);
	return 0;

err:
	return -1;
}

/*
 *	Check if this packet is complete.
 *	Returns NULL on failure by any reason, and pointer
 *	to current nexthdr field in reassembled frame.
 *
 *	It is called with locked fq, and caller must check that
 *	queue is eligible for reassembly i.e. it is not COMPLETE,
 *	the last and the first frames arrived and all the bits are here.
 */
static struct sk_buff *
nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
{
	struct sk_buff *fp, *op, *head = fq->q.fragments;
	int    payload_len;

	fq_kill(fq);

	BUG_TRAP(head != NULL);
	BUG_TRAP(NFCT_FRAG6_CB(head)->offset == 0);

	/* Unfragmented part is taken from the first segment. */
	payload_len = ((head->data - skb_network_header(head)) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr));
	if (payload_len > IPV6_MAXPLEN) {
		pr_debug("payload len is too large.\n");
		goto out_oversize;
	}

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC)) {
		pr_debug("skb is cloned but can't expand head");
		goto out_oom;
	}

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
	if (skb_shinfo(head)->frag_list) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL) {
			pr_debug("Can't alloc skb\n");
			goto out_oom;
		}
		clone->next = head->next;
		head->next = clone;
		/* Move head's own frag_list onto the new clone; account the
		 * linear/paged split between them. */
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_shinfo(head)->frag_list = NULL;
		for (i=0; i<skb_shinfo(head)->nr_frags; i++)
			plen += skb_shinfo(head)->frags[i].size;
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;

		NFCT_FRAG6_CB(clone)->orig = NULL;
		atomic_add(clone->truesize, &nf_init_frags.mem);
	}

	/* We have to remove fragment header from datagram and to relocate
	 * header in order to calculate ICV correctly. */
	skb_network_header(head)[fq->nhoffset] = skb_transport_header(head)[0];
	memmove(head->head + sizeof(struct frag_hdr), head->head,
		(head->data - head->head) - sizeof(struct frag_hdr));
	head->mac_header += sizeof(struct frag_hdr);
	head->network_header += sizeof(struct frag_hdr);

	/* Chain the remaining fragments as head's frag_list and fold their
	 * length/checksum/truesize into the head skb. */
	skb_shinfo(head)->frag_list = head->next;
	skb_reset_transport_header(head);
	skb_push(head, head->data - skb_network_header(head));
	atomic_sub(head->truesize, &nf_init_frags.mem);

	for (fp=head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
		atomic_sub(fp->truesize, &nf_init_frags.mem);
	}

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;
	ipv6_hdr(head)->payload_len = htons(payload_len);

	/* Yes, and fold redundant checksum back. 8) */
	if (head->ip_summed == CHECKSUM_COMPLETE)
		head->csum = csum_partial(skb_network_header(head),
					  skb_network_header_len(head),
					  head->csum);

	fq->q.fragments = NULL;

	/* all original skbs are linked into the NFCT_FRAG6_CB(head).orig */
	fp = skb_shinfo(head)->frag_list;
	if (NFCT_FRAG6_CB(fp)->orig == NULL)
		/* at above code, head skb is divided into two skbs. */
		fp = fp->next;

	op = NFCT_FRAG6_CB(head)->orig;
	for (; fp; fp = fp->next) {
		struct sk_buff *orig = NFCT_FRAG6_CB(fp)->orig;

		op->next = orig;
		op = orig;
		NFCT_FRAG6_CB(fp)->orig = NULL;
	}

	return head;

out_oversize:
	if (net_ratelimit())
		printk(KERN_DEBUG "nf_ct_frag6_reasm: payload len = %d\n", payload_len);
	goto out_fail;
out_oom:
	if (net_ratelimit())
		printk(KERN_DEBUG "nf_ct_frag6_reasm: no memory for reassembly\n");
out_fail:
	return NULL;
}

/*
 * find the header just before Fragment Header.
 *
 * if success return 0 and set ...
 * (*prevhdrp): the value of "Next Header Field" in the header
 *		just before Fragment Header.
 * (*prevhoff): the offset of "Next Header Field" in the header
 *		just before Fragment Header.
 * (*fhoff)   : the offset of Fragment Header.
 *
 * Based on ipv6_skip_hdr() in net/ipv6/exthdr.c
 *
 */
static int
find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
{
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
	const int netoff = skb_network_offset(skb);
	/* NOTE(review): prev_nhoff is u8 but is later assigned 'start' (an
	 * int byte offset) — values > 255 would truncate; confirm extension
	 * header chains can't push the offset that far here. */
	u8 prev_nhoff = netoff + offsetof(struct ipv6hdr, nexthdr);
	int start = netoff + sizeof(struct ipv6hdr);
	int len = skb->len - start;
	u8 prevhdr = NEXTHDR_IPV6;

	/* Walk the extension-header chain until the Fragment Header. */
	while (nexthdr != NEXTHDR_FRAGMENT) {
		struct ipv6_opt_hdr hdr;
		int hdrlen;

		if (!ipv6_ext_hdr(nexthdr)) {
			return -1;
		}
		if (len < (int)sizeof(struct ipv6_opt_hdr)) {
			pr_debug("too short\n");
			return -1;
		}
		if (nexthdr == NEXTHDR_NONE) {
			pr_debug("next header is none\n");
			return -1;
		}
		/* Length check above guarantees this copy cannot fail. */
		if (skb_copy_bits(skb, start, &hdr, sizeof(hdr)))
			BUG();
		/* AH encodes its length in 32-bit words minus 2; the other
		 * extension headers use the generic ipv6_optlen() encoding. */
		if (nexthdr == NEXTHDR_AUTH)
			hdrlen = (hdr.hdrlen+2)<<2;
		else
			hdrlen = ipv6_optlen(&hdr);

		prevhdr = nexthdr;
		prev_nhoff = start;

		nexthdr = hdr.nexthdr;
		len -= hdrlen;
		start += hdrlen;
	}

	if (len < 0)
		return -1;

	*prevhdrp = prevhdr;
	*prevhoff = prev_nhoff;
	*fhoff = start;

	return 0;
}

/* Entry point of the defrag hook: queue one IPv6 fragment and, when the
 * datagram is complete, return the reassembled skb. Returns the original
 * skb unchanged if it is not a fragment (or on clone/parse failure), the
 * reassembled skb on completion, or NULL while reassembly is pending or
 * failed (the fragment having been consumed into a queue). Works on a
 * clone; the original is stashed in NFCT_FRAG6_CB(clone)->orig. */
struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
{
	struct sk_buff *clone;
	struct net_device *dev = skb->dev;
	struct frag_hdr *fhdr;
	struct nf_ct_frag6_queue *fq;
	struct ipv6hdr *hdr;
	int fhoff, nhoff;
	u8 prevhdr;
	struct sk_buff *ret_skb = NULL;

	/* Jumbo payload inhibits frag. header */
	if (ipv6_hdr(skb)->payload_len == 0) {
		pr_debug("payload len = 0\n");
		return skb;
	}

	if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0)
		return skb;

	clone = skb_clone(skb, GFP_ATOMIC);
	if (clone == NULL) {
		pr_debug("Can't clone skb\n");
		return skb;
	}

	NFCT_FRAG6_CB(clone)->orig = skb;

	if (!pskb_may_pull(clone, fhoff + sizeof(*fhdr))) {
		pr_debug("message is too short.\n");
		goto ret_orig;
	}

	skb_set_transport_header(clone, fhoff);
	hdr = ipv6_hdr(clone);
	fhdr = (struct frag_hdr *)skb_transport_header(clone);

	/* 0xFFF9 masks the fragment-offset bits plus the M flag: all zero
	 * means "offset 0, no more fragments" — not actually fragmented. */
	if (!(fhdr->frag_off & htons(0xFFF9))) {
		pr_debug("Invalid fragment offset\n");
		/* It is not a fragmented frame */
		goto ret_orig;
	}

	/* Over the memory limit: evict old queues before queueing more. */
	if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh)
		nf_ct_frag6_evictor();

	fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr);
	if (fq == NULL) {
		pr_debug("Can't find and can't create new queue\n");
		goto ret_orig;
	}

	spin_lock(&fq->q.lock);

	if (nf_ct_frag6_queue(fq, clone, fhdr, nhoff) < 0) {
		spin_unlock(&fq->q.lock);
		pr_debug("Can't insert skb to queue\n");
		fq_put(fq);
		goto ret_orig;
	}

	/* First and last fragments seen and every byte in between present:
	 * reassemble now, still under the queue lock. */
	if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len) {
		ret_skb = nf_ct_frag6_reasm(fq, dev);
		if (ret_skb == NULL)
			pr_debug("Can't reassemble fragmented packets\n");
	}
	spin_unlock(&fq->q.lock);

	fq_put(fq);
	return ret_skb;

ret_orig:
	kfree_skb(clone);
	return skb;
}

/* Re-inject every original fragment skb (collected on the reassembled
 * skb's ->orig chain) into the netfilter hook chain just past the defrag
 * hook's priority. Each fragment gets a reference to the reassembled skb
 * via ->nfct_reasm so later hooks can reach the full datagram. Consumes
 * the caller's reference on 'skb'. */
void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
			struct net_device *in, struct net_device *out,
			int (*okfn)(struct sk_buff *))
{
	struct sk_buff *s, *s2;

	for (s = NFCT_FRAG6_CB(skb)->orig; s;) {
		nf_conntrack_put_reasm(s->nfct_reasm);
		nf_conntrack_get_reasm(skb);
		s->nfct_reasm = skb;

		/* Unlink before re-injection; NF_HOOK_THRESH may free 's'. */
		s2 = s->next;
		s->next = NULL;

		NF_HOOK_THRESH(PF_INET6, hooknum, s, in, out, okfn,
			       NF_IP6_PRI_CONNTRACK_DEFRAG + 1);
		s = s2;
	}
	nf_conntrack_put_reasm(skb);
}

int nf_ct_frag6_init(void)
{
687
	nf_frags.hashfn = nf_hashfn;
688
	nf_frags.constructor = ip6_frag_init;
689
	nf_frags.destructor = NULL;
690 691
	nf_frags.skb_free = nf_skb_free;
	nf_frags.qsize = sizeof(struct nf_ct_frag6_queue);
692
	nf_frags.match = ip6_frag_match;
693
	nf_frags.frag_expire = nf_ct_frag6_expire;
694
	nf_frags.secret_interval = 10 * 60 * HZ;
695
	nf_init_frags.timeout = IPV6_FRAG_TIMEOUT;
696 697
	nf_init_frags.high_thresh = 256 * 1024;
	nf_init_frags.low_thresh = 192 * 1024;
698
	inet_frags_init_net(&nf_init_frags);
699
	inet_frags_init(&nf_frags);
700 701 702 703 704 705

	return 0;
}

/* Module exit: tear down the frag core, then force low_thresh to 0 so
 * the evictor reclaims every remaining queue. */
void nf_ct_frag6_cleanup(void)
{
	inet_frags_fini(&nf_frags);

	nf_init_frags.low_thresh = 0;
	nf_ct_frag6_evictor();
}