/* Connection state tracking for netfilter.  This is separated from,
   but required by, the NAT layer; it can also be used by an iptables
   extension. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/mm.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>

#define NF_CONNTRACK_VERSION	"0.5.0"

DEFINE_RWLOCK(nf_conntrack_lock);
EXPORT_SYMBOL_GPL(nf_conntrack_lock);

/* nf_conntrack_standalone needs this */
atomic_t nf_conntrack_count = ATOMIC_INIT(0);
EXPORT_SYMBOL_GPL(nf_conntrack_count);

unsigned int nf_conntrack_htable_size __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);

int nf_conntrack_max __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_max);

struct hlist_head *nf_conntrack_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_hash);

struct nf_conn nf_conntrack_untracked __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_untracked);

unsigned int nf_ct_log_invalid __read_mostly;
HLIST_HEAD(unconfirmed);
static int nf_conntrack_vmalloc __read_mostly;
static struct kmem_cache *nf_conntrack_cachep __read_mostly;
static unsigned int nf_conntrack_next_id;

DEFINE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat);
EXPORT_PER_CPU_SYMBOL(nf_conntrack_stat);

static int nf_conntrack_hash_rnd_initted;
static unsigned int nf_conntrack_hash_rnd;

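/* Map a tuple to a bucket: the two jhash2() calls fold the source and
   destination addresses (together with l3num, the protocol number and
   the ports) into two 32-bit words, which jhash_2words() mixes with
   the random seed before reducing modulo the table size.  The seed
   keeps remote hosts from predicting bucket placement. */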
static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
				  unsigned int size, unsigned int rnd)
{
	unsigned int a, b;

	a = jhash2(tuple->src.u3.all, ARRAY_SIZE(tuple->src.u3.all),
		   (tuple->src.l3num << 16) | tuple->dst.protonum);
	b = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
		   (tuple->src.u.all << 16) | tuple->dst.u.all);

	return jhash_2words(a, b, rnd) % size;
}

static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
{
	return __hash_conntrack(tuple, nf_conntrack_htable_size,
				nf_conntrack_hash_rnd);
}

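/* Extract a tuple from a packet: the l3proto callback parses the
   network header at nhoff, the l4proto callback the transport header
   at dataoff.  Returns false if either header cannot be parsed. */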
int
nf_ct_get_tuple(const struct sk_buff *skb,
		unsigned int nhoff,
		unsigned int dataoff,
		u_int16_t l3num,
		u_int8_t protonum,
		struct nf_conntrack_tuple *tuple,
		const struct nf_conntrack_l3proto *l3proto,
		const struct nf_conntrack_l4proto *l4proto)
{
	NF_CT_TUPLE_U_BLANK(tuple);

	tuple->src.l3num = l3num;
	if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
		return 0;

	tuple->dst.protonum = protonum;
	tuple->dst.dir = IP_CT_DIR_ORIGINAL;

	return l4proto->pkt_to_tuple(skb, dataoff, tuple);
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuple);

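/* As nf_ct_get_tuple(), but resolves the l3/l4 protocol handlers
   itself under rcu_read_lock() and derives the transport offset and
   protocol number from the packet. */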
int nf_ct_get_tuplepr(const struct sk_buff *skb,
		      unsigned int nhoff,
		      u_int16_t l3num,
		      struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;
	unsigned int protoff;
	u_int8_t protonum;
	int ret;

	rcu_read_lock();

	l3proto = __nf_ct_l3proto_find(l3num);
	ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
	if (ret != NF_ACCEPT) {
		rcu_read_unlock();
		return 0;
	}

	l4proto = __nf_ct_l4proto_find(l3num, protonum);

	ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, tuple,
			      l3proto, l4proto);

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);

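/* Build the reply-direction tuple for orig: the protocol handlers swap
   source and destination, and dst.dir is flipped.  Returns false if
   either handler cannot invert the tuple. */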
int
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
		   const struct nf_conntrack_tuple *orig,
		   const struct nf_conntrack_l3proto *l3proto,
		   const struct nf_conntrack_l4proto *l4proto)
{
	NF_CT_TUPLE_U_BLANK(inverse);

	inverse->src.l3num = orig->src.l3num;
	if (l3proto->invert_tuple(inverse, orig) == 0)
		return 0;

	inverse->dst.dir = !orig->dst.dir;

	inverse->dst.protonum = orig->dst.protonum;
	return l4proto->invert_tuple(inverse, orig);
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);

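/* Unlink both directions from the hash table and drop any pending
   expectations.  Caller must hold nf_conntrack_lock for writing. */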
static void
clean_from_lists(struct nf_conn *ct)
{
	pr_debug("clean_from_lists(%p)\n", ct);
	hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
	hlist_del(&ct->tuplehash[IP_CT_DIR_REPLY].hnode);

	/* Destroy all pending expectations */
	nf_ct_remove_expectations(ct);
}

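/* Final destruction, reached via nf_ct_put() once the reference count
   drops to zero: fires the DESTROY event, gives the l4 protocol and
   the extensions a chance to clean up, removes any expectations left
   behind and returns the entry to the slab cache. */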
static void
destroy_conntrack(struct nf_conntrack *nfct)
{
	struct nf_conn *ct = (struct nf_conn *)nfct;
	struct nf_conntrack_l4proto *l4proto;

	pr_debug("destroy_conntrack(%p)\n", ct);
	NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
	NF_CT_ASSERT(!timer_pending(&ct->timeout));

	nf_conntrack_event(IPCT_DESTROY, ct);
	set_bit(IPS_DYING_BIT, &ct->status);

	/* To make sure we don't get any weird locking issues here:
	 * destroy_conntrack() MUST NOT be called with a write lock
	 * to nf_conntrack_lock!!! -HW */
	rcu_read_lock();
	l4proto = __nf_ct_l4proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num,
				       ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum);
	if (l4proto && l4proto->destroy)
		l4proto->destroy(ct);

	nf_ct_ext_destroy(ct);

	rcu_read_unlock();

	write_lock_bh(&nf_conntrack_lock);
	/* Expectations will have been removed in clean_from_lists,
	 * except TFTP can create an expectation on the first packet,
	 * before connection is in the list, so we need to clean here,
	 * too. */
	nf_ct_remove_expectations(ct);

	/* We overload first tuple to link into unconfirmed list. */
	if (!nf_ct_is_confirmed(ct)) {
		BUG_ON(hlist_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode));
		hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
	}

	NF_CT_STAT_INC(delete);
	write_unlock_bh(&nf_conntrack_lock);

	if (ct->master)
		nf_ct_put(ct->master);

	pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
	nf_conntrack_free(ct);
}

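/* Timer callback: the connection timed out.  Let the helper run its
   destroy hook, unlink the entry and drop the hash table's reference. */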
static void death_by_timeout(unsigned long ul_conntrack)
{
	struct nf_conn *ct = (void *)ul_conntrack;
	struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_helper *helper;

	if (help) {
		rcu_read_lock();
		helper = rcu_dereference(help->helper);
		if (helper && helper->destroy)
			helper->destroy(ct);
		rcu_read_unlock();
	}

	write_lock_bh(&nf_conntrack_lock);
	/* Inside lock so preempt is disabled on module removal path.
	 * Otherwise we can get spurious warnings. */
	NF_CT_STAT_INC(delete_list);
	clean_from_lists(ct);
	write_unlock_bh(&nf_conntrack_lock);
	nf_ct_put(ct);
}

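/* Hash table lookup without taking a reference; the caller must hold
   nf_conntrack_lock.  ignored_conntrack lets a caller (e.g.
   nf_conntrack_tuple_taken() for NAT) skip a given entry. */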
struct nf_conntrack_tuple_hash *
__nf_conntrack_find(const struct nf_conntrack_tuple *tuple,
		    const struct nf_conn *ignored_conntrack)
{
	struct nf_conntrack_tuple_hash *h;
	struct hlist_node *n;
	unsigned int hash = hash_conntrack(tuple);

	hlist_for_each_entry(h, n, &nf_conntrack_hash[hash], hnode) {
		if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
		    nf_ct_tuple_equal(tuple, &h->tuple)) {
			NF_CT_STAT_INC(found);
			return h;
		}
		NF_CT_STAT_INC(searched);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_find);

/* Find a connection corresponding to a tuple. */
struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_tuple_hash *h;

	read_lock_bh(&nf_conntrack_lock);
	h = __nf_conntrack_find(tuple, NULL);
	if (h)
		atomic_inc(&nf_ct_tuplehash_to_ctrack(h)->ct_general.use);
	read_unlock_bh(&nf_conntrack_lock);

	return h;
}
EXPORT_SYMBOL_GPL(nf_conntrack_find_get);

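/* Link both tuple directions into the hash table and assign the
   conntrack id.  Caller must hold nf_conntrack_lock for writing. */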
static void __nf_conntrack_hash_insert(struct nf_conn *ct,
				       unsigned int hash,
				       unsigned int repl_hash)
{
	ct->id = ++nf_conntrack_next_id;
	hlist_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode,
		       &nf_conntrack_hash[hash]);
	hlist_add_head(&ct->tuplehash[IP_CT_DIR_REPLY].hnode,
		       &nf_conntrack_hash[repl_hash]);
}

void nf_conntrack_hash_insert(struct nf_conn *ct)
{
	unsigned int hash, repl_hash;

	hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	write_lock_bh(&nf_conntrack_lock);
	__nf_conntrack_hash_insert(ct, hash, repl_hash);
	write_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert);

/* Confirm a connection given skb; places it in hash table */
int
__nf_conntrack_confirm(struct sk_buff **pskb)
{
	unsigned int hash, repl_hash;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct nf_conn_help *help;
	struct hlist_node *n;
	enum ip_conntrack_info ctinfo;

	ct = nf_ct_get(*pskb, &ctinfo);

	/* ipt_REJECT uses nf_conntrack_attach to attach related
	   ICMP/TCP RST packets in other direction.  Actual packet
	   which created connection will be IP_CT_NEW or for an
	   expected connection, IP_CT_RELATED. */
	if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
		return NF_ACCEPT;

	hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	/* We're not in hash table, and we refuse to set up related
	   connections for unconfirmed conns.  But packet copies and
	   REJECT will give spurious warnings here. */
	/* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */

	/* No external references means no one else could have
	   confirmed us. */
	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
	pr_debug("Confirming conntrack %p\n", ct);

	write_lock_bh(&nf_conntrack_lock);

	/* See if there's one in the list already, including reverse:
	   NAT could have grabbed it without realizing, since we're
	   not in the hash.  If there is, we lost race. */
	hlist_for_each_entry(h, n, &nf_conntrack_hash[hash], hnode)
		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				      &h->tuple))
			goto out;
	hlist_for_each_entry(h, n, &nf_conntrack_hash[repl_hash], hnode)
		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
				      &h->tuple))
			goto out;

	/* Remove from unconfirmed list */
	hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);

	__nf_conntrack_hash_insert(ct, hash, repl_hash);
	/* Timer relative to confirmation time, not original
	   setting time, otherwise we'd get timer wrap in
	   weird delay cases. */
	ct->timeout.expires += jiffies;
	add_timer(&ct->timeout);
	atomic_inc(&ct->ct_general.use);
	set_bit(IPS_CONFIRMED_BIT, &ct->status);
	NF_CT_STAT_INC(insert);
	write_unlock_bh(&nf_conntrack_lock);
	help = nfct_help(ct);
	if (help && help->helper)
		nf_conntrack_event_cache(IPCT_HELPER, *pskb);
#ifdef CONFIG_NF_NAT_NEEDED
	if (test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status) ||
	    test_bit(IPS_DST_NAT_DONE_BIT, &ct->status))
		nf_conntrack_event_cache(IPCT_NATINFO, *pskb);
#endif
	nf_conntrack_event_cache(master_ct(ct) ?
				 IPCT_RELATED : IPCT_NEW, *pskb);
	return NF_ACCEPT;

out:
	NF_CT_STAT_INC(insert_failed);
	write_unlock_bh(&nf_conntrack_lock);
	return NF_DROP;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);

/* Returns true if a connection corresponds to the tuple (required
   for NAT). */
int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
			 const struct nf_conn *ignored_conntrack)
{
	struct nf_conntrack_tuple_hash *h;

	read_lock_bh(&nf_conntrack_lock);
	h = __nf_conntrack_find(tuple, ignored_conntrack);
	read_unlock_bh(&nf_conntrack_lock);

	return h != NULL;
}
EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);

#define NF_CT_EVICTION_RANGE	8

/* There's a small race here where we may free a just-assured
   connection.  Too bad: we're in trouble anyway. */
static int early_drop(unsigned int hash)
{
	/* Use oldest entry, which is roughly LRU */
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct = NULL, *tmp;
	struct hlist_node *n;
	unsigned int i, cnt = 0;
	int dropped = 0;

	read_lock_bh(&nf_conntrack_lock);
	for (i = 0; i < nf_conntrack_htable_size; i++) {
		hlist_for_each_entry(h, n, &nf_conntrack_hash[hash], hnode) {
			tmp = nf_ct_tuplehash_to_ctrack(h);
			if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
				ct = tmp;
			cnt++;
		}
		if (ct || cnt >= NF_CT_EVICTION_RANGE)
			break;
		hash = (hash + 1) % nf_conntrack_htable_size;
	}
	if (ct)
		atomic_inc(&ct->ct_general.use);
	read_unlock_bh(&nf_conntrack_lock);

	if (!ct)
		return dropped;

	if (del_timer(&ct->timeout)) {
		death_by_timeout((unsigned long)ct);
		dropped = 1;
		NF_CT_STAT_INC_ATOMIC(early_drop);
	}
	nf_ct_put(ct);
	return dropped;
}

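/* Allocate a conntrack for the given tuple pair.  The global count is
   incremented before the limit check to close the race with concurrent
   allocations; when the table is full we try to evict an unassured
   entry from the bucket orig hashes to.  The timeout timer is set up
   but not started until the entry is confirmed. */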
struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
				   const struct nf_conntrack_tuple *repl)
{
	struct nf_conn *conntrack = NULL;

	if (unlikely(!nf_conntrack_hash_rnd_initted)) {
		get_random_bytes(&nf_conntrack_hash_rnd, 4);
		nf_conntrack_hash_rnd_initted = 1;
	}

	/* We don't want any race condition at early drop stage */
	atomic_inc(&nf_conntrack_count);

	if (nf_conntrack_max
	    && atomic_read(&nf_conntrack_count) > nf_conntrack_max) {
		unsigned int hash = hash_conntrack(orig);
		if (!early_drop(hash)) {
			atomic_dec(&nf_conntrack_count);
			if (net_ratelimit())
				printk(KERN_WARNING
				       "nf_conntrack: table full, dropping"
				       " packet.\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	conntrack = kmem_cache_zalloc(nf_conntrack_cachep, GFP_ATOMIC);
	if (conntrack == NULL) {
		pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
		atomic_dec(&nf_conntrack_count);
		return ERR_PTR(-ENOMEM);
	}

	atomic_set(&conntrack->ct_general.use, 1);
	conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
	conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
	/* Don't set timer yet: wait for confirmation */
	setup_timer(&conntrack->timeout, death_by_timeout,
		    (unsigned long)conntrack);

	return conntrack;
}
EXPORT_SYMBOL_GPL(nf_conntrack_alloc);

void nf_conntrack_free(struct nf_conn *conntrack)
{
	nf_ct_ext_free(conntrack);
	kmem_cache_free(nf_conntrack_cachep, conntrack);
	atomic_dec(&nf_conntrack_count);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);

/* Allocate a new conntrack: we return -ENOMEM if classification
   failed due to stress.  Otherwise it really is unclassifiable. */
static struct nf_conntrack_tuple_hash *
init_conntrack(const struct nf_conntrack_tuple *tuple,
	       struct nf_conntrack_l3proto *l3proto,
	       struct nf_conntrack_l4proto *l4proto,
	       struct sk_buff *skb,
	       unsigned int dataoff)
{
	struct nf_conn *conntrack;
	struct nf_conn_help *help;
	struct nf_conntrack_tuple repl_tuple;
	struct nf_conntrack_expect *exp;

	if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
		pr_debug("Can't invert tuple.\n");
		return NULL;
	}

	conntrack = nf_conntrack_alloc(tuple, &repl_tuple);
	if (conntrack == NULL || IS_ERR(conntrack)) {
		pr_debug("Can't allocate conntrack.\n");
		return (struct nf_conntrack_tuple_hash *)conntrack;
	}

	if (!l4proto->new(conntrack, skb, dataoff)) {
		nf_conntrack_free(conntrack);
		pr_debug("init conntrack: can't track with proto module\n");
		return NULL;
	}

	write_lock_bh(&nf_conntrack_lock);
	exp = nf_ct_find_expectation(tuple);
	if (exp) {
		pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
			 conntrack, exp);
		/* Welcome, Mr. Bond.  We've been expecting you... */
		__set_bit(IPS_EXPECTED_BIT, &conntrack->status);
		conntrack->master = exp->master;
		if (exp->helper) {
			help = nf_ct_helper_ext_add(conntrack, GFP_ATOMIC);
			if (help)
				rcu_assign_pointer(help->helper, exp->helper);
		}

#ifdef CONFIG_NF_CONNTRACK_MARK
		conntrack->mark = exp->master->mark;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
		conntrack->secmark = exp->master->secmark;
#endif
		nf_conntrack_get(&conntrack->master->ct_general);
		NF_CT_STAT_INC(expect_new);
	} else {
		struct nf_conntrack_helper *helper;

		helper = __nf_ct_helper_find(&repl_tuple);
		if (helper) {
			help = nf_ct_helper_ext_add(conntrack, GFP_ATOMIC);
			if (help)
				rcu_assign_pointer(help->helper, helper);
		}
		NF_CT_STAT_INC(new);
	}

	/* Overload tuple linked list to put us in unconfirmed list. */
	hlist_add_head(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL].hnode,
		       &unconfirmed);

	write_unlock_bh(&nf_conntrack_lock);

	if (exp) {
		if (exp->expectfn)
			exp->expectfn(conntrack, exp);
		nf_ct_expect_put(exp);
	}

	return &conntrack->tuplehash[IP_CT_DIR_ORIGINAL];
}

/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
static inline struct nf_conn *
resolve_normal_ct(struct sk_buff *skb,
		  unsigned int dataoff,
		  u_int16_t l3num,
		  u_int8_t protonum,
		  struct nf_conntrack_l3proto *l3proto,
		  struct nf_conntrack_l4proto *l4proto,
		  int *set_reply,
		  enum ip_conntrack_info *ctinfo)
{
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
			     dataoff, l3num, protonum, &tuple, l3proto,
			     l4proto)) {
		pr_debug("resolve_normal_ct: Can't get tuple\n");
		return NULL;
	}

	/* look for tuple match */
	h = nf_conntrack_find_get(&tuple);
	if (!h) {
		h = init_conntrack(&tuple, l3proto, l4proto, skb, dataoff);
		if (!h)
			return NULL;
		if (IS_ERR(h))
			return (void *)h;
	}
	ct = nf_ct_tuplehash_to_ctrack(h);

	/* It exists; we have (non-exclusive) reference. */
	if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
		*ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
		/* Please set reply bit if this packet OK */
		*set_reply = 1;
	} else {
		/* Once we've had two way comms, always ESTABLISHED. */
		if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
			pr_debug("nf_conntrack_in: normal packet for %p\n", ct);
			*ctinfo = IP_CT_ESTABLISHED;
		} else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
			pr_debug("nf_conntrack_in: related packet for %p\n",
				 ct);
			*ctinfo = IP_CT_RELATED;
		} else {
			pr_debug("nf_conntrack_in: new packet for %p\n", ct);
			*ctinfo = IP_CT_NEW;
		}
		*set_reply = 0;
	}
	skb->nfct = &ct->ct_general;
	skb->nfctinfo = *ctinfo;
	return ct;
}

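/* Netfilter hook entry point: resolve the protocol handlers, let the
   l4 protocol sanity-check the packet, look up (or create) the
   conntrack and run the per-protocol state machine on it. */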
unsigned int
nf_conntrack_in(int pf, unsigned int hooknum, struct sk_buff **pskb)
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;
	unsigned int dataoff;
	u_int8_t protonum;
	int set_reply = 0;
	int ret;

	/* Previously seen (loopback or untracked)?  Ignore. */
	if ((*pskb)->nfct) {
		NF_CT_STAT_INC_ATOMIC(ignore);
		return NF_ACCEPT;
	}

	/* rcu_read_lock()ed by nf_hook_slow */
	l3proto = __nf_ct_l3proto_find((u_int16_t)pf);
	ret = l3proto->get_l4proto(*pskb, skb_network_offset(*pskb),
				   &dataoff, &protonum);
	if (ret <= 0) {
		pr_debug("not prepared to track yet or error occurred\n");
		NF_CT_STAT_INC_ATOMIC(error);
		NF_CT_STAT_INC_ATOMIC(invalid);
		return -ret;
	}

	l4proto = __nf_ct_l4proto_find((u_int16_t)pf, protonum);

	/* It may be a special packet, error, unclean...
	 * inverse of the return code tells the netfilter
	 * core what to do with the packet. */
	if (l4proto->error != NULL &&
	    (ret = l4proto->error(*pskb, dataoff, &ctinfo, pf, hooknum)) <= 0) {
		NF_CT_STAT_INC_ATOMIC(error);
		NF_CT_STAT_INC_ATOMIC(invalid);
		return -ret;
	}

	ct = resolve_normal_ct(*pskb, dataoff, pf, protonum, l3proto, l4proto,
			       &set_reply, &ctinfo);
	if (!ct) {
		/* Not valid part of a connection */
		NF_CT_STAT_INC_ATOMIC(invalid);
		return NF_ACCEPT;
	}

	if (IS_ERR(ct)) {
		/* Too stressed to deal. */
		NF_CT_STAT_INC_ATOMIC(drop);
		return NF_DROP;
	}

	NF_CT_ASSERT((*pskb)->nfct);

	ret = l4proto->packet(ct, *pskb, dataoff, ctinfo, pf, hooknum);
	if (ret < 0) {
		/* Invalid: inverse of the return code tells
		 * the netfilter core what to do */
		pr_debug("nf_conntrack_in: Can't track with proto module\n");
		nf_conntrack_put((*pskb)->nfct);
		(*pskb)->nfct = NULL;
		NF_CT_STAT_INC_ATOMIC(invalid);
		return -ret;
	}

	if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
		nf_conntrack_event_cache(IPCT_STATUS, *pskb);

	return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_in);

int nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
			 const struct nf_conntrack_tuple *orig)
{
	int ret;

	rcu_read_lock();
	ret = nf_ct_invert_tuple(inverse, orig,
				 __nf_ct_l3proto_find(orig->src.l3num),
				 __nf_ct_l4proto_find(orig->src.l3num,
						      orig->dst.protonum));
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);

/* Alter reply tuple (maybe alter helper).  This is for NAT, and is
   implicitly racy: see __nf_conntrack_confirm */
void nf_conntrack_alter_reply(struct nf_conn *ct,
			      const struct nf_conntrack_tuple *newreply)
{
	struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_helper *helper;

	write_lock_bh(&nf_conntrack_lock);
	/* Should be unconfirmed, so not in hash table yet */
	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));

	pr_debug("Altering reply tuple of %p to ", ct);
	NF_CT_DUMP_TUPLE(newreply);

	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
	if (ct->master || (help && help->expecting != 0))
		goto out;

	helper = __nf_ct_helper_find(newreply);
	if (helper == NULL) {
		if (help)
			rcu_assign_pointer(help->helper, NULL);
		goto out;
	}

	if (help == NULL) {
		help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
		if (help == NULL)
			goto out;
	} else {
		memset(&help->help, 0, sizeof(help->help));
	}

	rcu_assign_pointer(help->helper, helper);
out:
	write_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);

/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
void __nf_ct_refresh_acct(struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  const struct sk_buff *skb,
			  unsigned long extra_jiffies,
			  int do_acct)
{
	int event = 0;

	NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
	NF_CT_ASSERT(skb);

	write_lock_bh(&nf_conntrack_lock);

	/* Only update if this is not a fixed timeout */
	if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) {
		write_unlock_bh(&nf_conntrack_lock);
		return;
	}

	/* If not in hash table, timer will not be active yet */
	if (!nf_ct_is_confirmed(ct)) {
		ct->timeout.expires = extra_jiffies;
		event = IPCT_REFRESH;
	} else {
		unsigned long newtime = jiffies + extra_jiffies;

		/* Only update the timeout if the new timeout is at least
		   HZ jiffies from the old timeout. Need del_timer for race
		   avoidance (may already be dying). */
		if (newtime - ct->timeout.expires >= HZ
		    && del_timer(&ct->timeout)) {
			ct->timeout.expires = newtime;
			add_timer(&ct->timeout);
			event = IPCT_REFRESH;
		}
	}

#ifdef CONFIG_NF_CT_ACCT
	if (do_acct) {
		ct->counters[CTINFO2DIR(ctinfo)].packets++;
		ct->counters[CTINFO2DIR(ctinfo)].bytes +=
			skb->len - skb_network_offset(skb);

		if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000)
		    || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000))
			event |= IPCT_COUNTER_FILLING;
	}
#endif

	write_unlock_bh(&nf_conntrack_lock);

	/* must be unlocked when calling event cache */
	if (event)
		nf_conntrack_event_cache(event, skb);
}
EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);

#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <linux/mutex.h>


/* Generic function for tcp/udp/sctp/dccp and the like. This needs to be
 * in nf_conntrack_core, since we don't want the protocols to autoload
 * or depend on ctnetlink */
int nf_ct_port_tuple_to_nfattr(struct sk_buff *skb,
			       const struct nf_conntrack_tuple *tuple)
{
	NFA_PUT(skb, CTA_PROTO_SRC_PORT, sizeof(u_int16_t),
		&tuple->src.u.tcp.port);
	NFA_PUT(skb, CTA_PROTO_DST_PORT, sizeof(u_int16_t),
		&tuple->dst.u.tcp.port);
	return 0;

nfattr_failure:
	return -1;
}
EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nfattr);

static const size_t cta_min_proto[CTA_PROTO_MAX] = {
	[CTA_PROTO_SRC_PORT-1]  = sizeof(u_int16_t),
	[CTA_PROTO_DST_PORT-1]  = sizeof(u_int16_t)
};

int nf_ct_port_nfattr_to_tuple(struct nfattr *tb[],
			       struct nf_conntrack_tuple *t)
{
	if (!tb[CTA_PROTO_SRC_PORT-1] || !tb[CTA_PROTO_DST_PORT-1])
		return -EINVAL;

	if (nfattr_bad_size(tb, CTA_PROTO_MAX, cta_min_proto))
		return -EINVAL;

	t->src.u.tcp.port = *(__be16 *)NFA_DATA(tb[CTA_PROTO_SRC_PORT-1]);
	t->dst.u.tcp.port = *(__be16 *)NFA_DATA(tb[CTA_PROTO_DST_PORT-1]);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_ct_port_nfattr_to_tuple);
#endif

/* Used by ipt_REJECT and ip6t_REJECT. */
void __nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;

	/* This ICMP is in reverse direction to the packet which caused it */
	ct = nf_ct_get(skb, &ctinfo);
	if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
		ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY;
	else
		ctinfo = IP_CT_RELATED;

	/* Attach to new skbuff, and increment count */
	nskb->nfct = &ct->ct_general;
	nskb->nfctinfo = ctinfo;
	nf_conntrack_get(nskb->nfct);
}
EXPORT_SYMBOL_GPL(__nf_conntrack_attach);

static inline int
do_iter(const struct nf_conntrack_tuple_hash *i,
	int (*iter)(struct nf_conn *i, void *data),
	void *data)
{
	return iter(nf_ct_tuplehash_to_ctrack(i), data);
}

/* Bring out ya dead! */
static struct nf_conn *
get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
		void *data, unsigned int *bucket)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct hlist_node *n;

	write_lock_bh(&nf_conntrack_lock);
	for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
		hlist_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnode) {
			ct = nf_ct_tuplehash_to_ctrack(h);
			if (iter(ct, data))
				goto found;
		}
	}
	hlist_for_each_entry(h, n, &unconfirmed, hnode) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (iter(ct, data))
			set_bit(IPS_DYING_BIT, &ct->status);
	}
	write_unlock_bh(&nf_conntrack_lock);
	return NULL;
found:
	atomic_inc(&ct->ct_general.use);
	write_unlock_bh(&nf_conntrack_lock);
	return ct;
}

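/* Repeatedly fetch a matching entry with a reference held and kill it
   by expiring its timer; unconfirmed entries are only marked dying. */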
void
nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data), void *data)
{
	struct nf_conn *ct;
	unsigned int bucket = 0;

	while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
		/* Time to push up daisies... */
		if (del_timer(&ct->timeout))
			death_by_timeout((unsigned long)ct);
		/* ... else the timer will get him soon. */

		nf_ct_put(ct);
	}
}
EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);

static int kill_all(struct nf_conn *i, void *data)
{
	return 1;
}

void nf_ct_free_hashtable(struct hlist_head *hash, int vmalloced, int size)
{
	if (vmalloced)
		vfree(hash);
	else
		free_pages((unsigned long)hash,
			   get_order(sizeof(struct hlist_head) * size));
}
EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);

void nf_conntrack_flush(void)
{
	nf_ct_iterate_cleanup(kill_all, NULL);
}
EXPORT_SYMBOL_GPL(nf_conntrack_flush);

/* Mishearing the voices in his head, our hero wonders how he's
   supposed to kill the mall. */
void nf_conntrack_cleanup(void)
{
	rcu_assign_pointer(ip_ct_attach, NULL);

	/* This makes sure all current packets have passed through
	   netfilter framework.  Roll on, two-stage module
	   delete... */
	synchronize_net();

	nf_ct_event_cache_flush();
 i_see_dead_people:
	nf_conntrack_flush();
	if (atomic_read(&nf_conntrack_count) != 0) {
		schedule();
		goto i_see_dead_people;
	}
	/* wait until all references to nf_conntrack_untracked are dropped */
	while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
		schedule();

	rcu_assign_pointer(nf_ct_destroy, NULL);

	kmem_cache_destroy(nf_conntrack_cachep);
	nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_vmalloc,
			     nf_conntrack_htable_size);

	nf_conntrack_proto_fini();
	nf_conntrack_helper_fini();
	nf_conntrack_expect_fini();
}

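/* Allocate a hash table of *sizep buckets, rounded up to a whole
   number of pages.  Page allocation is tried first; on failure we fall
   back to vmalloc() and report that via *vmalloced so the matching
   free routine can be used. */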
struct hlist_head *nf_ct_alloc_hashtable(int *sizep, int *vmalloced)
{
	struct hlist_head *hash;
	unsigned int size, i;

	*vmalloced = 0;

	size = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_head));
	hash = (void*)__get_free_pages(GFP_KERNEL,
				       get_order(sizeof(struct hlist_head)
						 * size));
	if (!hash) {
		*vmalloced = 1;
		printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
		hash = vmalloc(sizeof(struct hlist_head) * size);
	}

	if (hash)
		for (i = 0; i < size; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}
EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);

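/* Handler for the "hashsize" module parameter: resize the table at
   runtime by allocating a new one, rehashing every entry under the
   write lock with a fresh random seed, then freeing the old table.
   For example (path assuming the usual nf_conntrack module name):
     echo 16384 > /sys/module/nf_conntrack/parameters/hashsize */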
int set_hashsize(const char *val, struct kernel_param *kp)
{
	int i, bucket, hashsize, vmalloced;
	int old_vmalloced, old_size;
	int rnd;
	struct hlist_head *hash, *old_hash;
	struct nf_conntrack_tuple_hash *h;

	/* On boot, we can set this without any fancy locking. */
	if (!nf_conntrack_htable_size)
		return param_set_uint(val, kp);

	hashsize = simple_strtol(val, NULL, 0);
	if (!hashsize)
		return -EINVAL;

	hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced);
	if (!hash)
		return -ENOMEM;

	/* We have to rehash for the new table anyway, so we can also
	 * use a new random seed */
	get_random_bytes(&rnd, 4);

	write_lock_bh(&nf_conntrack_lock);
	for (i = 0; i < nf_conntrack_htable_size; i++) {
		while (!hlist_empty(&nf_conntrack_hash[i])) {
			h = hlist_entry(nf_conntrack_hash[i].first,
					struct nf_conntrack_tuple_hash, hnode);
			hlist_del(&h->hnode);
			bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
			hlist_add_head(&h->hnode, &hash[bucket]);
		}
	}
	old_size = nf_conntrack_htable_size;
	old_vmalloced = nf_conntrack_vmalloc;
	old_hash = nf_conntrack_hash;

	nf_conntrack_htable_size = hashsize;
	nf_conntrack_vmalloc = vmalloced;
	nf_conntrack_hash = hash;
	nf_conntrack_hash_rnd = rnd;
	write_unlock_bh(&nf_conntrack_lock);

	nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
	return 0;
}

module_param_call(hashsize, set_hashsize, param_get_uint,
		  &nf_conntrack_htable_size, 0600);

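/* Module init: size and allocate the hash table, create the nf_conn
   slab cache, bring up the protocol, expectation and helper
   subsystems, publish the attach/destroy hooks for the REJECT targets,
   and initialise the fake "untracked" conntrack. */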
int __init nf_conntrack_init(void)
{
	int max_factor = 8;
	int ret;

	/* Idea from tcp.c: use 1/16384 of memory.  On i386: 32MB
	 * machine has 512 buckets. >= 1GB machines have 16384 buckets. */
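	/* Worked example with 4-byte list heads (i386): 32 MB of memory
	 * gives (32 << 20) / 16384 / 4 = 512 buckets, and 1 GB gives
	 * (1 << 30) / 16384 / 4 = 16384, which is also the cap. */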
	if (!nf_conntrack_htable_size) {
		nf_conntrack_htable_size
			= (((num_physpages << PAGE_SHIFT) / 16384)
			   / sizeof(struct hlist_head));
		if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
			nf_conntrack_htable_size = 16384;
		if (nf_conntrack_htable_size < 32)
			nf_conntrack_htable_size = 32;

		/* Use a max. factor of four by default to get the same max as
		 * with the old struct list_heads. When a table size is given
		 * we use the old value of 8 to avoid reducing the max.
		 * entries. */
		max_factor = 4;
	}
	nf_conntrack_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
						  &nf_conntrack_vmalloc);
	if (!nf_conntrack_hash) {
		printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
		goto err_out;
	}

	nf_conntrack_max = max_factor * nf_conntrack_htable_size;

	printk("nf_conntrack version %s (%u buckets, %d max)\n",
	       NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
	       nf_conntrack_max);

	nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
						sizeof(struct nf_conn),
						0, 0, NULL, NULL);
	if (!nf_conntrack_cachep) {
		printk(KERN_ERR "Unable to create nf_conn slab cache\n");
		goto err_free_hash;
	}

	ret = nf_conntrack_proto_init();
	if (ret < 0)
		goto err_free_conntrack_slab;

	ret = nf_conntrack_expect_init();
	if (ret < 0)
		goto out_fini_proto;

	ret = nf_conntrack_helper_init();
	if (ret < 0)
		goto out_fini_expect;

	/* For use by REJECT target */
	rcu_assign_pointer(ip_ct_attach, __nf_conntrack_attach);
	rcu_assign_pointer(nf_ct_destroy, destroy_conntrack);

	/* Set up fake conntrack:
	    - to never be deleted, not in any hashes */
	atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
	/*  - and make it look like a confirmed connection */
	set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);

	return ret;

out_fini_expect:
	nf_conntrack_expect_fini();
out_fini_proto:
	nf_conntrack_proto_fini();
err_free_conntrack_slab:
	kmem_cache_destroy(nf_conntrack_cachep);
err_free_hash:
	nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_vmalloc,
			     nf_conntrack_htable_size);
err_out:
	return -ENOMEM;
}