/* Connection state tracking for netfilter.  This is separated from,
   but required by, the NAT layer; it can also be used by an iptables
   extension. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/mm.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>

#define NF_CONNTRACK_VERSION	"0.5.0"

DEFINE_RWLOCK(nf_conntrack_lock);
EXPORT_SYMBOL_GPL(nf_conntrack_lock);

/* nf_conntrack_standalone needs this */
atomic_t nf_conntrack_count = ATOMIC_INIT(0);
EXPORT_SYMBOL_GPL(nf_conntrack_count);

unsigned int nf_conntrack_htable_size __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);

int nf_conntrack_max __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_max);

struct hlist_head *nf_conntrack_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_hash);

struct nf_conn nf_conntrack_untracked __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_untracked);

unsigned int nf_ct_log_invalid __read_mostly;
HLIST_HEAD(unconfirmed);
static int nf_conntrack_vmalloc __read_mostly;
static struct kmem_cache *nf_conntrack_cachep __read_mostly;

DEFINE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat);
EXPORT_PER_CPU_SYMBOL(nf_conntrack_stat);

static int nf_conntrack_hash_rnd_initted;
static unsigned int nf_conntrack_hash_rnd;

static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
				  unsigned int size, unsigned int rnd)
{
	unsigned int a, b;

	a = jhash2(tuple->src.u3.all, ARRAY_SIZE(tuple->src.u3.all),
		   (tuple->src.l3num << 16) | tuple->dst.protonum);
	b = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
		   ((__force __u16)tuple->src.u.all << 16) |
		    (__force __u16)tuple->dst.u.all);

	return jhash_2words(a, b, rnd) % size;
}

static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
{
	return __hash_conntrack(tuple, nf_conntrack_htable_size,
				nf_conntrack_hash_rnd);
}
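
/*
 * Note that the original and reply tuples of a connection hash
 * independently of each other, which is why the insertion and
 * confirmation paths below always compute both a "hash" and a
 * "repl_hash" bucket.
 */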

int
nf_ct_get_tuple(const struct sk_buff *skb,
		unsigned int nhoff,
		unsigned int dataoff,
		u_int16_t l3num,
		u_int8_t protonum,
		struct nf_conntrack_tuple *tuple,
		const struct nf_conntrack_l3proto *l3proto,
		const struct nf_conntrack_l4proto *l4proto)
{
	NF_CT_TUPLE_U_BLANK(tuple);

	tuple->src.l3num = l3num;
	if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
		return 0;

	tuple->dst.protonum = protonum;
	tuple->dst.dir = IP_CT_DIR_ORIGINAL;

	return l4proto->pkt_to_tuple(skb, dataoff, tuple);
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuple);

int nf_ct_get_tuplepr(const struct sk_buff *skb,
		      unsigned int nhoff,
		      u_int16_t l3num,
		      struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;
	unsigned int protoff;
	u_int8_t protonum;
	int ret;

	rcu_read_lock();

	l3proto = __nf_ct_l3proto_find(l3num);
	ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
	if (ret != NF_ACCEPT) {
		rcu_read_unlock();
		return 0;
	}

	l4proto = __nf_ct_l4proto_find(l3num, protonum);

	ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, tuple,
			      l3proto, l4proto);

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
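
/*
 * Illustrative sketch, not part of the original file: how a hypothetical
 * caller might fill a tuple straight from an IPv4 skb using
 * nf_ct_get_tuplepr().  Wrapped in #if 0 so the file builds unchanged;
 * the function name is made up.
 */
#if 0
static int example_tuple_from_skb(const struct sk_buff *skb)
{
	struct nf_conntrack_tuple tuple;

	/* nf_ct_get_tuplepr() returns 0 if the headers can't be parsed */
	if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), PF_INET, &tuple))
		return -EINVAL;
	return 0;
}
#endif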

int
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
		   const struct nf_conntrack_tuple *orig,
		   const struct nf_conntrack_l3proto *l3proto,
		   const struct nf_conntrack_l4proto *l4proto)
{
	NF_CT_TUPLE_U_BLANK(inverse);

	inverse->src.l3num = orig->src.l3num;
	if (l3proto->invert_tuple(inverse, orig) == 0)
		return 0;

	inverse->dst.dir = !orig->dst.dir;

	inverse->dst.protonum = orig->dst.protonum;
	return l4proto->invert_tuple(inverse, orig);
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);

static void
clean_from_lists(struct nf_conn *ct)
{
	pr_debug("clean_from_lists(%p)\n", ct);
	hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
	hlist_del(&ct->tuplehash[IP_CT_DIR_REPLY].hnode);

	/* Destroy all pending expectations */
	nf_ct_remove_expectations(ct);
}

static void
destroy_conntrack(struct nf_conntrack *nfct)
{
	struct nf_conn *ct = (struct nf_conn *)nfct;
	struct nf_conntrack_l4proto *l4proto;

	pr_debug("destroy_conntrack(%p)\n", ct);
	NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
	NF_CT_ASSERT(!timer_pending(&ct->timeout));

	nf_conntrack_event(IPCT_DESTROY, ct);
	set_bit(IPS_DYING_BIT, &ct->status);

	/* To make sure we don't get any weird locking issues here:
	 * destroy_conntrack() MUST NOT be called with a write lock
	 * to nf_conntrack_lock!!! -HW */
	rcu_read_lock();
	l4proto = __nf_ct_l4proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num,
				       ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum);
	if (l4proto && l4proto->destroy)
		l4proto->destroy(ct);

	nf_ct_ext_destroy(ct);

	rcu_read_unlock();

	write_lock_bh(&nf_conntrack_lock);
	/* Expectations will have been removed in clean_from_lists,
	 * except TFTP can create an expectation on the first packet,
	 * before the connection is in the list, so we need to clean
	 * here, too. */
	nf_ct_remove_expectations(ct);

	/* We overload first tuple to link into unconfirmed list. */
	if (!nf_ct_is_confirmed(ct)) {
		BUG_ON(hlist_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode));
		hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
	}

	NF_CT_STAT_INC(delete);
	write_unlock_bh(&nf_conntrack_lock);

	if (ct->master)
		nf_ct_put(ct->master);

	pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
	nf_conntrack_free(ct);
}

static void death_by_timeout(unsigned long ul_conntrack)
{
	struct nf_conn *ct = (void *)ul_conntrack;
	struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_helper *helper;

	if (help) {
		rcu_read_lock();
		helper = rcu_dereference(help->helper);
		if (helper && helper->destroy)
			helper->destroy(ct);
		rcu_read_unlock();
	}

	write_lock_bh(&nf_conntrack_lock);
	/* Inside lock so preempt is disabled on module removal path.
	 * Otherwise we can get spurious warnings. */
	NF_CT_STAT_INC(delete_list);
	clean_from_lists(ct);
	write_unlock_bh(&nf_conntrack_lock);
	nf_ct_put(ct);
}

struct nf_conntrack_tuple_hash *
__nf_conntrack_find(const struct nf_conntrack_tuple *tuple,
		    const struct nf_conn *ignored_conntrack)
{
	struct nf_conntrack_tuple_hash *h;
	struct hlist_node *n;
	unsigned int hash = hash_conntrack(tuple);

	hlist_for_each_entry(h, n, &nf_conntrack_hash[hash], hnode) {
		if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
		    nf_ct_tuple_equal(tuple, &h->tuple)) {
			NF_CT_STAT_INC(found);
			return h;
		}
		NF_CT_STAT_INC(searched);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_find);

/* Find a connection corresponding to a tuple. */
struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_tuple_hash *h;

	read_lock_bh(&nf_conntrack_lock);
	h = __nf_conntrack_find(tuple, NULL);
	if (h)
		atomic_inc(&nf_ct_tuplehash_to_ctrack(h)->ct_general.use);
	read_unlock_bh(&nf_conntrack_lock);

	return h;
}
EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
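
/*
 * Illustrative sketch, not part of the original file: the reference
 * taken by nf_conntrack_find_get() must be dropped with nf_ct_put()
 * once the caller is done.  Hypothetical code, disabled with #if 0.
 */
#if 0
static void example_lookup(const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	h = nf_conntrack_find_get(tuple);
	if (h == NULL)
		return;
	ct = nf_ct_tuplehash_to_ctrack(h);
	/* ... inspect or update ct ... */
	nf_ct_put(ct);	/* drop the reference taken by the lookup */
}
#endif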

static void __nf_conntrack_hash_insert(struct nf_conn *ct,
				       unsigned int hash,
				       unsigned int repl_hash)
{
	hlist_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode,
		       &nf_conntrack_hash[hash]);
	hlist_add_head(&ct->tuplehash[IP_CT_DIR_REPLY].hnode,
		       &nf_conntrack_hash[repl_hash]);
}

void nf_conntrack_hash_insert(struct nf_conn *ct)
{
	unsigned int hash, repl_hash;

	hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	write_lock_bh(&nf_conntrack_lock);
	__nf_conntrack_hash_insert(ct, hash, repl_hash);
	write_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert);

/* Confirm a connection given skb; places it in hash table */
int
__nf_conntrack_confirm(struct sk_buff *skb)
{
	unsigned int hash, repl_hash;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct nf_conn_help *help;
	struct hlist_node *n;
	enum ip_conntrack_info ctinfo;

	ct = nf_ct_get(skb, &ctinfo);

	/* ipt_REJECT uses nf_conntrack_attach to attach related
	   ICMP/TCP RST packets in other direction.  Actual packet
	   which created connection will be IP_CT_NEW or for an
	   expected connection, IP_CT_RELATED. */
	if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
		return NF_ACCEPT;

	hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	/* We're not in hash table, and we refuse to set up related
	   connections for unconfirmed conns.  But packet copies and
	   REJECT will give spurious warnings here. */
	/* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */

	/* No external references means no one else could have
	   confirmed us. */
	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
	pr_debug("Confirming conntrack %p\n", ct);

	write_lock_bh(&nf_conntrack_lock);

	/* See if there's one in the list already, including reverse:
	   NAT could have grabbed it without realizing, since we're
	   not in the hash.  If there is, we lost race. */
	hlist_for_each_entry(h, n, &nf_conntrack_hash[hash], hnode)
		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				      &h->tuple))
			goto out;
	hlist_for_each_entry(h, n, &nf_conntrack_hash[repl_hash], hnode)
		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
				      &h->tuple))
			goto out;

	/* Remove from unconfirmed list */
	hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);

	__nf_conntrack_hash_insert(ct, hash, repl_hash);
	/* Timer relative to confirmation time, not original
	   setting time, otherwise we'd get timer wrap in
	   weird delay cases. */
	ct->timeout.expires += jiffies;
	add_timer(&ct->timeout);
	atomic_inc(&ct->ct_general.use);
	set_bit(IPS_CONFIRMED_BIT, &ct->status);
	NF_CT_STAT_INC(insert);
	write_unlock_bh(&nf_conntrack_lock);
	help = nfct_help(ct);
	if (help && help->helper)
		nf_conntrack_event_cache(IPCT_HELPER, skb);
#ifdef CONFIG_NF_NAT_NEEDED
	if (test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status) ||
	    test_bit(IPS_DST_NAT_DONE_BIT, &ct->status))
		nf_conntrack_event_cache(IPCT_NATINFO, skb);
#endif
	nf_conntrack_event_cache(master_ct(ct) ?
				 IPCT_RELATED : IPCT_NEW, skb);
	return NF_ACCEPT;

out:
	NF_CT_STAT_INC(insert_failed);
	write_unlock_bh(&nf_conntrack_lock);
	return NF_DROP;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);

/* Returns true if a connection corresponds to the tuple (required
   for NAT). */
int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
			 const struct nf_conn *ignored_conntrack)
{
	struct nf_conntrack_tuple_hash *h;

	read_lock_bh(&nf_conntrack_lock);
	h = __nf_conntrack_find(tuple, ignored_conntrack);
	read_unlock_bh(&nf_conntrack_lock);

	return h != NULL;
}
EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
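
/*
 * Illustrative sketch, not part of the original file: roughly how a NAT
 * port allocator could probe for an unused tuple with
 * nf_conntrack_tuple_taken().  Function name and port range are
 * hypothetical.
 */
#if 0
static int example_find_free_port(struct nf_conntrack_tuple *tuple,
				  const struct nf_conn *ct)
{
	unsigned int port;

	for (port = 1024; port < 65536; port++) {
		tuple->src.u.tcp.port = htons(port);
		if (!nf_conntrack_tuple_taken(tuple, ct))
			return 1;	/* tuple is free to use */
	}
	return 0;			/* nothing free in the range */
}
#endif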

#define NF_CT_EVICTION_RANGE	8

/* There's a small race here where we may free a just-assured
   connection.  Too bad: we're in trouble anyway. */
static int early_drop(unsigned int hash)
{
	/* Use oldest entry, which is roughly LRU */
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct = NULL, *tmp;
	struct hlist_node *n;
	unsigned int i, cnt = 0;
	int dropped = 0;

	read_lock_bh(&nf_conntrack_lock);
	for (i = 0; i < nf_conntrack_htable_size; i++) {
		hlist_for_each_entry(h, n, &nf_conntrack_hash[hash], hnode) {
			tmp = nf_ct_tuplehash_to_ctrack(h);
			if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
				ct = tmp;
			cnt++;
		}
		if (ct || cnt >= NF_CT_EVICTION_RANGE)
			break;
		hash = (hash + 1) % nf_conntrack_htable_size;
	}
	if (ct)
		atomic_inc(&ct->ct_general.use);
	read_unlock_bh(&nf_conntrack_lock);

	if (!ct)
		return dropped;

	if (del_timer(&ct->timeout)) {
		death_by_timeout((unsigned long)ct);
		dropped = 1;
		NF_CT_STAT_INC_ATOMIC(early_drop);
	}
	nf_ct_put(ct);
	return dropped;
}

struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
				   const struct nf_conntrack_tuple *repl)
{
	struct nf_conn *conntrack = NULL;

	if (unlikely(!nf_conntrack_hash_rnd_initted)) {
		get_random_bytes(&nf_conntrack_hash_rnd, 4);
		nf_conntrack_hash_rnd_initted = 1;
	}

	/* We don't want any race condition at early drop stage */
	atomic_inc(&nf_conntrack_count);

	if (nf_conntrack_max
	    && atomic_read(&nf_conntrack_count) > nf_conntrack_max) {
		unsigned int hash = hash_conntrack(orig);
		if (!early_drop(hash)) {
			atomic_dec(&nf_conntrack_count);
			if (net_ratelimit())
				printk(KERN_WARNING
				       "nf_conntrack: table full, dropping"
				       " packet.\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	conntrack = kmem_cache_zalloc(nf_conntrack_cachep, GFP_ATOMIC);
	if (conntrack == NULL) {
		pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
		atomic_dec(&nf_conntrack_count);
		return ERR_PTR(-ENOMEM);
	}

	atomic_set(&conntrack->ct_general.use, 1);
	conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
	conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
	/* Don't set timer yet: wait for confirmation */
	setup_timer(&conntrack->timeout, death_by_timeout,
		    (unsigned long)conntrack);

	return conntrack;
}
EXPORT_SYMBOL_GPL(nf_conntrack_alloc);

void nf_conntrack_free(struct nf_conn *conntrack)
{
	nf_ct_ext_free(conntrack);
	kmem_cache_free(nf_conntrack_cachep, conntrack);
	atomic_dec(&nf_conntrack_count);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);

/* Allocate a new conntrack: we return -ENOMEM if classification
   failed due to stress.  Otherwise it really is unclassifiable. */
static struct nf_conntrack_tuple_hash *
init_conntrack(const struct nf_conntrack_tuple *tuple,
	       struct nf_conntrack_l3proto *l3proto,
	       struct nf_conntrack_l4proto *l4proto,
	       struct sk_buff *skb,
	       unsigned int dataoff)
{
	struct nf_conn *conntrack;
	struct nf_conn_help *help;
	struct nf_conntrack_tuple repl_tuple;
	struct nf_conntrack_expect *exp;

	if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
		pr_debug("Can't invert tuple.\n");
		return NULL;
	}

	conntrack = nf_conntrack_alloc(tuple, &repl_tuple);
	if (conntrack == NULL || IS_ERR(conntrack)) {
		pr_debug("Can't allocate conntrack.\n");
		return (struct nf_conntrack_tuple_hash *)conntrack;
	}

	if (!l4proto->new(conntrack, skb, dataoff)) {
		nf_conntrack_free(conntrack);
		pr_debug("init conntrack: can't track with proto module\n");
		return NULL;
	}

	write_lock_bh(&nf_conntrack_lock);
	exp = nf_ct_find_expectation(tuple);
	if (exp) {
		pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
			 conntrack, exp);
		/* Welcome, Mr. Bond.  We've been expecting you... */
		__set_bit(IPS_EXPECTED_BIT, &conntrack->status);
		conntrack->master = exp->master;
		if (exp->helper) {
			help = nf_ct_helper_ext_add(conntrack, GFP_ATOMIC);
			if (help)
				rcu_assign_pointer(help->helper, exp->helper);
		}

#ifdef CONFIG_NF_CONNTRACK_MARK
		conntrack->mark = exp->master->mark;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
		conntrack->secmark = exp->master->secmark;
#endif
		nf_conntrack_get(&conntrack->master->ct_general);
		NF_CT_STAT_INC(expect_new);
	} else {
		struct nf_conntrack_helper *helper;

		helper = __nf_ct_helper_find(&repl_tuple);
		if (helper) {
			help = nf_ct_helper_ext_add(conntrack, GFP_ATOMIC);
			if (help)
				rcu_assign_pointer(help->helper, helper);
		}
		NF_CT_STAT_INC(new);
	}

	/* Overload tuple linked list to put us in unconfirmed list. */
	hlist_add_head(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL].hnode,
		       &unconfirmed);

	write_unlock_bh(&nf_conntrack_lock);

	if (exp) {
		if (exp->expectfn)
			exp->expectfn(conntrack, exp);
		nf_ct_expect_put(exp);
	}

	return &conntrack->tuplehash[IP_CT_DIR_ORIGINAL];
}

/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
static inline struct nf_conn *
resolve_normal_ct(struct sk_buff *skb,
		  unsigned int dataoff,
		  u_int16_t l3num,
		  u_int8_t protonum,
		  struct nf_conntrack_l3proto *l3proto,
		  struct nf_conntrack_l4proto *l4proto,
		  int *set_reply,
		  enum ip_conntrack_info *ctinfo)
{
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
			     dataoff, l3num, protonum, &tuple, l3proto,
			     l4proto)) {
		pr_debug("resolve_normal_ct: Can't get tuple\n");
		return NULL;
	}

	/* look for tuple match */
	h = nf_conntrack_find_get(&tuple);
	if (!h) {
		h = init_conntrack(&tuple, l3proto, l4proto, skb, dataoff);
		if (!h)
			return NULL;
		if (IS_ERR(h))
			return (void *)h;
	}
	ct = nf_ct_tuplehash_to_ctrack(h);

	/* It exists; we have (non-exclusive) reference. */
	if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
		*ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
		/* Please set reply bit if this packet OK */
		*set_reply = 1;
	} else {
		/* Once we've had two way comms, always ESTABLISHED. */
		if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
			pr_debug("nf_conntrack_in: normal packet for %p\n", ct);
			*ctinfo = IP_CT_ESTABLISHED;
		} else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
			pr_debug("nf_conntrack_in: related packet for %p\n",
				 ct);
			*ctinfo = IP_CT_RELATED;
		} else {
			pr_debug("nf_conntrack_in: new packet for %p\n", ct);
			*ctinfo = IP_CT_NEW;
		}
		*set_reply = 0;
	}
	skb->nfct = &ct->ct_general;
	skb->nfctinfo = *ctinfo;
	return ct;
}

unsigned int
nf_conntrack_in(int pf, unsigned int hooknum, struct sk_buff *skb)
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;
	unsigned int dataoff;
	u_int8_t protonum;
	int set_reply = 0;
	int ret;

	/* Previously seen (loopback or untracked)?  Ignore. */
	if (skb->nfct) {
		NF_CT_STAT_INC_ATOMIC(ignore);
		return NF_ACCEPT;
	}

	/* rcu_read_lock()ed by nf_hook_slow */
	l3proto = __nf_ct_l3proto_find((u_int16_t)pf);
	ret = l3proto->get_l4proto(skb, skb_network_offset(skb),
				   &dataoff, &protonum);
	if (ret <= 0) {
		pr_debug("not prepared to track yet or error occurred\n");
		NF_CT_STAT_INC_ATOMIC(error);
		NF_CT_STAT_INC_ATOMIC(invalid);
		return -ret;
	}

	l4proto = __nf_ct_l4proto_find((u_int16_t)pf, protonum);

	/* It may be a special packet (error, unclean...); the inverse
	 * of the return code tells the netfilter core what to do with
	 * the packet. */
	if (l4proto->error != NULL &&
	    (ret = l4proto->error(skb, dataoff, &ctinfo, pf, hooknum)) <= 0) {
		NF_CT_STAT_INC_ATOMIC(error);
		NF_CT_STAT_INC_ATOMIC(invalid);
		return -ret;
	}

	ct = resolve_normal_ct(skb, dataoff, pf, protonum, l3proto, l4proto,
			       &set_reply, &ctinfo);
	if (!ct) {
		/* Not valid part of a connection */
		NF_CT_STAT_INC_ATOMIC(invalid);
		return NF_ACCEPT;
	}

	if (IS_ERR(ct)) {
		/* Too stressed to deal. */
		NF_CT_STAT_INC_ATOMIC(drop);
		return NF_DROP;
	}

	NF_CT_ASSERT(skb->nfct);

	ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum);
	if (ret < 0) {
		/* Invalid: inverse of the return code tells
		 * the netfilter core what to do */
		pr_debug("nf_conntrack_in: Can't track with proto module\n");
		nf_conntrack_put(skb->nfct);
		skb->nfct = NULL;
		NF_CT_STAT_INC_ATOMIC(invalid);
		return -ret;
	}

	if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
		nf_conntrack_event_cache(IPCT_STATUS, skb);

	return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_in);
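
/*
 * Illustrative sketch, not part of the original file: the shape of a
 * hypothetical per-family netfilter hook wrapping nf_conntrack_in(),
 * similar to the IPv4/IPv6 glue code.  Disabled with #if 0.
 */
#if 0
static unsigned int example_conntrack_hook(unsigned int hooknum,
					   struct sk_buff *skb,
					   const struct net_device *in,
					   const struct net_device *out,
					   int (*okfn)(struct sk_buff *))
{
	return nf_conntrack_in(PF_INET, hooknum, skb);
}
#endif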

int nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
			 const struct nf_conntrack_tuple *orig)
{
	int ret;

	rcu_read_lock();
	ret = nf_ct_invert_tuple(inverse, orig,
				 __nf_ct_l3proto_find(orig->src.l3num),
				 __nf_ct_l4proto_find(orig->src.l3num,
						      orig->dst.protonum));
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);

/* Alter reply tuple (maybe alter helper).  This is for NAT, and is
   implicitly racy: see __nf_conntrack_confirm */
void nf_conntrack_alter_reply(struct nf_conn *ct,
			      const struct nf_conntrack_tuple *newreply)
{
	struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_helper *helper;

	write_lock_bh(&nf_conntrack_lock);
	/* Should be unconfirmed, so not in hash table yet */
	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));

	pr_debug("Altering reply tuple of %p to ", ct);
	NF_CT_DUMP_TUPLE(newreply);

	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
	if (ct->master || (help && help->expecting != 0))
		goto out;

	helper = __nf_ct_helper_find(newreply);
	if (helper == NULL) {
		if (help)
			rcu_assign_pointer(help->helper, NULL);
		goto out;
	}

	if (help == NULL) {
		help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
		if (help == NULL)
			goto out;
	} else {
		memset(&help->help, 0, sizeof(help->help));
	}

	rcu_assign_pointer(help->helper, helper);
out:
	write_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);

/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
void __nf_ct_refresh_acct(struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  const struct sk_buff *skb,
			  unsigned long extra_jiffies,
			  int do_acct)
{
	int event = 0;

	NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
	NF_CT_ASSERT(skb);

	write_lock_bh(&nf_conntrack_lock);

	/* Only update if this is not a fixed timeout */
	if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) {
		write_unlock_bh(&nf_conntrack_lock);
		return;
	}

	/* If not in hash table, timer will not be active yet */
	if (!nf_ct_is_confirmed(ct)) {
		ct->timeout.expires = extra_jiffies;
		event = IPCT_REFRESH;
	} else {
		unsigned long newtime = jiffies + extra_jiffies;

		/* Only update the timeout if the new timeout is at least
		   HZ jiffies from the old timeout. Need del_timer for race
		   avoidance (may already be dying). */
		if (newtime - ct->timeout.expires >= HZ
		    && del_timer(&ct->timeout)) {
			ct->timeout.expires = newtime;
			add_timer(&ct->timeout);
			event = IPCT_REFRESH;
		}
	}

#ifdef CONFIG_NF_CT_ACCT
	if (do_acct) {
		ct->counters[CTINFO2DIR(ctinfo)].packets++;
		ct->counters[CTINFO2DIR(ctinfo)].bytes +=
			skb->len - skb_network_offset(skb);

		if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000)
		    || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000))
			event |= IPCT_COUNTER_FILLING;
	}
#endif

	write_unlock_bh(&nf_conntrack_lock);

	/* must be unlocked when calling event cache */
	if (event)
		nf_conntrack_event_cache(event, skb);
}
EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);

#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <linux/mutex.h>

/* Generic function for tcp/udp/sctp/dccp and the like. This needs to be
 * in nf_conntrack_core, since we don't want the protocols to autoload
 * or depend on ctnetlink */
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
			       const struct nf_conntrack_tuple *tuple)
{
	NLA_PUT(skb, CTA_PROTO_SRC_PORT, sizeof(u_int16_t),
		&tuple->src.u.tcp.port);
	NLA_PUT(skb, CTA_PROTO_DST_PORT, sizeof(u_int16_t),
		&tuple->dst.u.tcp.port);
	return 0;

nla_put_failure:
	return -1;
}
EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);

const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
	[CTA_PROTO_SRC_PORT]  = { .type = NLA_U16 },
	[CTA_PROTO_DST_PORT]  = { .type = NLA_U16 },
};
EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);

int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
			       struct nf_conntrack_tuple *t)
{
	if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
		return -EINVAL;

	t->src.u.tcp.port = *(__be16 *)nla_data(tb[CTA_PROTO_SRC_PORT]);
	t->dst.u.tcp.port = *(__be16 *)nla_data(tb[CTA_PROTO_DST_PORT]);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
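
/*
 * Illustrative sketch, not part of the original file: how a port-based
 * l4proto might wire up these generic helpers in its ops, much as the
 * real TCP/UDP trackers do.  The initializer below is hypothetical.
 */
#if 0
static struct nf_conntrack_l4proto example_l4proto = {
	.l3proto	 = PF_INET,
	.l4proto	 = IPPROTO_TCP,
	.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
	.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
	.nla_policy	 = nf_ct_port_nla_policy,
};
#endif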
#endif

/* Used by ipt_REJECT and ip6t_REJECT. */
void __nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;

	/* This ICMP is in reverse direction to the packet which caused it */
	ct = nf_ct_get(skb, &ctinfo);
	if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
		ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY;
	else
		ctinfo = IP_CT_RELATED;

	/* Attach to new skbuff, and increment count */
	nskb->nfct = &ct->ct_general;
	nskb->nfctinfo = ctinfo;
	nf_conntrack_get(nskb->nfct);
}
EXPORT_SYMBOL_GPL(__nf_conntrack_attach);

static inline int
do_iter(const struct nf_conntrack_tuple_hash *i,
	int (*iter)(struct nf_conn *i, void *data),
	void *data)
{
	return iter(nf_ct_tuplehash_to_ctrack(i), data);
}

/* Bring out ya dead! */
static struct nf_conn *
get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
		void *data, unsigned int *bucket)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct hlist_node *n;

	write_lock_bh(&nf_conntrack_lock);
	for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
		hlist_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnode) {
			ct = nf_ct_tuplehash_to_ctrack(h);
			if (iter(ct, data))
				goto found;
		}
	}
	hlist_for_each_entry(h, n, &unconfirmed, hnode) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (iter(ct, data))
			set_bit(IPS_DYING_BIT, &ct->status);
	}
	write_unlock_bh(&nf_conntrack_lock);
	return NULL;
found:
	atomic_inc(&ct->ct_general.use);
	write_unlock_bh(&nf_conntrack_lock);
	return ct;
}

void
nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data), void *data)
{
	struct nf_conn *ct;
	unsigned int bucket = 0;

	while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
		/* Time to push up daises... */
		if (del_timer(&ct->timeout))
			death_by_timeout((unsigned long)ct);
		/* ... else the timer will get him soon. */

		nf_ct_put(ct);
	}
}
EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);

static int kill_all(struct nf_conn *i, void *data)
{
	return 1;
}

void nf_ct_free_hashtable(struct hlist_head *hash, int vmalloced, int size)
{
	if (vmalloced)
		vfree(hash);
	else
		free_pages((unsigned long)hash,
			   get_order(sizeof(struct hlist_head) * size));
}
EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);

void nf_conntrack_flush(void)
{
	nf_ct_iterate_cleanup(kill_all, NULL);
}
EXPORT_SYMBOL_GPL(nf_conntrack_flush);

/* Mishearing the voices in his head, our hero wonders how he's
   supposed to kill the mall. */
void nf_conntrack_cleanup(void)
{
	rcu_assign_pointer(ip_ct_attach, NULL);

	/* This makes sure all current packets have passed through
	   netfilter framework.  Roll on, two-stage module
	   delete... */
	synchronize_net();

	nf_ct_event_cache_flush();
 i_see_dead_people:
	nf_conntrack_flush();
	if (atomic_read(&nf_conntrack_count) != 0) {
		schedule();
		goto i_see_dead_people;
	}
	/* wait until all references to nf_conntrack_untracked are dropped */
	while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
		schedule();

	rcu_assign_pointer(nf_ct_destroy, NULL);

	kmem_cache_destroy(nf_conntrack_cachep);
	nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_vmalloc,
			     nf_conntrack_htable_size);

	nf_conntrack_proto_fini();
	nf_conntrack_helper_fini();
	nf_conntrack_expect_fini();
}

struct hlist_head *nf_ct_alloc_hashtable(int *sizep, int *vmalloced)
{
	struct hlist_head *hash;
	unsigned int size, i;

	*vmalloced = 0;

	size = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_head));
	hash = (void*)__get_free_pages(GFP_KERNEL,
				       get_order(sizeof(struct hlist_head)
						 * size));
	if (!hash) {
		*vmalloced = 1;
		printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
		hash = vmalloc(sizeof(struct hlist_head) * size);
	}

	if (hash)
		for (i = 0; i < size; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}
EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);

int set_hashsize(const char *val, struct kernel_param *kp)
{
	int i, bucket, hashsize, vmalloced;
	int old_vmalloced, old_size;
	int rnd;
	struct hlist_head *hash, *old_hash;
	struct nf_conntrack_tuple_hash *h;

	/* On boot, we can set this without any fancy locking. */
	if (!nf_conntrack_htable_size)
		return param_set_uint(val, kp);

	hashsize = simple_strtol(val, NULL, 0);
	if (!hashsize)
		return -EINVAL;

	hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced);
	if (!hash)
		return -ENOMEM;

	/* We have to rehash for the new table anyway, so we can also
	 * use a new random seed */
	get_random_bytes(&rnd, 4);

	write_lock_bh(&nf_conntrack_lock);
	for (i = 0; i < nf_conntrack_htable_size; i++) {
		while (!hlist_empty(&nf_conntrack_hash[i])) {
			h = hlist_entry(nf_conntrack_hash[i].first,
					struct nf_conntrack_tuple_hash, hnode);
			hlist_del(&h->hnode);
			bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
			hlist_add_head(&h->hnode, &hash[bucket]);
		}
	}
	old_size = nf_conntrack_htable_size;
	old_vmalloced = nf_conntrack_vmalloc;
	old_hash = nf_conntrack_hash;

	nf_conntrack_htable_size = hashsize;
	nf_conntrack_vmalloc = vmalloced;
	nf_conntrack_hash = hash;
	nf_conntrack_hash_rnd = rnd;
	write_unlock_bh(&nf_conntrack_lock);

	nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
	return 0;
}

module_param_call(hashsize, set_hashsize, param_get_uint,
		  &nf_conntrack_htable_size, 0600);
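
/*
 * With the parameter hooked up above, the table can be resized at
 * runtime through sysfs, e.g. (assuming the usual mount point):
 *
 *	echo 16384 > /sys/module/nf_conntrack/parameters/hashsize
 */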

int __init nf_conntrack_init(void)
{
	int max_factor = 8;
	int ret;

	/* Idea from tcp.c: use 1/16384 of memory.  On i386: 32MB
	 * machine has 512 buckets. >= 1GB machines have 16384 buckets. */
	if (!nf_conntrack_htable_size) {
		nf_conntrack_htable_size
			= (((num_physpages << PAGE_SHIFT) / 16384)
			   / sizeof(struct hlist_head));
		if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
			nf_conntrack_htable_size = 16384;
		if (nf_conntrack_htable_size < 32)
			nf_conntrack_htable_size = 32;

		/* Use a max. factor of four by default to get the same max as
		 * with the old struct list_heads. When a table size is given
		 * we use the old value of 8 to avoid reducing the max.
		 * entries. */
		max_factor = 4;
	}
	nf_conntrack_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
						  &nf_conntrack_vmalloc);
	if (!nf_conntrack_hash) {
		printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
		goto err_out;
	}

	nf_conntrack_max = max_factor * nf_conntrack_htable_size;

	printk("nf_conntrack version %s (%u buckets, %d max)\n",
	       NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
	       nf_conntrack_max);

	nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
						sizeof(struct nf_conn),
						0, 0, NULL);
	if (!nf_conntrack_cachep) {
		printk(KERN_ERR "Unable to create nf_conn slab cache\n");
		goto err_free_hash;
	}

	ret = nf_conntrack_proto_init();
	if (ret < 0)
		goto err_free_conntrack_slab;

	ret = nf_conntrack_expect_init();
	if (ret < 0)
		goto out_fini_proto;

	ret = nf_conntrack_helper_init();
	if (ret < 0)
		goto out_fini_expect;

	/* For use by REJECT target */
	rcu_assign_pointer(ip_ct_attach, __nf_conntrack_attach);
	rcu_assign_pointer(nf_ct_destroy, destroy_conntrack);

	/* Set up fake conntrack:
	    - to never be deleted, not in any hashes */
	atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
	/*  - and make it look like a confirmed connection */
	set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);

	return ret;

out_fini_expect:
	nf_conntrack_expect_fini();
out_fini_proto:
	nf_conntrack_proto_fini();
err_free_conntrack_slab:
	kmem_cache_destroy(nf_conntrack_cachep);
err_free_hash:
	nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_vmalloc,
			     nf_conntrack_htable_size);
err_out:
	return -ENOMEM;
}