x_tables.c
/*
 * x_tables core - Backend for {ip,ip6,arp}_tables
 *
 * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
 *
 * Based on existing ip_tables code which is
 *   Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 *   Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <net/net_namespace.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter_arp/arp_tables.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");

#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))

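/*
 * Bookkeeping for CONFIG_COMPAT: one node per rule component whose
 * native layout is bigger than its 32-bit compat counterpart, recording
 * where in the blob the extra bytes appear.
 */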
struct compat_delta {
	struct compat_delta *next;
	unsigned int offset;
	int delta;
};

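/*
 * Per address family state: the lists of registered matches and targets,
 * protected by a mutex (plus the compat translation state, if enabled).
 */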
struct xt_af {
	struct mutex mutex;
	struct list_head match;
	struct list_head target;
#ifdef CONFIG_COMPAT
	struct mutex compat_mutex;
	struct compat_delta *compat_offsets;
#endif
};

static struct xt_af *xt;

static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
	[NFPROTO_UNSPEC] = "x",
	[NFPROTO_IPV4]   = "ip",
	[NFPROTO_ARP]    = "arp",
	[NFPROTO_BRIDGE] = "eb",
	[NFPROTO_IPV6]   = "ip6",
};

/* Allow this many total (re)entries. */
static const unsigned int xt_jumpstack_multiplier = 2;

/* Registration hooks for targets. */
int
xt_register_target(struct xt_target *target)
{
	u_int8_t af = target->family;
	int ret;

	ret = mutex_lock_interruptible(&xt[af].mutex);
	if (ret != 0)
		return ret;
	list_add(&target->list, &xt[af].target);
	mutex_unlock(&xt[af].mutex);
	return ret;
}
EXPORT_SYMBOL(xt_register_target);
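
/*
 * Typical registration from an extension module (an illustrative sketch
 * only; the "mytarget" names are hypothetical):
 *
 *	static struct xt_target mytarget_tg_reg __read_mostly = {
 *		.name       = "MYTARGET",
 *		.revision   = 0,
 *		.family     = NFPROTO_UNSPEC,
 *		.target     = mytarget_tg,
 *		.targetsize = sizeof(struct xt_mytarget_info),
 *		.me         = THIS_MODULE,
 *	};
 *
 *	return xt_register_target(&mytarget_tg_reg);
 */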

void
xt_unregister_target(struct xt_target *target)
{
	u_int8_t af = target->family;

	mutex_lock(&xt[af].mutex);
	list_del(&target->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_target);

int
xt_register_targets(struct xt_target *target, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_target(&target[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		xt_unregister_targets(target, i);
	return err;
}
EXPORT_SYMBOL(xt_register_targets);

void
xt_unregister_targets(struct xt_target *target, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		xt_unregister_target(&target[i]);
}
EXPORT_SYMBOL(xt_unregister_targets);

int
xt_register_match(struct xt_match *match)
{
	u_int8_t af = match->family;
	int ret;

	ret = mutex_lock_interruptible(&xt[af].mutex);
	if (ret != 0)
		return ret;

	list_add(&match->list, &xt[af].match);
	mutex_unlock(&xt[af].mutex);

	return ret;
}
EXPORT_SYMBOL(xt_register_match);

void
xt_unregister_match(struct xt_match *match)
{
	u_int8_t af = match->family;

	mutex_lock(&xt[af].mutex);
	list_del(&match->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_match);

int
xt_register_matches(struct xt_match *match, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_match(&match[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		xt_unregister_matches(match, i);
	return err;
}
EXPORT_SYMBOL(xt_register_matches);

void
xt_unregister_matches(struct xt_match *match, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		xt_unregister_match(&match[i]);
}
EXPORT_SYMBOL(xt_unregister_matches);


/*
 * These are weird, but module loading must not be done with mutex
 * held (since they will register), and we have to have a single
 * function to use try_then_request_module().
 */

/* Find match, grabs ref.  Returns ERR_PTR() on error. */
struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
{
	struct xt_match *m;
	int err = 0;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0)
		return ERR_PTR(-EINTR);

	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision == revision) {
				if (try_module_get(m->me)) {
					mutex_unlock(&xt[af].mutex);
					return m;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_match(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_match);

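/*
 * Like xt_find_match(), but will try to load the module on a miss: the
 * request below expands to e.g. "ipt_<name>" or "ip6t_<name>", names
 * that extension modules advertise through MODULE_ALIAS().
 */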
struct xt_match *
xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
{
	struct xt_match *match;

	match = try_then_request_module(xt_find_match(nfproto, name, revision),
					"%st_%s", xt_prefix[nfproto], name);
	return (match != NULL) ? match : ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL_GPL(xt_request_find_match);

/* Find target, grabs ref.  Returns ERR_PTR() on error. */
struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *t;
	int err = 0;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0)
		return ERR_PTR(-EINTR);

	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision == revision) {
				if (try_module_get(t->me)) {
					mutex_unlock(&xt[af].mutex);
					return t;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_target(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_target);

struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *target;

	target = try_then_request_module(xt_find_target(af, name, revision),
					 "%st_%s", xt_prefix[af], name);
	return (target != NULL) ? target : ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL_GPL(xt_request_find_target);

static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_match *m;
	int have_rev = 0;

	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision > *bestp)
				*bestp = m->revision;
			if (m->revision == revision)
				have_rev = 1;
		}
	}

	if (af != NFPROTO_UNSPEC && !have_rev)
		return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}

static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_target *t;
	int have_rev = 0;

	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision > *bestp)
				*bestp = t->revision;
			if (t->revision == revision)
				have_rev = 1;
		}
	}

	if (af != NFPROTO_UNSPEC && !have_rev)
		return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}

/* Returns true or false (if no such extension at all) */
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
		     int *err)
{
	int have_rev, best = -1;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0) {
		*err = -EINTR;
		return 1;
	}
	if (target == 1)
		have_rev = target_revfn(af, name, revision, &best);
	else
		have_rev = match_revfn(af, name, revision, &best);
	mutex_unlock(&xt[af].mutex);

	/* Nothing at all?  Return 0 to try loading module. */
	if (best == -1) {
		*err = -ENOENT;
		return 0;
	}

	*err = best;
	if (!have_rev)
		*err = -EPROTONOSUPPORT;
	return 1;
}
EXPORT_SYMBOL_GPL(xt_find_revision);

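/* Render a hook mask as a '/'-separated list, e.g. "INPUT/FORWARD". */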
static char *textify_hooks(char *buf, size_t size, unsigned int mask)
{
	static const char *const names[] = {
		"PREROUTING", "INPUT", "FORWARD",
		"OUTPUT", "POSTROUTING", "BROUTING",
	};
	unsigned int i;
	char *p = buf;
	bool np = false;
	int res;

	*p = '\0';
	for (i = 0; i < ARRAY_SIZE(names); ++i) {
		if (!(mask & (1 << i)))
			continue;
		res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
		if (res > 0) {
			size -= res;
			p += res;
		}
		np = true;
	}

	return buf;
}

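/*
 * Centralized sanity checking run for each match at ruleset load time:
 * size, table, hook and protocol restrictions, then the extension's own
 * ->checkentry() callback.
 */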
int xt_check_match(struct xt_mtchk_param *par,
		   unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->match->matchsize) != size &&
	    par->match->matchsize != -1) {
		/*
		 * ebt_among is exempt from centralized matchsize checking
		 * because it uses a dynamic-size data set.
		 */
		pr_err("%s_tables: %s.%u match: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->revision,
		       XT_ALIGN(par->match->matchsize), size);
		return -EINVAL;
	}
	if (par->match->table != NULL &&
	    strcmp(par->match->table, par->table) != 0) {
		pr_err("%s_tables: %s match: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->table, par->table);
		return -EINVAL;
	}
	if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s match: used from hooks %s, but only "
		       "valid from %s\n",
		       xt_prefix[par->family], par->match->name,
		       textify_hooks(used, sizeof(used), par->hook_mask),
		       textify_hooks(allow, sizeof(allow), par->match->hooks));
		return -EINVAL;
	}
	if (par->match->proto && (par->match->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s match: only valid for protocol %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->proto);
		return -EINVAL;
	}
	if (par->match->checkentry != NULL) {
		ret = par->match->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_match);

#ifdef CONFIG_COMPAT
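/*
 * Record that the native layout grows by @delta bytes at @offset compared
 * to the compat layout.  xt_compat_calc_jump() later sums the deltas of
 * all entries below a given offset, which is how 32-bit rule offsets
 * (e.g. verdict jump targets) get translated to native ones.
 */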
int xt_compat_add_offset(u_int8_t af, unsigned int offset, short delta)
{
	struct compat_delta *tmp;

	tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	tmp->offset = offset;
	tmp->delta = delta;

	if (xt[af].compat_offsets) {
		tmp->next = xt[af].compat_offsets->next;
		xt[af].compat_offsets->next = tmp;
	} else {
		xt[af].compat_offsets = tmp;
		tmp->next = NULL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_add_offset);

void xt_compat_flush_offsets(u_int8_t af)
{
	struct compat_delta *tmp, *next;

	if (xt[af].compat_offsets) {
		for (tmp = xt[af].compat_offsets; tmp; tmp = next) {
			next = tmp->next;
			kfree(tmp);
		}
		xt[af].compat_offsets = NULL;
	}
}
EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);

int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
{
	struct compat_delta *tmp;
	int delta;

	for (tmp = xt[af].compat_offsets, delta = 0; tmp; tmp = tmp->next)
		if (tmp->offset < offset)
			delta += tmp->delta;
	return delta;
}
EXPORT_SYMBOL_GPL(xt_compat_calc_jump);

int xt_compat_match_offset(const struct xt_match *match)
{
	u_int16_t csize = match->compatsize ? : match->matchsize;
	return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_match_offset);

int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
			      unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
	int pad, off = xt_compat_match_offset(match);
	u_int16_t msize = cm->u.user.match_size;

	m = *dstptr;
	memcpy(m, cm, sizeof(*cm));
	if (match->compat_from_user)
		match->compat_from_user(m->data, cm->data);
	else
		memcpy(m->data, cm->data, msize - sizeof(*cm));
	pad = XT_ALIGN(match->matchsize) - match->matchsize;
	if (pad > 0)
		memset(m->data + match->matchsize, 0, pad);

	msize += off;
	m->u.user.match_size = msize;

	*size += off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_from_user);

int xt_compat_match_to_user(const struct xt_entry_match *m,
			    void __user **dstptr, unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match __user *cm = *dstptr;
	int off = xt_compat_match_offset(match);
	u_int16_t msize = m->u.user.match_size - off;

	if (copy_to_user(cm, m, sizeof(*cm)) ||
	    put_user(msize, &cm->u.user.match_size) ||
	    copy_to_user(cm->u.user.name, m->u.kernel.match->name,
			 strlen(m->u.kernel.match->name) + 1))
		return -EFAULT;

	if (match->compat_to_user) {
		if (match->compat_to_user((void __user *)cm->data, m->data))
			return -EFAULT;
	} else {
		if (copy_to_user(cm->data, m->data, msize - sizeof(*cm)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
#endif /* CONFIG_COMPAT */

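/* Counterpart of xt_check_match() for targets: same checks, same order. */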
int xt_check_target(struct xt_tgchk_param *par,
		    unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->target->targetsize) != size) {
		pr_err("%s_tables: %s.%u target: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->revision,
		       XT_ALIGN(par->target->targetsize), size);
		return -EINVAL;
	}
	if (par->target->table != NULL &&
	    strcmp(par->target->table, par->table) != 0) {
		pr_err("%s_tables: %s target: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->table, par->table);
		return -EINVAL;
	}
	if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s target: used from hooks %s, but only "
		       "usable from %s\n",
		       xt_prefix[par->family], par->target->name,
		       textify_hooks(used, sizeof(used), par->hook_mask),
		       textify_hooks(allow, sizeof(allow), par->target->hooks));
		return -EINVAL;
	}
	if (par->target->proto && (par->target->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s target: only valid for protocol %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->proto);
		return -EINVAL;
	}
	if (par->target->checkentry != NULL) {
		ret = par->target->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_target);

#ifdef CONFIG_COMPAT
int xt_compat_target_offset(const struct xt_target *target)
{
	u_int16_t csize = target->compatsize ? : target->targetsize;
	return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_target_offset);

void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
				unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
	int pad, off = xt_compat_target_offset(target);
	u_int16_t tsize = ct->u.user.target_size;

	t = *dstptr;
	memcpy(t, ct, sizeof(*ct));
	if (target->compat_from_user)
		target->compat_from_user(t->data, ct->data);
	else
		memcpy(t->data, ct->data, tsize - sizeof(*ct));
	pad = XT_ALIGN(target->targetsize) - target->targetsize;
	if (pad > 0)
		memset(t->data + target->targetsize, 0, pad);

	tsize += off;
	t->u.user.target_size = tsize;

	*size += off;
	*dstptr += tsize;
}
EXPORT_SYMBOL_GPL(xt_compat_target_from_user);

int xt_compat_target_to_user(const struct xt_entry_target *t,
			     void __user **dstptr, unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target __user *ct = *dstptr;
	int off = xt_compat_target_offset(target);
	u_int16_t tsize = t->u.user.target_size - off;

	if (copy_to_user(ct, t, sizeof(*ct)) ||
	    put_user(tsize, &ct->u.user.target_size) ||
	    copy_to_user(ct->u.user.name, t->u.kernel.target->name,
			 strlen(t->u.kernel.target->name) + 1))
		return -EFAULT;

	if (target->compat_to_user) {
		if (target->compat_to_user((void __user *)ct->data, t->data))
			return -EFAULT;
	} else {
		if (copy_to_user(ct->data, t->data, tsize - sizeof(*ct)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += tsize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
#endif

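/*
 * Rule blobs are replicated once per possible CPU: small ones come from
 * kmalloc_node(), anything above PAGE_SIZE from vmalloc_node(), each on
 * the owning CPU's NUMA node.
 */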
struct xt_table_info *xt_alloc_table_info(unsigned int size)
{
	struct xt_table_info *newinfo;
	int cpu;

	/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
	if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
		return NULL;

	newinfo = kzalloc(XT_TABLE_INFO_SZ, GFP_KERNEL);
	if (!newinfo)
		return NULL;

	newinfo->size = size;

	for_each_possible_cpu(cpu) {
		if (size <= PAGE_SIZE)
			newinfo->entries[cpu] = kmalloc_node(size,
							GFP_KERNEL,
							cpu_to_node(cpu));
		else
			newinfo->entries[cpu] = vmalloc_node(size,
							cpu_to_node(cpu));

		if (newinfo->entries[cpu] == NULL) {
			xt_free_table_info(newinfo);
			return NULL;
		}
	}

	return newinfo;
}
EXPORT_SYMBOL(xt_alloc_table_info);

void xt_free_table_info(struct xt_table_info *info)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		if (info->size <= PAGE_SIZE)
			kfree(info->entries[cpu]);
		else
			vfree(info->entries[cpu]);
	}

	if (info->jumpstack != NULL) {
		if (sizeof(void *) * info->stacksize > PAGE_SIZE) {
			for_each_possible_cpu(cpu)
				vfree(info->jumpstack[cpu]);
		} else {
			for_each_possible_cpu(cpu)
				kfree(info->jumpstack[cpu]);
		}
	}

	if (sizeof(void **) * nr_cpu_ids > PAGE_SIZE)
		vfree(info->jumpstack);
	else
		kfree(info->jumpstack);

	free_percpu(info->stackptr);

	kfree(info);
}
EXPORT_SYMBOL(xt_free_table_info);

/* Find table by name, grabs mutex & ref.  Returns ERR_PTR() on error. */
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
				    const char *name)
{
	struct xt_table *t;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0)
		return ERR_PTR(-EINTR);

	list_for_each_entry(t, &net->xt.tables[af], list)
		if (strcmp(t->name, name) == 0 && try_module_get(t->me))
			return t;
	mutex_unlock(&xt[af].mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(xt_find_table_lock);

void xt_table_unlock(struct xt_table *table)
{
	mutex_unlock(&xt[table->af].mutex);
}
EXPORT_SYMBOL_GPL(xt_table_unlock);

#ifdef CONFIG_COMPAT
void xt_compat_lock(u_int8_t af)
{
	mutex_lock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_lock);

void xt_compat_unlock(u_int8_t af)
{
	mutex_unlock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_unlock);
#endif

DEFINE_PER_CPU(struct xt_info_lock, xt_info_locks);
EXPORT_PER_CPU_SYMBOL_GPL(xt_info_locks);

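/*
 * The per-CPU jumpstack stores return positions while a packet traverses
 * user-defined chains; its depth is the stacksize computed at table
 * translation time, scaled by xt_jumpstack_multiplier above.
 */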
static int xt_jumpstack_alloc(struct xt_table_info *i)
{
	unsigned int size;
	int cpu;

	i->stackptr = alloc_percpu(unsigned int);
	if (i->stackptr == NULL)
		return -ENOMEM;

	size = sizeof(void **) * nr_cpu_ids;
	if (size > PAGE_SIZE)
		i->jumpstack = vmalloc(size);
	else
		i->jumpstack = kmalloc(size, GFP_KERNEL);
	if (i->jumpstack == NULL)
		return -ENOMEM;
	memset(i->jumpstack, 0, size);

	i->stacksize *= xt_jumpstack_multiplier;
	size = sizeof(void *) * i->stacksize;
	for_each_possible_cpu(cpu) {
		if (size > PAGE_SIZE)
			i->jumpstack[cpu] = vmalloc_node(size,
				cpu_to_node(cpu));
		else
			i->jumpstack[cpu] = kmalloc_node(size,
				GFP_KERNEL, cpu_to_node(cpu));
		if (i->jumpstack[cpu] == NULL)
			/*
			 * Freeing will be done later on by the callers. The
			 * chain is: xt_replace_table -> __do_replace ->
			 * do_replace -> xt_free_table_info.
			 */
			return -ENOMEM;
	}

	return 0;
}

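/*
 * Swap in @newinfo under local_bh_disable() and return the previous
 * private blob, which the caller frees once the final counter values
 * have been pulled out of it.  On a num_counters mismatch nothing is
 * swapped; *error is set to -EAGAIN and NULL is returned.
 */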
struct xt_table_info *
xt_replace_table(struct xt_table *table,
	      unsigned int num_counters,
	      struct xt_table_info *newinfo,
	      int *error)
{
	struct xt_table_info *private;
	int ret;

	ret = xt_jumpstack_alloc(newinfo);
	if (ret < 0) {
		*error = ret;
		return NULL;
	}

	/* Do the substitution. */
	local_bh_disable();
	private = table->private;

	/* Check inside lock: is the old number correct? */
	if (num_counters != private->number) {
		pr_debug("num_counters != table->private->number (%u/%u)\n",
			 num_counters, private->number);
		local_bh_enable();
		*error = -EAGAIN;
		return NULL;
	}

	table->private = newinfo;
	newinfo->initial_entries = private->initial_entries;

	/*
	 * Even though table entries have now been swapped, other CPU's
	 * may still be using the old entries. This is okay, because
	 * resynchronization happens because of the locking done
	 * during the get_counters() routine.
	 */
	local_bh_enable();

	return private;
}
EXPORT_SYMBOL_GPL(xt_replace_table);

struct xt_table *xt_register_table(struct net *net,
				   const struct xt_table *input_table,
				   struct xt_table_info *bootstrap,
				   struct xt_table_info *newinfo)
{
	int ret;
	struct xt_table_info *private;
	struct xt_table *t, *table;

	/* Don't add one object to multiple lists. */
	table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	ret = mutex_lock_interruptible(&xt[table->af].mutex);
	if (ret != 0)
		goto out_free;

	/* Don't autoload: we'd eat our tail... */
	list_for_each_entry(t, &net->xt.tables[table->af], list) {
		if (strcmp(t->name, table->name) == 0) {
			ret = -EEXIST;
			goto unlock;
		}
	}

	/* Simplifies replace_table code. */
	table->private = bootstrap;

	if (!xt_replace_table(table, 0, newinfo, &ret))
		goto unlock;

	private = table->private;
	pr_debug("table->private->number = %u\n", private->number);

	/* save number of initial entries */
	private->initial_entries = private->number;

	list_add(&table->list, &net->xt.tables[table->af]);
	mutex_unlock(&xt[table->af].mutex);
	return table;

 unlock:
	mutex_unlock(&xt[table->af].mutex);
out_free:
	kfree(table);
out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(xt_register_table);

void *xt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;

	mutex_lock(&xt[table->af].mutex);
	private = table->private;
	list_del(&table->list);
	mutex_unlock(&xt[table->af].mutex);
	kfree(table);

	return private;
}
EXPORT_SYMBOL_GPL(xt_unregister_table);

#ifdef CONFIG_PROC_FS
struct xt_names_priv {
	struct seq_net_private p;
	u_int8_t af;
};
static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	mutex_lock(&xt[af].mutex);
	return seq_list_start(&net->xt.tables[af], *pos);
}

static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	return seq_list_next(v, &net->xt.tables[af], pos);
}

static void xt_table_seq_stop(struct seq_file *seq, void *v)
{
	struct xt_names_priv *priv = seq->private;
	u_int8_t af = priv->af;

	mutex_unlock(&xt[af].mutex);
}

static int xt_table_seq_show(struct seq_file *seq, void *v)
{
	struct xt_table *table = list_entry(v, struct xt_table, list);

	if (strlen(table->name))
		return seq_printf(seq, "%s\n", table->name);
	else
		return 0;
}

static const struct seq_operations xt_table_seq_ops = {
	.start	= xt_table_seq_start,
	.next	= xt_table_seq_next,
	.stop	= xt_table_seq_stop,
	.show	= xt_table_seq_show,
};

static int xt_table_open(struct inode *inode, struct file *file)
{
	int ret;
	struct xt_names_priv *priv;

	ret = seq_open_net(inode, file, &xt_table_seq_ops,
			   sizeof(struct xt_names_priv));
	if (!ret) {
		priv = ((struct seq_file *)file->private_data)->private;
		priv->af = (unsigned long)PDE(inode)->data;
	}
	return ret;
}

static const struct file_operations xt_table_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_table_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};

/*
 * Traverse state for ip{,6}_{tables,matches} for helping crossing
 * the multi-AF mutexes.
 */
struct nf_mttg_trav {
	struct list_head *head, *curr;
	uint8_t class, nfproto;
};

enum {
	MTTG_TRAV_INIT,
	MTTG_TRAV_NFP_UNSPEC,
	MTTG_TRAV_NFP_SPEC,
	MTTG_TRAV_DONE,
};
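/*
 * Traversal order: take the NFPROTO_UNSPEC mutex and walk that list
 * first, then drop it and take the per-family mutex for the second leg;
 * at most one of the two mutexes is held at any time.
 */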

static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
    bool is_target)
{
	static const uint8_t next_class[] = {
		[MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
		[MTTG_TRAV_NFP_SPEC]   = MTTG_TRAV_DONE,
	};
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_INIT:
		trav->class = MTTG_TRAV_NFP_UNSPEC;
		mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
		trav->head = trav->curr = is_target ?
			&xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
		break;
	case MTTG_TRAV_NFP_UNSPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		mutex_lock(&xt[trav->nfproto].mutex);
		trav->head = trav->curr = is_target ?
			&xt[trav->nfproto].target : &xt[trav->nfproto].match;
		trav->class = next_class[trav->class];
		break;
	case MTTG_TRAV_NFP_SPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		/* fallthru, _stop will unlock */
	default:
		return NULL;
	}

	if (ppos != NULL)
		++*ppos;
	return trav;
}

static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
    bool is_target)
{
	struct nf_mttg_trav *trav = seq->private;
	unsigned int j;

	trav->class = MTTG_TRAV_INIT;
	for (j = 0; j < *pos; ++j)
		if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
			return NULL;
	return trav;
}

static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
{
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		break;
	case MTTG_TRAV_NFP_SPEC:
		mutex_unlock(&xt[trav->nfproto].mutex);
		break;
	}
}

static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, false);
}

static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, false);
}

static int xt_match_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_match *match;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		match = list_entry(trav->curr, struct xt_match, list);
		return (*match->name == '\0') ? 0 :
		       seq_printf(seq, "%s\n", match->name);
	}
	return 0;
}

static const struct seq_operations xt_match_seq_ops = {
	.start	= xt_match_seq_start,
	.next	= xt_match_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_match_seq_show,
};

static int xt_match_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct nf_mttg_trav *trav;
	int ret;

	trav = kmalloc(sizeof(*trav), GFP_KERNEL);
	if (trav == NULL)
		return -ENOMEM;

	ret = seq_open(file, &xt_match_seq_ops);
	if (ret < 0) {
		kfree(trav);
		return ret;
	}

	seq = file->private_data;
	seq->private = trav;
	trav->nfproto = (unsigned long)PDE(inode)->data;
	return 0;
}

static const struct file_operations xt_match_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_match_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, true);
}

static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, true);
}

static int xt_target_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_target *target;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		target = list_entry(trav->curr, struct xt_target, list);
		return (*target->name == '\0') ? 0 :
		       seq_printf(seq, "%s\n", target->name);
	}
	return 0;
}

static const struct seq_operations xt_target_seq_ops = {
	.start	= xt_target_seq_start,
	.next	= xt_target_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_target_seq_show,
};

static int xt_target_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct nf_mttg_trav *trav;
	int ret;

	trav = kmalloc(sizeof(*trav), GFP_KERNEL);
	if (trav == NULL)
		return -ENOMEM;

	ret = seq_open(file, &xt_target_seq_ops);
	if (ret < 0) {
		kfree(trav);
		return ret;
	}

	seq = file->private_data;
	seq->private = trav;
	trav->nfproto = (unsigned long)PDE(inode)->data;
	return 0;
}

static const struct file_operations xt_target_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_target_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

#define FORMAT_TABLES	"_tables_names"
#define	FORMAT_MATCHES	"_tables_matches"
#define FORMAT_TARGETS 	"_tables_targets"

#endif /* CONFIG_PROC_FS */

/**
 * xt_hook_link - set up hooks for a new table
 * @table:	table with metadata needed to set up hooks
 * @fn:		Hook function
 *
 * This function will take care of creating and registering the necessary
 * Netfilter hooks for XT tables.
 */
struct nf_hook_ops *xt_hook_link(const struct xt_table *table, nf_hookfn *fn)
{
	unsigned int hook_mask = table->valid_hooks;
	uint8_t i, num_hooks = hweight32(hook_mask);
	uint8_t hooknum;
	struct nf_hook_ops *ops;
	int ret;

	ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
	     hook_mask >>= 1, ++hooknum) {
		if (!(hook_mask & 1))
			continue;
		ops[i].hook     = fn;
		ops[i].owner    = table->me;
		ops[i].pf       = table->af;
		ops[i].hooknum  = hooknum;
		ops[i].priority = table->priority;
		++i;
	}

	ret = nf_register_hooks(ops, num_hooks);
	if (ret < 0) {
		kfree(ops);
		return ERR_PTR(ret);
	}

	return ops;
}
EXPORT_SYMBOL_GPL(xt_hook_link);
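
/*
 * Sketch of the intended calling pattern from a table module (the names
 * here are hypothetical):
 *
 *	ops = xt_hook_link(&my_table, my_hook_fn);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *	...
 *	xt_hook_unlink(&my_table, ops);
 */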

/**
 * xt_hook_unlink - remove hooks for a table
 * @table:	table whose hooks should be removed
 * @ops:	nf_hook_ops array as returned by xt_hook_link
 */
void xt_hook_unlink(const struct xt_table *table, struct nf_hook_ops *ops)
{
	nf_unregister_hooks(ops, hweight32(table->valid_hooks));
	kfree(ops);
}
EXPORT_SYMBOL_GPL(xt_hook_unlink);

int xt_proto_init(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];
	struct proc_dir_entry *proc;
#endif

	if (af >= ARRAY_SIZE(xt_prefix))
		return -EINVAL;


#ifdef CONFIG_PROC_FS
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_table_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out;

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_match_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_tables;

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_target_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_matches;
#endif

	return 0;

#ifdef CONFIG_PROC_FS
out_remove_matches:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc_net_remove(net, buf);

out_remove_tables:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc_net_remove(net, buf);
out:
	return -1;
#endif
}
EXPORT_SYMBOL_GPL(xt_proto_init);

void xt_proto_fini(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc_net_remove(net, buf);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	proc_net_remove(net, buf);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc_net_remove(net, buf);
#endif /*CONFIG_PROC_FS*/
}
EXPORT_SYMBOL_GPL(xt_proto_fini);

static int __net_init xt_net_init(struct net *net)
{
	int i;

	for (i = 0; i < NFPROTO_NUMPROTO; i++)
		INIT_LIST_HEAD(&net->xt.tables[i]);
	return 0;
}

static struct pernet_operations xt_net_ops = {
	.init = xt_net_init,
};

static int __init xt_init(void)
{
	unsigned int i;
	int rv;

	for_each_possible_cpu(i) {
		struct xt_info_lock *lock = &per_cpu(xt_info_locks, i);
		spin_lock_init(&lock->lock);
		lock->readers = 0;
	}

	xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	for (i = 0; i < NFPROTO_NUMPROTO; i++) {
		mutex_init(&xt[i].mutex);
#ifdef CONFIG_COMPAT
		mutex_init(&xt[i].compat_mutex);
		xt[i].compat_offsets = NULL;
#endif
		INIT_LIST_HEAD(&xt[i].target);
		INIT_LIST_HEAD(&xt[i].match);
	}
	rv = register_pernet_subsys(&xt_net_ops);
	if (rv < 0)
		kfree(xt);
	return rv;
}

static void __exit xt_fini(void)
{
	unregister_pernet_subsys(&xt_net_ops);
	kfree(xt);
}

module_init(xt_init);
module_exit(xt_fini);