/* netfilter.c: look after the filters for various protocols.
 * Heavily influenced by the old firewall.c by David Bonn and Alan Cox.
 *
 * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
 * way.
 *
 * This code is GPL.
 */
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <net/protocol.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/netfilter_ipv6.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netfilter/nf_queue.h>
#include <net/sock.h>

#include "nf_internals.h"

const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_ipv6_ops);

DEFINE_PER_CPU(bool, nf_skb_duplicated);
EXPORT_SYMBOL_GPL(nf_skb_duplicated);

#ifdef CONFIG_JUMP_LABEL
struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
EXPORT_SYMBOL(nf_hooks_needed);
#endif

static DEFINE_MUTEX(nf_hook_mutex);

/* max hooks per family/hooknum */
#define MAX_HOOK_COUNT		1024

#define nf_entry_dereference(e) \
	rcu_dereference_protected(e, lockdep_is_held(&nf_hook_mutex))

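/* A hook blob is one allocation laid out as:
 *
 *	struct nf_hook_entries          (header: num_hook_entries)
 *	num x struct nf_hook_entry      (hookfn + priv; packet-path data)
 *	num x struct nf_hook_ops *      (backpointers; control plane only)
 *	struct nf_hook_entries_rcu_head (trailer used when freeing the blob)
 *
 * Keeping everything in one kvzalloc'd region lets the packet path walk
 * a flat array with no extra pointer chasing.
 */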
static struct nf_hook_entries *allocate_hook_entries_size(u16 num)
{
	struct nf_hook_entries *e;
	size_t alloc = sizeof(*e) +
		       sizeof(struct nf_hook_entry) * num +
		       sizeof(struct nf_hook_ops *) * num +
		       sizeof(struct nf_hook_entries_rcu_head);

	if (num == 0)
		return NULL;

	e = kvzalloc(alloc, GFP_KERNEL);
	if (e)
		e->num_hook_entries = num;
	return e;
}

static void __nf_hook_entries_free(struct rcu_head *h)
{
	struct nf_hook_entries_rcu_head *head;

	head = container_of(h, struct nf_hook_entries_rcu_head, head);
	kvfree(head->allocation);
}

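/* Freeing is deferred via call_rcu(): readers walk the blob under
 * rcu_read_lock(), so the old allocation must survive until a grace
 * period has elapsed.  The rcu_head sits in the blob's own trailer,
 * so no extra allocation is needed at free time.
 */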
static void nf_hook_entries_free(struct nf_hook_entries *e)
{
	struct nf_hook_entries_rcu_head *head;
	struct nf_hook_ops **ops;
	unsigned int num;

	if (!e)
		return;

	num = e->num_hook_entries;
	ops = nf_hook_entries_get_hook_ops(e);
	head = (void *)&ops[num];
	head->allocation = e;
	call_rcu(&head->head, __nf_hook_entries_free);
}

static unsigned int accept_all(void *priv,
			       struct sk_buff *skb,
			       const struct nf_hook_state *state)
{
	return NF_ACCEPT; /* ACCEPT makes nf_hook_slow call next hook */
}

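/* Placeholder written over a slot when its hook is unregistered (see
 * nf_remove_net_hook()).  It just accepts, so traversal behaves as if
 * the hook were already gone, until the blob is shrunk.
 */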
static const struct nf_hook_ops dummy_ops = {
	.hook = accept_all,
	.priority = INT_MIN,
};

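/* Build a new blob containing all live (non-dummy) hooks of @old plus
 * @reg, keeping entries sorted by ascending priority.  Returns the new
 * blob or an ERR_PTR(); the caller publishes it and frees the old one.
 */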
static struct nf_hook_entries *
nf_hook_entries_grow(const struct nf_hook_entries *old,
		     const struct nf_hook_ops *reg)
{
	unsigned int i, alloc_entries, nhooks, old_entries;
	struct nf_hook_ops **orig_ops = NULL;
	struct nf_hook_ops **new_ops;
	struct nf_hook_entries *new;
	bool inserted = false;

	alloc_entries = 1;
	old_entries = old ? old->num_hook_entries : 0;

	if (old) {
		orig_ops = nf_hook_entries_get_hook_ops(old);

		for (i = 0; i < old_entries; i++) {
			if (orig_ops[i] != &dummy_ops)
				alloc_entries++;
		}
	}

	if (alloc_entries > MAX_HOOK_COUNT)
		return ERR_PTR(-E2BIG);

	new = allocate_hook_entries_size(alloc_entries);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new_ops = nf_hook_entries_get_hook_ops(new);

	i = 0;
	nhooks = 0;
	while (i < old_entries) {
		if (orig_ops[i] == &dummy_ops) {
			++i;
			continue;
		}

		if (inserted || reg->priority > orig_ops[i]->priority) {
			new_ops[nhooks] = (void *)orig_ops[i];
			new->hooks[nhooks] = old->hooks[i];
			i++;
		} else {
			new_ops[nhooks] = (void *)reg;
			new->hooks[nhooks].hook = reg->hook;
			new->hooks[nhooks].priv = reg->priv;
			inserted = true;
		}
		nhooks++;
	}

	if (!inserted) {
		new_ops[nhooks] = (void *)reg;
		new->hooks[nhooks].hook = reg->hook;
		new->hooks[nhooks].priv = reg->priv;
	}

	return new;
}

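/* Debug aid: warn if the blob is not sorted by ascending priority.
 * Compiles to a no-op unless CONFIG_DEBUG_KERNEL is set.
 */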
static void hooks_validate(const struct nf_hook_entries *hooks)
{
#ifdef CONFIG_DEBUG_KERNEL
	struct nf_hook_ops **orig_ops;
	int prio = INT_MIN;
	size_t i = 0;

	orig_ops = nf_hook_entries_get_hook_ops(hooks);

	for (i = 0; i < hooks->num_hook_entries; i++) {
		if (orig_ops[i] == &dummy_ops)
			continue;

		WARN_ON(orig_ops[i]->priority < prio);

		if (orig_ops[i]->priority > prio)
			prio = orig_ops[i]->priority;
	}
#endif
}

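/* Insert @reg into the blob at @pp without taking nf_hook_mutex; the
 * rcu_dereference_raw() means the caller must supply its own exclusion
 * against concurrent updaters.
 */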
int nf_hook_entries_insert_raw(struct nf_hook_entries __rcu **pp,
				const struct nf_hook_ops *reg)
{
	struct nf_hook_entries *new_hooks;
	struct nf_hook_entries *p;

	p = rcu_dereference_raw(*pp);
	new_hooks = nf_hook_entries_grow(p, reg);
	if (IS_ERR(new_hooks))
		return PTR_ERR(new_hooks);

	hooks_validate(new_hooks);

	rcu_assign_pointer(*pp, new_hooks);

	BUG_ON(p == new_hooks);
	nf_hook_entries_free(p);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_hook_entries_insert_raw);

/*
 * __nf_hook_entries_try_shrink - try to shrink hook array
 *
 * @old -- current hook blob at @pp
 * @pp -- location of hook blob
 *
 * Hook unregistration must always succeed, so to-be-removed hooks
 * are replaced by a dummy one that will just move to next hook.
 *
 * This counts the current dummy hooks, attempts to allocate new blob,
 * copies the live hooks, then replaces and discards old one.
 *
 * return values:
 *
 * Returns address to free, or NULL.
 */
static void *__nf_hook_entries_try_shrink(struct nf_hook_entries *old,
					  struct nf_hook_entries __rcu **pp)
{
	unsigned int i, j, skip = 0, hook_entries;
	struct nf_hook_entries *new = NULL;
	struct nf_hook_ops **orig_ops;
	struct nf_hook_ops **new_ops;

	if (WARN_ON_ONCE(!old))
		return NULL;

	orig_ops = nf_hook_entries_get_hook_ops(old);
	for (i = 0; i < old->num_hook_entries; i++) {
		if (orig_ops[i] == &dummy_ops)
			skip++;
	}

	/* if skip == hook_entries all hooks have been removed */
	hook_entries = old->num_hook_entries;
	if (skip == hook_entries)
		goto out_assign;

	if (skip == 0)
		return NULL;

	hook_entries -= skip;
	new = allocate_hook_entries_size(hook_entries);
	if (!new)
		return NULL;

	new_ops = nf_hook_entries_get_hook_ops(new);
	for (i = 0, j = 0; i < old->num_hook_entries; i++) {
		if (orig_ops[i] == &dummy_ops)
			continue;
		new->hooks[j] = old->hooks[i];
		new_ops[j] = (void *)orig_ops[i];
		j++;
	}
	hooks_validate(new);
out_assign:
	rcu_assign_pointer(*pp, new);
	return old;
}

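/* Map (pf, hooknum, dev) to the location of the hook blob pointer.
 * Most families live in per-netns arrays; NFPROTO_NETDEV ingress hooks
 * hang off the net_device itself.
 */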
static struct nf_hook_entries __rcu **
nf_hook_entry_head(struct net *net, int pf, unsigned int hooknum,
		   struct net_device *dev)
{
	switch (pf) {
	case NFPROTO_NETDEV:
		break;
#ifdef CONFIG_NETFILTER_FAMILY_ARP
	case NFPROTO_ARP:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_arp) <= hooknum))
			return NULL;
		return net->nf.hooks_arp + hooknum;
#endif
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
	case NFPROTO_BRIDGE:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_bridge) <= hooknum))
			return NULL;
		return net->nf.hooks_bridge + hooknum;
#endif
	case NFPROTO_IPV4:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_ipv4) <= hooknum))
			return NULL;
		return net->nf.hooks_ipv4 + hooknum;
	case NFPROTO_IPV6:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_ipv6) <= hooknum))
			return NULL;
		return net->nf.hooks_ipv6 + hooknum;
#if IS_ENABLED(CONFIG_DECNET)
	case NFPROTO_DECNET:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_decnet) <= hooknum))
			return NULL;
		return net->nf.hooks_decnet + hooknum;
#endif
	default:
		WARN_ON_ONCE(1);
		return NULL;
	}

#ifdef CONFIG_NETFILTER_INGRESS
	if (hooknum == NF_NETDEV_INGRESS) {
		if (dev && dev_net(dev) == net)
			return &dev->nf_hooks_ingress;
	}
#endif
	WARN_ON_ONCE(1);
	return NULL;
}

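/* Slow-path registration: grow the blob under nf_hook_mutex, publish it
 * with rcu_assign_pointer(), then free the old blob after a grace
 * period.  The jump-label and ingress counters keep the packet fast
 * path cheap while no hooks are registered.
 */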
static int __nf_register_net_hook(struct net *net, int pf,
				  const struct nf_hook_ops *reg)
{
	struct nf_hook_entries *p, *new_hooks;
	struct nf_hook_entries __rcu **pp;

	if (pf == NFPROTO_NETDEV) {
#ifndef CONFIG_NETFILTER_INGRESS
		if (reg->hooknum == NF_NETDEV_INGRESS)
			return -EOPNOTSUPP;
#endif
		if (reg->hooknum != NF_NETDEV_INGRESS ||
		    !reg->dev || dev_net(reg->dev) != net)
			return -EINVAL;
	}

	pp = nf_hook_entry_head(net, pf, reg->hooknum, reg->dev);
	if (!pp)
		return -EINVAL;

	mutex_lock(&nf_hook_mutex);

	p = nf_entry_dereference(*pp);
	new_hooks = nf_hook_entries_grow(p, reg);

	if (!IS_ERR(new_hooks))
		rcu_assign_pointer(*pp, new_hooks);

	mutex_unlock(&nf_hook_mutex);
	if (IS_ERR(new_hooks))
		return PTR_ERR(new_hooks);

	hooks_validate(new_hooks);
#ifdef CONFIG_NETFILTER_INGRESS
	if (pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
		net_inc_ingress_queue();
#endif
#ifdef CONFIG_JUMP_LABEL
	static_key_slow_inc(&nf_hooks_needed[pf][reg->hooknum]);
#endif
	BUG_ON(p == new_hooks);
	nf_hook_entries_free(p);
	return 0;
}

/*
 * nf_remove_net_hook - remove a hook from blob
 *
 * @oldp: current address of hook blob
 * @unreg: hook to unregister
 *
 * This cannot fail, hook unregistration must always succeed.
 * Therefore replace the to-be-removed hook with a dummy hook.
 */
static bool nf_remove_net_hook(struct nf_hook_entries *old,
			       const struct nf_hook_ops *unreg)
{
	struct nf_hook_ops **orig_ops;
	unsigned int i;

	orig_ops = nf_hook_entries_get_hook_ops(old);
	for (i = 0; i < old->num_hook_entries; i++) {
		if (orig_ops[i] != unreg)
			continue;
		WRITE_ONCE(old->hooks[i].hook, accept_all);
		WRITE_ONCE(orig_ops[i], &dummy_ops);
		return true;
	}

	return false;
}

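/* Slow-path unregistration: overwrite the hook with dummy_ops under
 * nf_hook_mutex, try to shrink the blob, and flush the nf_queue backlog
 * (queued packets would otherwise resume traversal in a stale blob)
 * before freeing the old allocation.
 */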
static void __nf_unregister_net_hook(struct net *net, int pf,
				     const struct nf_hook_ops *reg)
{
	struct nf_hook_entries __rcu **pp;
	struct nf_hook_entries *p;

	pp = nf_hook_entry_head(net, pf, reg->hooknum, reg->dev);
	if (!pp)
		return;

	mutex_lock(&nf_hook_mutex);

	p = nf_entry_dereference(*pp);
	if (WARN_ON_ONCE(!p)) {
		mutex_unlock(&nf_hook_mutex);
		return;
	}

	if (nf_remove_net_hook(p, reg)) {
#ifdef CONFIG_NETFILTER_INGRESS
		if (pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
			net_dec_ingress_queue();
#endif
#ifdef CONFIG_JUMP_LABEL
		static_key_slow_dec(&nf_hooks_needed[pf][reg->hooknum]);
#endif
	} else {
		WARN_ONCE(1, "hook not found, pf %d num %d", pf, reg->hooknum);
	}

	p = __nf_hook_entries_try_shrink(p, pp);
	mutex_unlock(&nf_hook_mutex);
	if (!p)
		return;

	nf_queue_nf_hook_drop(net);
	nf_hook_entries_free(p);
}

void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
	if (reg->pf == NFPROTO_INET) {
		__nf_unregister_net_hook(net, NFPROTO_IPV4, reg);
		__nf_unregister_net_hook(net, NFPROTO_IPV6, reg);
	} else {
		__nf_unregister_net_hook(net, reg->pf, reg);
	}
}
EXPORT_SYMBOL(nf_unregister_net_hook);

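/* Raw counterpart of nf_hook_entries_insert_raw(): remove @reg from the
 * blob at @pp.  As with insertion, the caller must supply its own
 * exclusion against concurrent updaters.
 */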
void nf_hook_entries_delete_raw(struct nf_hook_entries __rcu **pp,
				const struct nf_hook_ops *reg)
{
	struct nf_hook_entries *p;

	p = rcu_dereference_raw(*pp);
	if (nf_remove_net_hook(p, reg)) {
		p = __nf_hook_entries_try_shrink(p, pp);
		nf_hook_entries_free(p);
	}
}
EXPORT_SYMBOL_GPL(nf_hook_entries_delete_raw);

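/* Register one hook in @net.  NFPROTO_INET is a convenience alias that
 * registers the hook for both IPv4 and IPv6 and unwinds on failure.
 *
 * Illustrative sketch (not part of this file; my_hookfn and my_ops are
 * made-up names) of a module registering an IPv4 hook:
 *
 *	static unsigned int my_hookfn(void *priv, struct sk_buff *skb,
 *				      const struct nf_hook_state *state)
 *	{
 *		return NF_ACCEPT;
 *	}
 *
 *	static const struct nf_hook_ops my_ops = {
 *		.hook		= my_hookfn,
 *		.pf		= NFPROTO_IPV4,
 *		.hooknum	= NF_INET_PRE_ROUTING,
 *		.priority	= NF_IP_PRI_FILTER,
 *	};
 *
 *	err = nf_register_net_hook(&init_net, &my_ops);
 */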
int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
	int err;

	if (reg->pf == NFPROTO_INET) {
		err = __nf_register_net_hook(net, NFPROTO_IPV4, reg);
		if (err < 0)
			return err;

		err = __nf_register_net_hook(net, NFPROTO_IPV6, reg);
		if (err < 0) {
			__nf_unregister_net_hook(net, NFPROTO_IPV4, reg);
			return err;
		}
	} else {
		err = __nf_register_net_hook(net, reg->pf, reg);
		if (err < 0)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL(nf_register_net_hook);

int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			  unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = nf_register_net_hook(net, &reg[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		nf_unregister_net_hooks(net, reg, i);
	return err;
}
EXPORT_SYMBOL(nf_register_net_hooks);

void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			     unsigned int hookcount)
{
	unsigned int i;

	for (i = 0; i < hookcount; i++)
		nf_unregister_net_hook(net, &reg[i]);
}
EXPORT_SYMBOL(nf_unregister_net_hooks);

/* Returns 1 if okfn() needs to be executed by the caller,
 * -EPERM for NF_DROP, 0 otherwise.  Caller must hold rcu_read_lock. */
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state,
		 const struct nf_hook_entries *e, unsigned int s)
{
	unsigned int verdict;
	int ret;

	for (; s < e->num_hook_entries; s++) {
		verdict = nf_hook_entry_hookfn(&e->hooks[s], skb, state);
		switch (verdict & NF_VERDICT_MASK) {
		case NF_ACCEPT:
			break;
		case NF_DROP:
			kfree_skb(skb);
			ret = NF_DROP_GETERR(verdict);
			if (ret == 0)
				ret = -EPERM;
			return ret;
		case NF_QUEUE:
			ret = nf_queue(skb, state, e, s, verdict);
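			/* A return of 1 means "not queued, keep going",
			 * e.g. queueing failed but the verdict carries
			 * the queue-bypass flag; resume at the next hook.
			 */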
			if (ret == 1)
				continue;
			return ret;
		default:
			/* Implicit handling for NF_STOLEN, as well as any other
			 * non-conventional verdicts.
			 */
			return 0;
		}
	}

	return 1;
}
EXPORT_SYMBOL(nf_hook_slow);


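/* Ensure the first @writable_len bytes of @skb are linear and not shared
 * with any clone, so the caller may mangle headers in place.
 * Returns 1 on success, 0 on failure.
 */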
int skb_make_writable(struct sk_buff *skb, unsigned int writable_len)
{
	if (writable_len > skb->len)
		return 0;

	/* Not exclusive use of packet?  Must copy. */
	if (!skb_cloned(skb)) {
		if (writable_len <= skb_headlen(skb))
			return 1;
	} else if (skb_clone_writable(skb, writable_len))
		return 1;

	if (writable_len <= skb_headlen(skb))
		writable_len = 0;
	else
		writable_len -= skb_headlen(skb);

	return !!__pskb_pull_tail(skb, writable_len);
}
EXPORT_SYMBOL(skb_make_writable);

/* This needs to be compiled in any case to avoid dependencies between the
 * nfnetlink_queue code and nf_conntrack.
 */
struct nfnl_ct_hook __rcu *nfnl_ct_hook __read_mostly;
EXPORT_SYMBOL_GPL(nfnl_ct_hook);

struct nf_ct_hook __rcu *nf_ct_hook __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_hook);

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
/* This does not belong here, but locally generated errors need it if
   connection tracking is in use: without this, a connection may not be in
   the hash table, and hence manufactured ICMP or RST packets will not be
   associated with it. */
void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *)
		__rcu __read_mostly;
EXPORT_SYMBOL(ip_ct_attach);

struct nf_nat_hook __rcu *nf_nat_hook __read_mostly;
EXPORT_SYMBOL_GPL(nf_nat_hook);

void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb)
{
	void (*attach)(struct sk_buff *, const struct sk_buff *);

	if (skb->_nfct) {
		rcu_read_lock();
		attach = rcu_dereference(ip_ct_attach);
		if (attach)
			attach(new, skb);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(nf_ct_attach);

void nf_conntrack_destroy(struct nf_conntrack *nfct)
{
	struct nf_ct_hook *ct_hook;

	rcu_read_lock();
	ct_hook = rcu_dereference(nf_ct_hook);
	BUG_ON(ct_hook == NULL);
	ct_hook->destroy(nfct);
	rcu_read_unlock();
}
EXPORT_SYMBOL(nf_conntrack_destroy);

bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
			 const struct sk_buff *skb)
{
	struct nf_ct_hook *ct_hook;
	bool ret = false;

	rcu_read_lock();
	ct_hook = rcu_dereference(nf_ct_hook);
	if (ct_hook)
		ret = ct_hook->get_tuple_skb(dst_tuple, skb);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(nf_ct_get_tuple_skb);

/* Built-in default zone used e.g. by modules. */
const struct nf_conntrack_zone nf_ct_zone_dflt = {
	.id	= NF_CT_DEFAULT_ZONE_ID,
	.dir	= NF_CT_DEFAULT_ZONE_DIR,
};
EXPORT_SYMBOL_GPL(nf_ct_zone_dflt);
#endif /* CONFIG_NF_CONNTRACK */

static void __net_init
__netfilter_net_init(struct nf_hook_entries __rcu **e, int max)
{
	int h;

	for (h = 0; h < max; h++)
		RCU_INIT_POINTER(e[h], NULL);
}

static int __net_init netfilter_net_init(struct net *net)
{
	__netfilter_net_init(net->nf.hooks_ipv4, ARRAY_SIZE(net->nf.hooks_ipv4));
	__netfilter_net_init(net->nf.hooks_ipv6, ARRAY_SIZE(net->nf.hooks_ipv6));
#ifdef CONFIG_NETFILTER_FAMILY_ARP
	__netfilter_net_init(net->nf.hooks_arp, ARRAY_SIZE(net->nf.hooks_arp));
#endif
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
	__netfilter_net_init(net->nf.hooks_bridge, ARRAY_SIZE(net->nf.hooks_bridge));
#endif
#if IS_ENABLED(CONFIG_DECNET)
	__netfilter_net_init(net->nf.hooks_decnet, ARRAY_SIZE(net->nf.hooks_decnet));
#endif

#ifdef CONFIG_PROC_FS
	net->nf.proc_netfilter = proc_net_mkdir(net, "netfilter",
						net->proc_net);
	if (!net->nf.proc_netfilter) {
		if (!net_eq(net, &init_net))
			pr_err("cannot create netfilter proc entry");

		return -ENOMEM;
	}
#endif

	return 0;
}

static void __net_exit netfilter_net_exit(struct net *net)
{
	remove_proc_entry("netfilter", net->proc_net);
}

static struct pernet_operations netfilter_net_ops = {
	.init = netfilter_net_init,
	.exit = netfilter_net_exit,
};

int __init netfilter_init(void)
{
	int ret;

	ret = register_pernet_subsys(&netfilter_net_ops);
	if (ret < 0)
		goto err;

	ret = netfilter_log_init();
	if (ret < 0)
		goto err_pernet;

	return 0;
err_pernet:
	unregister_pernet_subsys(&netfilter_net_ops);
err:
	return ret;
}