/*
 * This is a module which is used for queueing packets and communicating with
 * userspace via nfnetlink.
 *
 * (C) 2005 by Harald Welte <laforge@netfilter.org>
 * (C) 2007 by Patrick McHardy <kaber@trash.net>
 *
 * Based on the old ipv4-only ip_queue.c:
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/proc_fs.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_queue.h>
#include <linux/list.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/netfilter/nf_queue.h>
#include <net/netns/generic.h>
#include <net/netfilter/nfnetlink_queue.h>

#include <linux/atomic.h>

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
#include "../bridge/br_private.h"
#endif

#define NFQNL_QMAX_DEFAULT 1024

/* We're using struct nlattr which has 16bit nla_len. Note that nla_len
 * includes the header length. Thus, the maximum packet length that we
 * support is 65531 bytes. We send truncated packets if the specified length
 * is larger than that.  Userspace can check for presence of NFQA_CAP_LEN
 * attribute to detect truncation.
 */
#define NFQNL_MAX_COPY_RANGE (0xffff - NLA_HDRLEN)

struct nfqnl_instance {
	struct hlist_node hlist;		/* global list of queues */
	struct rcu_head rcu;

	int peer_portid;
	unsigned int queue_maxlen;
	unsigned int copy_range;
	unsigned int queue_dropped;
	unsigned int queue_user_dropped;


	u_int16_t queue_num;			/* number of this queue */
	u_int8_t copy_mode;
	u_int32_t flags;			/* Set using NFQA_CFG_FLAGS */
/*
 * Following fields are dirtied for each queued packet,
 * keep them in same cache line if possible.
 */
	spinlock_t	lock;
	unsigned int	queue_total;
	unsigned int	id_sequence;		/* 'sequence' of pkt ids */
	struct list_head queue_list;		/* packets in queue */
};

typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long);

static int nfnl_queue_net_id __read_mostly;

#define INSTANCE_BUCKETS	16
struct nfnl_queue_net {
	spinlock_t instances_lock;
	struct hlist_head instance_table[INSTANCE_BUCKETS];
};

static struct nfnl_queue_net *nfnl_queue_pernet(struct net *net)
{
	return net_generic(net, nfnl_queue_net_id);
}

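/* Spread the 16-bit queue number over the INSTANCE_BUCKETS hash buckets by
 * folding the high byte onto the low byte.
 */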
static inline u_int8_t instance_hashfn(u_int16_t queue_num)
{
	return ((queue_num >> 8) ^ queue_num) % INSTANCE_BUCKETS;
}

static struct nfqnl_instance *
instance_lookup(struct nfnl_queue_net *q, u_int16_t queue_num)
{
	struct hlist_head *head;
	struct nfqnl_instance *inst;

	head = &q->instance_table[instance_hashfn(queue_num)];
	hlist_for_each_entry_rcu(inst, head, hlist) {
		if (inst->queue_num == queue_num)
			return inst;
	}
	return NULL;
}

static struct nfqnl_instance *
instance_create(struct nfnl_queue_net *q, u_int16_t queue_num,
		int portid)
{
	struct nfqnl_instance *inst;
	unsigned int h;
	int err;

	spin_lock(&q->instances_lock);
	if (instance_lookup(q, queue_num)) {
		err = -EEXIST;
		goto out_unlock;
	}

	inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
	if (!inst) {
		err = -ENOMEM;
		goto out_unlock;
	}

	inst->queue_num = queue_num;
	inst->peer_portid = portid;
	inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
	inst->copy_range = NFQNL_MAX_COPY_RANGE;
	inst->copy_mode = NFQNL_COPY_NONE;
	spin_lock_init(&inst->lock);
	INIT_LIST_HEAD(&inst->queue_list);

	if (!try_module_get(THIS_MODULE)) {
		err = -EAGAIN;
		goto out_free;
	}

	h = instance_hashfn(queue_num);
	hlist_add_head_rcu(&inst->hlist, &q->instance_table[h]);

	spin_unlock(&q->instances_lock);

	return inst;

out_free:
	kfree(inst);
out_unlock:
	spin_unlock(&q->instances_lock);
	return ERR_PTR(err);
}

static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
			unsigned long data);

static void
instance_destroy_rcu(struct rcu_head *head)
{
	struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
						   rcu);

	nfqnl_flush(inst, NULL, 0);
	kfree(inst);
	module_put(THIS_MODULE);
}

static void
__instance_destroy(struct nfqnl_instance *inst)
{
	hlist_del_rcu(&inst->hlist);
	call_rcu(&inst->rcu, instance_destroy_rcu);
}

static void
instance_destroy(struct nfnl_queue_net *q, struct nfqnl_instance *inst)
{
	spin_lock(&q->instances_lock);
	__instance_destroy(inst);
	spin_unlock(&q->instances_lock);
}

static inline void
__enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
	list_add_tail(&entry->list, &queue->queue_list);
	queue->queue_total++;
}

static void
__dequeue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
	list_del(&entry->list);
	queue->queue_total--;
}

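/* Find the pending entry with the given packet id, unlink it from the
 * instance queue and return it; NULL if no such id is queued.
 */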
static struct nf_queue_entry *
find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
{
	struct nf_queue_entry *entry = NULL, *i;

	spin_lock_bh(&queue->lock);

	list_for_each_entry(i, &queue->queue_list, list) {
		if (i->id == id) {
			entry = i;
			break;
		}
	}

	if (entry)
		__dequeue_entry(queue, entry);

	spin_unlock_bh(&queue->lock);

	return entry;
}

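/* Drop every queued entry for which cmpfn(entry, data) is true (all entries
 * when cmpfn is NULL), reinjecting each one with an NF_DROP verdict.
 */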
static void
nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
{
	struct nf_queue_entry *entry, *next;

	spin_lock_bh(&queue->lock);
	list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
		if (!cmpfn || cmpfn(entry, data)) {
			list_del(&entry->list);
			queue->queue_total--;
			nf_reinject(entry, NF_DROP);
		}
	}
	spin_unlock_bh(&queue->lock);
}

static int
nfqnl_put_packet_info(struct sk_buff *nlskb, struct sk_buff *packet,
		      bool csum_verify)
{
	__u32 flags = 0;

	if (packet->ip_summed == CHECKSUM_PARTIAL)
		flags = NFQA_SKB_CSUMNOTREADY;
	else if (csum_verify)
		flags = NFQA_SKB_CSUM_NOTVERIFIED;

	if (skb_is_gso(packet))
		flags |= NFQA_SKB_GSO;

	return flags ? nla_put_be32(nlskb, NFQA_SKB_INFO, htonl(flags)) : 0;
}

static int nfqnl_put_sk_uidgid(struct sk_buff *skb, struct sock *sk)
{
	const struct cred *cred;

	if (!sk_fullsock(sk))
		return 0;

	read_lock_bh(&sk->sk_callback_lock);
	if (sk->sk_socket && sk->sk_socket->file) {
		cred = sk->sk_socket->file->f_cred;
		if (nla_put_be32(skb, NFQA_UID,
		    htonl(from_kuid_munged(&init_user_ns, cred->fsuid))))
			goto nla_put_failure;
		if (nla_put_be32(skb, NFQA_GID,
		    htonl(from_kgid_munged(&init_user_ns, cred->fsgid))))
			goto nla_put_failure;
	}
	read_unlock_bh(&sk->sk_callback_lock);
	return 0;

nla_put_failure:
	read_unlock_bh(&sk->sk_callback_lock);
	return -1;
}

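/* Build the NFQNL_MSG_PACKET netlink message describing @entry. The packet
 * id is not known yet; *packet_id_ptr is pointed at the id field so the
 * caller can fill it in once the id has been assigned under the queue lock.
 */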
static struct sk_buff *
nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
			   struct nf_queue_entry *entry,
			   __be32 **packet_id_ptr)
{
	size_t size;
	size_t data_len = 0, cap_len = 0;
	unsigned int hlen = 0;
	struct sk_buff *skb;
	struct nlattr *nla;
	struct nfqnl_msg_packet_hdr *pmsg;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	struct sk_buff *entskb = entry->skb;
	struct net_device *indev;
	struct net_device *outdev;
	struct nf_conn *ct = NULL;
	enum ip_conntrack_info uninitialized_var(ctinfo);
	bool csum_verify;

	size =    nlmsg_total_size(sizeof(struct nfgenmsg))
		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#endif
		+ nla_total_size(sizeof(u_int32_t))	/* mark */
		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
		+ nla_total_size(sizeof(u_int32_t))	/* skbinfo */
		+ nla_total_size(sizeof(u_int32_t));	/* cap_len */

	if (entskb->tstamp.tv64)
		size += nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));

	if (entry->hook <= NF_INET_FORWARD ||
	   (entry->hook == NF_INET_POST_ROUTING && entskb->sk == NULL))
		csum_verify = !skb_csum_unnecessary(entskb);
	else
		csum_verify = false;

	outdev = entry->outdev;

	switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) {
	case NFQNL_COPY_META:
	case NFQNL_COPY_NONE:
		break;

	case NFQNL_COPY_PACKET:
		if (!(queue->flags & NFQA_CFG_F_GSO) &&
		    entskb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(entskb))
			return NULL;

		data_len = ACCESS_ONCE(queue->copy_range);
		if (data_len > entskb->len)
			data_len = entskb->len;

		hlen = skb_zerocopy_headlen(entskb);
		hlen = min_t(unsigned int, hlen, data_len);
		size += sizeof(struct nlattr) + hlen;
		cap_len = entskb->len;
		break;
	}

	if (queue->flags & NFQA_CFG_F_CONNTRACK)
		ct = nfqnl_ct_get(entskb, &size, &ctinfo);

	if (queue->flags & NFQA_CFG_F_UID_GID) {
		size +=  (nla_total_size(sizeof(u_int32_t))	/* uid */
			+ nla_total_size(sizeof(u_int32_t)));	/* gid */
	}

	skb = nfnetlink_alloc_skb(net, size, queue->peer_portid,
				  GFP_ATOMIC);
	if (!skb) {
		skb_tx_error(entskb);
		return NULL;
	}

	nlh = nlmsg_put(skb, 0, 0,
			NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
			sizeof(struct nfgenmsg), 0);
	if (!nlh) {
		skb_tx_error(entskb);
		kfree_skb(skb);
		return NULL;
	}
	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = entry->pf;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(queue->queue_num);

	nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg));
	pmsg = nla_data(nla);
	pmsg->hw_protocol	= entskb->protocol;
	pmsg->hook		= entry->hook;
	*packet_id_ptr		= &pmsg->packet_id;

	indev = entry->indev;
	if (indev) {
#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
		if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)))
			goto nla_put_failure;
#else
		if (entry->pf == PF_BRIDGE) {
			/* Case 1: indev is physical input device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			if (nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
					 htonl(indev->ifindex)) ||
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by __nf_queue */
			    nla_put_be32(skb, NFQA_IFINDEX_INDEV,
					 htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
				goto nla_put_failure;
		} else {
			/* Case 2: indev is bridge group, we need to look for
			 * physical device (when called from ipv4) */
			if (nla_put_be32(skb, NFQA_IFINDEX_INDEV,
					 htonl(indev->ifindex)))
				goto nla_put_failure;
			if (entskb->nf_bridge && entskb->nf_bridge->physindev &&
			    nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
					 htonl(entskb->nf_bridge->physindev->ifindex)))
				goto nla_put_failure;
		}
#endif
	}

	if (outdev) {
#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
		if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)))
			goto nla_put_failure;
#else
		if (entry->pf == PF_BRIDGE) {
			/* Case 1: outdev is physical output device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			if (nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
					 htonl(outdev->ifindex)) ||
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by __nf_queue */
			    nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
					 htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
				goto nla_put_failure;
		} else {
			/* Case 2: outdev is bridge group, we need to look for
			 * physical output device (when called from ipv4) */
			if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
					 htonl(outdev->ifindex)))
				goto nla_put_failure;
			if (entskb->nf_bridge && entskb->nf_bridge->physoutdev &&
			    nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
					 htonl(entskb->nf_bridge->physoutdev->ifindex)))
				goto nla_put_failure;
		}
#endif
	}

	if (entskb->mark &&
	    nla_put_be32(skb, NFQA_MARK, htonl(entskb->mark)))
		goto nla_put_failure;

	if (indev && entskb->dev &&
	    entskb->mac_header != entskb->network_header) {
		struct nfqnl_msg_packet_hw phw;
		int len;

		memset(&phw, 0, sizeof(phw));
		len = dev_parse_header(entskb, phw.hw_addr);
		if (len) {
			phw.hw_addrlen = htons(len);
			if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
				goto nla_put_failure;
		}
	}

	if (entskb->tstamp.tv64) {
		struct nfqnl_msg_packet_timestamp ts;
		struct timeval tv = ktime_to_timeval(entskb->tstamp);
		ts.sec = cpu_to_be64(tv.tv_sec);
		ts.usec = cpu_to_be64(tv.tv_usec);

		if (nla_put(skb, NFQA_TIMESTAMP, sizeof(ts), &ts))
			goto nla_put_failure;
	}

	if ((queue->flags & NFQA_CFG_F_UID_GID) && entskb->sk &&
	    nfqnl_put_sk_uidgid(skb, entskb->sk) < 0)
		goto nla_put_failure;

	if (ct && nfqnl_ct_put(skb, ct, ctinfo) < 0)
		goto nla_put_failure;

	if (cap_len > data_len &&
	    nla_put_be32(skb, NFQA_CAP_LEN, htonl(cap_len)))
		goto nla_put_failure;

	if (nfqnl_put_packet_info(skb, entskb, csum_verify))
		goto nla_put_failure;

	if (data_len) {
		struct nlattr *nla;

		if (skb_tailroom(skb) < sizeof(*nla) + hlen)
			goto nla_put_failure;

		nla = (struct nlattr *)skb_put(skb, sizeof(*nla));
		nla->nla_type = NFQA_PAYLOAD;
		nla->nla_len = nla_attr_size(data_len);

		if (skb_zerocopy(skb, entskb, data_len, hlen))
			goto nla_put_failure;
	}

	nlh->nlmsg_len = skb->len;
	return skb;

nla_put_failure:
	skb_tx_error(entskb);
	kfree_skb(skb);
	net_err_ratelimited("nf_queue: error creating packet message\n");
	return NULL;
}

static int
__nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue,
			struct nf_queue_entry *entry)
{
	struct sk_buff *nskb;
	int err = -ENOBUFS;
	__be32 *packet_id_ptr;
	int failopen = 0;

	nskb = nfqnl_build_packet_message(net, queue, entry, &packet_id_ptr);
	if (nskb == NULL) {
		err = -ENOMEM;
		goto err_out;
	}
	spin_lock_bh(&queue->lock);

	if (queue->queue_total >= queue->queue_maxlen) {
		if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
			failopen = 1;
			err = 0;
		} else {
			queue->queue_dropped++;
			net_warn_ratelimited("nf_queue: full at %d entries, dropping packet(s)\n",
					     queue->queue_total);
		}
		goto err_out_free_nskb;
	}
	entry->id = ++queue->id_sequence;
	*packet_id_ptr = htonl(entry->id);

	/* nfnetlink_unicast will either free the nskb or add it to a socket */
	err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT);
	if (err < 0) {
		queue->queue_user_dropped++;
		goto err_out_unlock;
	}

	__enqueue_entry(queue, entry);

	spin_unlock_bh(&queue->lock);
	return 0;

err_out_free_nskb:
	kfree_skb(nskb);
err_out_unlock:
	spin_unlock_bh(&queue->lock);
	if (failopen)
		nf_reinject(entry, NF_ACCEPT);
err_out:
	return err;
}

static struct nf_queue_entry *
nf_queue_entry_dup(struct nf_queue_entry *e)
{
	struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC);
	if (entry) {
		if (nf_queue_entry_get_refs(entry))
			return entry;
		kfree(entry);
	}
	return NULL;
}

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
/* When called from bridge netfilter, skb->data must point to MAC header
 * before calling skb_gso_segment(). Else, original MAC header is lost
 * and segmented skbs will be sent to wrong destination.
 */
static void nf_bridge_adjust_skb_data(struct sk_buff *skb)
{
	if (skb->nf_bridge)
		__skb_push(skb, skb->network_header - skb->mac_header);
}

static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
{
	if (skb->nf_bridge)
		__skb_pull(skb, skb->network_header - skb->mac_header);
}
#else
#define nf_bridge_adjust_skb_data(s) do {} while (0)
#define nf_bridge_adjust_segmented_data(s) do {} while (0)
#endif

static void free_entry(struct nf_queue_entry *entry)
{
	nf_queue_entry_release_refs(entry);
	kfree(entry);
}

static int
__nfqnl_enqueue_packet_gso(struct net *net, struct nfqnl_instance *queue,
			   struct sk_buff *skb, struct nf_queue_entry *entry)
{
	int ret = -ENOMEM;
	struct nf_queue_entry *entry_seg;

	nf_bridge_adjust_segmented_data(skb);

	if (skb->next == NULL) { /* last packet, no need to copy entry */
		struct sk_buff *gso_skb = entry->skb;
		entry->skb = skb;
		ret = __nfqnl_enqueue_packet(net, queue, entry);
		if (ret)
			entry->skb = gso_skb;
		return ret;
	}

	skb->next = NULL;

	entry_seg = nf_queue_entry_dup(entry);
	if (entry_seg) {
		entry_seg->skb = skb;
		ret = __nfqnl_enqueue_packet(net, queue, entry_seg);
		if (ret)
			free_entry(entry_seg);
	}
	return ret;
}

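/* Queue handler entry point (nfqh.outfn). If the peer did not request
 * NFQA_CFG_F_GSO, GSO skbs are segmented in software and every segment is
 * queued as a separate entry.
 */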
static int
nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
{
	unsigned int queued;
	struct nfqnl_instance *queue;
	struct sk_buff *skb, *segs;
	int err = -ENOBUFS;
	struct net *net = dev_net(entry->indev ?
				  entry->indev : entry->outdev);
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);

	/* rcu_read_lock()ed by nf_hook_slow() */
	queue = instance_lookup(q, queuenum);
	if (!queue)
		return -ESRCH;

	if (queue->copy_mode == NFQNL_COPY_NONE)
		return -EINVAL;

	skb = entry->skb;

	switch (entry->pf) {
	case NFPROTO_IPV4:
		skb->protocol = htons(ETH_P_IP);
		break;
	case NFPROTO_IPV6:
		skb->protocol = htons(ETH_P_IPV6);
		break;
	}

	if ((queue->flags & NFQA_CFG_F_GSO) || !skb_is_gso(skb))
		return __nfqnl_enqueue_packet(net, queue, entry);

	nf_bridge_adjust_skb_data(skb);
	segs = skb_gso_segment(skb, 0);
	/* Does not use PTR_ERR to limit the number of error codes that can be
	 * returned by nf_queue.  For instance, callers rely on -ECANCELED to
	 * mean 'ignore this hook'.
	 */
	if (IS_ERR_OR_NULL(segs))
		goto out_err;
	queued = 0;
	err = 0;
	do {
		struct sk_buff *nskb = segs->next;
		if (err == 0)
			err = __nfqnl_enqueue_packet_gso(net, queue,
							segs, entry);
		if (err == 0)
			queued++;
		else
			kfree_skb(segs);
		segs = nskb;
	} while (segs);

	if (queued) {
		if (err) /* some segments are already queued */
			free_entry(entry);
		kfree_skb(skb);
		return 0;
	}
 out_err:
	nf_bridge_adjust_segmented_data(skb);
	return err;
}

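/* Replace the payload of the queued skb with data supplied by userspace,
 * trimming or growing the skb first (@diff is the length delta).
 */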
static int
nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e, int diff)
{
	struct sk_buff *nskb;

	if (diff < 0) {
		if (pskb_trim(e->skb, data_len))
			return -ENOMEM;
	} else if (diff > 0) {
		if (data_len > 0xFFFF)
			return -EINVAL;
		if (diff > skb_tailroom(e->skb)) {
			nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
					       diff, GFP_ATOMIC);
			if (!nskb) {
				printk(KERN_WARNING "nf_queue: OOM "
				      "in mangle, dropping packet\n");
				return -ENOMEM;
			}
			kfree_skb(e->skb);
			e->skb = nskb;
		}
		skb_put(e->skb, diff);
	}
	if (!skb_make_writable(e->skb, data_len))
		return -ENOMEM;
	skb_copy_to_linear_data(e->skb, data, data_len);
	e->skb->ip_summed = CHECKSUM_NONE;
	return 0;
}

static int
nfqnl_set_mode(struct nfqnl_instance *queue,
	       unsigned char mode, unsigned int range)
{
	int status = 0;

	spin_lock_bh(&queue->lock);
	switch (mode) {
	case NFQNL_COPY_NONE:
	case NFQNL_COPY_META:
		queue->copy_mode = mode;
		queue->copy_range = 0;
		break;

	case NFQNL_COPY_PACKET:
		queue->copy_mode = mode;
		if (range == 0 || range > NFQNL_MAX_COPY_RANGE)
			queue->copy_range = NFQNL_MAX_COPY_RANGE;
		else
			queue->copy_range = range;
		break;

	default:
		status = -EINVAL;

	}
	spin_unlock_bh(&queue->lock);

	return status;
}

static int
dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
{
	if (entry->indev)
		if (entry->indev->ifindex == ifindex)
			return 1;
	if (entry->outdev)
		if (entry->outdev->ifindex == ifindex)
			return 1;
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (entry->skb->nf_bridge) {
		if (entry->skb->nf_bridge->physindev &&
		    entry->skb->nf_bridge->physindev->ifindex == ifindex)
			return 1;
		if (entry->skb->nf_bridge->physoutdev &&
		    entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
			return 1;
	}
#endif
	return 0;
}

/* drop all packets with either indev or outdev == ifindex from all queue
 * instances */
static void
nfqnl_dev_drop(struct net *net, int ifindex)
{
	int i;
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);

	rcu_read_lock();

	for (i = 0; i < INSTANCE_BUCKETS; i++) {
		struct nfqnl_instance *inst;
		struct hlist_head *head = &q->instance_table[i];

		hlist_for_each_entry_rcu(inst, head, hlist)
			nfqnl_flush(inst, dev_cmp, ifindex);
	}

	rcu_read_unlock();
}

#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)

static int
nfqnl_rcv_dev_event(struct notifier_block *this,
		    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* Drop any packets associated with the downed device */
	if (event == NETDEV_DOWN)
		nfqnl_dev_drop(dev_net(dev), dev->ifindex);
	return NOTIFY_DONE;
}

static struct notifier_block nfqnl_dev_notifier = {
	.notifier_call	= nfqnl_rcv_dev_event,
};

static int
nfqnl_rcv_nl_event(struct notifier_block *this,
		   unsigned long event, void *ptr)
{
	struct netlink_notify *n = ptr;
	struct nfnl_queue_net *q = nfnl_queue_pernet(n->net);

	if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
		int i;

		/* destroy all instances for this portid */
		spin_lock(&q->instances_lock);
		for (i = 0; i < INSTANCE_BUCKETS; i++) {
			struct hlist_node *t2;
			struct nfqnl_instance *inst;
			struct hlist_head *head = &q->instance_table[i];

			hlist_for_each_entry_safe(inst, t2, head, hlist) {
				if (n->portid == inst->peer_portid)
					__instance_destroy(inst);
			}
		}
		spin_unlock(&q->instances_lock);
	}
	return NOTIFY_DONE;
}

static struct notifier_block nfqnl_rtnl_notifier = {
	.notifier_call	= nfqnl_rcv_nl_event,
};

static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
	[NFQA_VERDICT_HDR]	= { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
	[NFQA_MARK]		= { .type = NLA_U32 },
	[NFQA_PAYLOAD]		= { .type = NLA_UNSPEC },
	[NFQA_CT]		= { .type = NLA_UNSPEC },
	[NFQA_EXP]		= { .type = NLA_UNSPEC },
};

static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
	[NFQA_VERDICT_HDR]	= { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
	[NFQA_MARK]		= { .type = NLA_U32 },
};

static struct nfqnl_instance *
verdict_instance_lookup(struct nfnl_queue_net *q, u16 queue_num, int nlportid)
{
	struct nfqnl_instance *queue;

	queue = instance_lookup(q, queue_num);
	if (!queue)
		return ERR_PTR(-ENODEV);

	if (queue->peer_portid != nlportid)
		return ERR_PTR(-EPERM);

	return queue;
}

static struct nfqnl_msg_verdict_hdr*
verdicthdr_get(const struct nlattr * const nfqa[])
{
	struct nfqnl_msg_verdict_hdr *vhdr;
	unsigned int verdict;

	if (!nfqa[NFQA_VERDICT_HDR])
		return NULL;

	vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
	verdict = ntohl(vhdr->verdict) & NF_VERDICT_MASK;
	if (verdict > NF_MAX_VERDICT || verdict == NF_STOLEN)
		return NULL;
	return vhdr;
}

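/* Wrap-around safe comparison: true if @id comes after @max in packet id
 * sequence space.
 */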
static int nfq_id_after(unsigned int id, unsigned int max)
{
	return (int)(id - max) > 0;
}

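/* NFQNL_MSG_VERDICT_BATCH: apply a single verdict (and optional mark) to
 * every queued entry whose id does not come after the id in the verdict
 * header.
 */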
static int
nfqnl_recv_verdict_batch(struct sock *ctnl, struct sk_buff *skb,
		   const struct nlmsghdr *nlh,
		   const struct nlattr * const nfqa[])
{
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	struct nf_queue_entry *entry, *tmp;
	unsigned int verdict, maxid;
	struct nfqnl_msg_verdict_hdr *vhdr;
	struct nfqnl_instance *queue;
	LIST_HEAD(batch_list);
	u16 queue_num = ntohs(nfmsg->res_id);

	struct net *net = sock_net(ctnl);
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);

	queue = verdict_instance_lookup(q, queue_num,
					NETLINK_CB(skb).portid);
	if (IS_ERR(queue))
		return PTR_ERR(queue);

	vhdr = verdicthdr_get(nfqa);
	if (!vhdr)
		return -EINVAL;

	verdict = ntohl(vhdr->verdict);
	maxid = ntohl(vhdr->id);

	spin_lock_bh(&queue->lock);

	list_for_each_entry_safe(entry, tmp, &queue->queue_list, list) {
		if (nfq_id_after(entry->id, maxid))
			break;
		__dequeue_entry(queue, entry);
		list_add_tail(&entry->list, &batch_list);
	}

	spin_unlock_bh(&queue->lock);

	if (list_empty(&batch_list))
		return -ENOENT;

	list_for_each_entry_safe(entry, tmp, &batch_list, list) {
		if (nfqa[NFQA_MARK])
			entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
		nf_reinject(entry, verdict);
	}
	return 0;
}

static int
nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
		   const struct nlmsghdr *nlh,
		   const struct nlattr * const nfqa[])
{
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int16_t queue_num = ntohs(nfmsg->res_id);

	struct nfqnl_msg_verdict_hdr *vhdr;
	struct nfqnl_instance *queue;
	unsigned int verdict;
	struct nf_queue_entry *entry;
	enum ip_conntrack_info uninitialized_var(ctinfo);
	struct nf_conn *ct = NULL;

	struct net *net = sock_net(ctnl);
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);

	queue = instance_lookup(q, queue_num);
	if (!queue)
		queue = verdict_instance_lookup(q, queue_num,
						NETLINK_CB(skb).portid);
	if (IS_ERR(queue))
		return PTR_ERR(queue);

	vhdr = verdicthdr_get(nfqa);
	if (!vhdr)
		return -EINVAL;

	verdict = ntohl(vhdr->verdict);

	entry = find_dequeue_entry(queue, ntohl(vhdr->id));
	if (entry == NULL)
		return -ENOENT;

	if (nfqa[NFQA_CT]) {
		ct = nfqnl_ct_parse(entry->skb, nfqa[NFQA_CT], &ctinfo);
		if (ct && nfqa[NFQA_EXP]) {
			nfqnl_attach_expect(ct, nfqa[NFQA_EXP],
					    NETLINK_CB(skb).portid,
					    nlmsg_report(nlh));
		}
	}

	if (nfqa[NFQA_PAYLOAD]) {
		u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]);
		int diff = payload_len - entry->skb->len;

		if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
				 payload_len, entry, diff) < 0)
			verdict = NF_DROP;

		if (ct)
			nfqnl_ct_seq_adjust(entry->skb, ct, ctinfo, diff);
	}

	if (nfqa[NFQA_MARK])
		entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));

	nf_reinject(entry, verdict);
	return 0;
}

static int
nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
		  const struct nlmsghdr *nlh,
		  const struct nlattr * const nfqa[])
{
	return -ENOTSUPP;
}

static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
	[NFQA_CFG_CMD]		= { .len = sizeof(struct nfqnl_msg_config_cmd) },
	[NFQA_CFG_PARAMS]	= { .len = sizeof(struct nfqnl_msg_config_params) },
};

static const struct nf_queue_handler nfqh = {
	.outfn	= &nfqnl_enqueue_packet,
};

static int
nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
		  const struct nlmsghdr *nlh,
		  const struct nlattr * const nfqa[])
{
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int16_t queue_num = ntohs(nfmsg->res_id);
	struct nfqnl_instance *queue;
	struct nfqnl_msg_config_cmd *cmd = NULL;
	struct net *net = sock_net(ctnl);
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
	int ret = 0;

	if (nfqa[NFQA_CFG_CMD]) {
		cmd = nla_data(nfqa[NFQA_CFG_CMD]);

		/* Obsolete commands without queue context */
		switch (cmd->command) {
		case NFQNL_CFG_CMD_PF_BIND: return 0;
		case NFQNL_CFG_CMD_PF_UNBIND: return 0;
		}
	}

	rcu_read_lock();
	queue = instance_lookup(q, queue_num);
	if (queue && queue->peer_portid != NETLINK_CB(skb).portid) {
		ret = -EPERM;
		goto err_out_unlock;
	}

	if (cmd != NULL) {
		switch (cmd->command) {
		case NFQNL_CFG_CMD_BIND:
			if (queue) {
				ret = -EBUSY;
				goto err_out_unlock;
			}
			queue = instance_create(q, queue_num,
						NETLINK_CB(skb).portid);
			if (IS_ERR(queue)) {
				ret = PTR_ERR(queue);
				goto err_out_unlock;
			}
			break;
		case NFQNL_CFG_CMD_UNBIND:
			if (!queue) {
				ret = -ENODEV;
				goto err_out_unlock;
			}
			instance_destroy(q, queue);
			break;
		case NFQNL_CFG_CMD_PF_BIND:
		case NFQNL_CFG_CMD_PF_UNBIND:
			break;
		default:
			ret = -ENOTSUPP;
			break;
		}
	}

	if (nfqa[NFQA_CFG_PARAMS]) {
		struct nfqnl_msg_config_params *params;

		if (!queue) {
			ret = -ENODEV;
			goto err_out_unlock;
		}
		params = nla_data(nfqa[NFQA_CFG_PARAMS]);
		nfqnl_set_mode(queue, params->copy_mode,
				ntohl(params->copy_range));
	}

	if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) {
		__be32 *queue_maxlen;

		if (!queue) {
			ret = -ENODEV;
			goto err_out_unlock;
		}
		queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]);
		spin_lock_bh(&queue->lock);
		queue->queue_maxlen = ntohl(*queue_maxlen);
		spin_unlock_bh(&queue->lock);
	}

	if (nfqa[NFQA_CFG_FLAGS]) {
		__u32 flags, mask;

		if (!queue) {
			ret = -ENODEV;
			goto err_out_unlock;
		}

		if (!nfqa[NFQA_CFG_MASK]) {
			/* A mask is needed to specify which flags are being
			 * changed.
			 */
			ret = -EINVAL;
			goto err_out_unlock;
		}

		flags = ntohl(nla_get_be32(nfqa[NFQA_CFG_FLAGS]));
		mask = ntohl(nla_get_be32(nfqa[NFQA_CFG_MASK]));

		if (flags >= NFQA_CFG_F_MAX) {
			ret = -EOPNOTSUPP;
			goto err_out_unlock;
		}

		spin_lock_bh(&queue->lock);
		queue->flags &= ~mask;
		queue->flags |= flags & mask;
		spin_unlock_bh(&queue->lock);
	}

err_out_unlock:
	rcu_read_unlock();
	return ret;
}

static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
	[NFQNL_MSG_PACKET]	= { .call_rcu = nfqnl_recv_unsupp,
				    .attr_count = NFQA_MAX, },
	[NFQNL_MSG_VERDICT]	= { .call_rcu = nfqnl_recv_verdict,
				    .attr_count = NFQA_MAX,
				    .policy = nfqa_verdict_policy },
	[NFQNL_MSG_CONFIG]	= { .call = nfqnl_recv_config,
				    .attr_count = NFQA_CFG_MAX,
				    .policy = nfqa_cfg_policy },
	[NFQNL_MSG_VERDICT_BATCH]={ .call_rcu = nfqnl_recv_verdict_batch,
				    .attr_count = NFQA_MAX,
				    .policy = nfqa_verdict_batch_policy },
};

static const struct nfnetlink_subsystem nfqnl_subsys = {
	.name		= "nf_queue",
	.subsys_id	= NFNL_SUBSYS_QUEUE,
	.cb_count	= NFQNL_MSG_MAX,
	.cb		= nfqnl_cb,
};

#ifdef CONFIG_PROC_FS
struct iter_state {
	struct seq_net_private p;
	unsigned int bucket;
};

static struct hlist_node *get_first(struct seq_file *seq)
{
	struct iter_state *st = seq->private;
	struct net *net;
	struct nfnl_queue_net *q;

	if (!st)
		return NULL;

	net = seq_file_net(seq);
	q = nfnl_queue_pernet(net);
	for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
		if (!hlist_empty(&q->instance_table[st->bucket]))
			return q->instance_table[st->bucket].first;
	}
	return NULL;
}

static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
{
	struct iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	h = h->next;
	while (!h) {
		struct nfnl_queue_net *q;

		if (++st->bucket >= INSTANCE_BUCKETS)
			return NULL;

		q = nfnl_queue_pernet(net);
		h = q->instance_table[st->bucket].first;
	}
	return h;
}

static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
{
	struct hlist_node *head;
	head = get_first(seq);

	if (head)
		while (pos && (head = get_next(seq, head)))
			pos--;
	return pos ? NULL : head;
}

static void *seq_start(struct seq_file *s, loff_t *pos)
	__acquires(nfnl_queue_pernet(seq_file_net(s))->instances_lock)
{
	spin_lock(&nfnl_queue_pernet(seq_file_net(s))->instances_lock);
	return get_idx(s, *pos);
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;
	return get_next(s, v);
}

static void seq_stop(struct seq_file *s, void *v)
	__releases(nfnl_queue_pernet(seq_file_net(s))->instances_lock)
{
	spin_unlock(&nfnl_queue_pernet(seq_file_net(s))->instances_lock);
}

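/* One line per instance: queue number, peer portid, queue length, copy
 * mode, copy range, dropped, user dropped, id sequence, always 1.
 */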
static int seq_show(struct seq_file *s, void *v)
{
	const struct nfqnl_instance *inst = v;

	seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n",
		   inst->queue_num,
		   inst->peer_portid, inst->queue_total,
		   inst->copy_mode, inst->copy_range,
		   inst->queue_dropped, inst->queue_user_dropped,
		   inst->id_sequence, 1);
	return seq_has_overflowed(s);
}

static const struct seq_operations nfqnl_seq_ops = {
	.start	= seq_start,
	.next	= seq_next,
	.stop	= seq_stop,
	.show	= seq_show,
};

static int nfqnl_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &nfqnl_seq_ops,
			sizeof(struct iter_state));
}

static const struct file_operations nfqnl_file_ops = {
	.owner	 = THIS_MODULE,
	.open	 = nfqnl_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};

#endif /* PROC_FS */

static int __net_init nfnl_queue_net_init(struct net *net)
{
	unsigned int i;
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);

	for (i = 0; i < INSTANCE_BUCKETS; i++)
		INIT_HLIST_HEAD(&q->instance_table[i]);

	spin_lock_init(&q->instances_lock);

#ifdef CONFIG_PROC_FS
	if (!proc_create("nfnetlink_queue", 0440,
			 net->nf.proc_netfilter, &nfqnl_file_ops))
		return -ENOMEM;
#endif
	return 0;
}

static void __net_exit nfnl_queue_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter);
#endif
}

static struct pernet_operations nfnl_queue_net_ops = {
	.init	= nfnl_queue_net_init,
	.exit	= nfnl_queue_net_exit,
	.id	= &nfnl_queue_net_id,
	.size	= sizeof(struct nfnl_queue_net),
};

static int __init nfnetlink_queue_init(void)
{
	int status = -ENOMEM;

	netlink_register_notifier(&nfqnl_rtnl_notifier);
	status = nfnetlink_subsys_register(&nfqnl_subsys);
	if (status < 0) {
		pr_err("nf_queue: failed to create netlink socket\n");
		goto cleanup_netlink_notifier;
	}

	status = register_pernet_subsys(&nfnl_queue_net_ops);
	if (status < 0) {
		pr_err("nf_queue: failed to register pernet ops\n");
		goto cleanup_subsys;
	}
	register_netdevice_notifier(&nfqnl_dev_notifier);
	nf_register_queue_handler(&nfqh);
	return status;

cleanup_subsys:
	nfnetlink_subsys_unregister(&nfqnl_subsys);
cleanup_netlink_notifier:
	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
	return status;
}

static void __exit nfnetlink_queue_fini(void)
{
	nf_unregister_queue_handler();
	unregister_netdevice_notifier(&nfqnl_dev_notifier);
	unregister_pernet_subsys(&nfnl_queue_net_ops);
	nfnetlink_subsys_unregister(&nfqnl_subsys);
	netlink_unregister_notifier(&nfqnl_rtnl_notifier);

	rcu_barrier(); /* Wait for completion of call_rcu()'s */
}

MODULE_DESCRIPTION("netfilter packet queue handler");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);

module_init(nfnetlink_queue_init);
module_exit(nfnetlink_queue_fini);