/* Event cache for netfilter. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>

static DEFINE_MUTEX(nf_ct_ecache_mutex);

30 31
/* deliver cached events and clear cache entry - must be called with locally
 * disabled softirqs */
32
void nf_ct_deliver_cached_events(struct nf_conn *ct)
33
{
34
	struct net *net = nf_ct_net(ct);
35
	unsigned long events, missed;
36
	struct nf_ct_event_notifier *notify;
37
	struct nf_conntrack_ecache *e;
38 39
	struct nf_ct_event item;
	int ret;
40 41

	rcu_read_lock();
42
	notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
43 44 45
	if (notify == NULL)
		goto out_unlock;

46 47 48 49 50 51
	e = nf_ct_ecache_find(ct);
	if (e == NULL)
		goto out_unlock;

	events = xchg(&e->cache, 0);

52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77
	if (!nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct) || !events)
		goto out_unlock;

	/* We make a copy of the missed event cache without taking
	 * the lock, thus we may send missed events twice. However,
	 * this does not harm and it happens very rarely. */
	missed = e->missed;

	if (!((events | missed) & e->ctmask))
		goto out_unlock;

	item.ct = ct;
	item.pid = 0;
	item.report = 0;

	ret = notify->fcn(events | missed, &item);

	if (likely(ret >= 0 && !missed))
		goto out_unlock;

	spin_lock_bh(&ct->lock);
	if (ret < 0)
		e->missed |= events;
	else
		e->missed &= ~missed;
	spin_unlock_bh(&ct->lock);
78

79 80
out_unlock:
	rcu_read_unlock();
81
}
82
EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
83

84 85
int nf_conntrack_register_notifier(struct net *net,
				   struct nf_ct_event_notifier *new)
86
{
87
	int ret;
88
	struct nf_ct_event_notifier *notify;
89 90

	mutex_lock(&nf_ct_ecache_mutex);
91
	notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
92 93
					   lockdep_is_held(&nf_ct_ecache_mutex));
	if (notify != NULL) {
94 95 96
		ret = -EBUSY;
		goto out_unlock;
	}
97
	rcu_assign_pointer(net->ct.nf_conntrack_event_cb, new);
98
	ret = 0;
99 100 101 102

out_unlock:
	mutex_unlock(&nf_ct_ecache_mutex);
	return ret;
103 104 105
}
EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);

106 107
void nf_conntrack_unregister_notifier(struct net *net,
				      struct nf_ct_event_notifier *new)
108
{
109 110
	struct nf_ct_event_notifier *notify;

111
	mutex_lock(&nf_ct_ecache_mutex);
112
	notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
113 114
					   lockdep_is_held(&nf_ct_ecache_mutex));
	BUG_ON(notify != new);
115
	RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL);
116
	mutex_unlock(&nf_ct_ecache_mutex);
117 118 119
}
EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);

120 121
int nf_ct_expect_register_notifier(struct net *net,
				   struct nf_exp_event_notifier *new)
122
{
123
	int ret;
124
	struct nf_exp_event_notifier *notify;
125 126

	mutex_lock(&nf_ct_ecache_mutex);
127
	notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
128 129
					   lockdep_is_held(&nf_ct_ecache_mutex));
	if (notify != NULL) {
130 131 132
		ret = -EBUSY;
		goto out_unlock;
	}
133
	rcu_assign_pointer(net->ct.nf_expect_event_cb, new);
134
	ret = 0;
135 136 137 138

out_unlock:
	mutex_unlock(&nf_ct_ecache_mutex);
	return ret;
139
}
140
EXPORT_SYMBOL_GPL(nf_ct_expect_register_notifier);
141

142 143
void nf_ct_expect_unregister_notifier(struct net *net,
				      struct nf_exp_event_notifier *new)
144
{
145 146
	struct nf_exp_event_notifier *notify;

147
	mutex_lock(&nf_ct_ecache_mutex);
148
	notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
149 150
					   lockdep_is_held(&nf_ct_ecache_mutex));
	BUG_ON(notify != new);
151
	RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL);
152
	mutex_unlock(&nf_ct_ecache_mutex);
153
}
154
EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
155 156 157

#define NF_CT_EVENTS_DEFAULT 1
static int nf_ct_events __read_mostly = NF_CT_EVENTS_DEFAULT;
158
static int nf_ct_events_retry_timeout __read_mostly = 15*HZ;
159 160 161 162 163 164 165 166 167 168

#ifdef CONFIG_SYSCTL
static struct ctl_table event_sysctl_table[] = {
	{
		.procname	= "nf_conntrack_events",
		.data		= &init_net.ct.sysctl_events,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
169 170 171 172 173 174 175
	{
		.procname	= "nf_conntrack_events_retry_timeout",
		.data		= &init_net.ct.sysctl_events_retry_timeout,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196
	{}
};
#endif /* CONFIG_SYSCTL */

static struct nf_ct_ext_type event_extend __read_mostly = {
	.len	= sizeof(struct nf_conntrack_ecache),
	.align	= __alignof__(struct nf_conntrack_ecache),
	.id	= NF_CT_EXT_ECACHE,
};

#ifdef CONFIG_SYSCTL
static int nf_conntrack_event_init_sysctl(struct net *net)
{
	struct ctl_table *table;

	table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
			GFP_KERNEL);
	if (!table)
		goto out;

	table[0].data = &net->ct.sysctl_events;
197
	table[1].data = &net->ct.sysctl_events_retry_timeout;
198 199

	net->ct.event_sysctl_header =
200
		register_net_sysctl(net, "net/netfilter", table);
201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236
	if (!net->ct.event_sysctl_header) {
		printk(KERN_ERR "nf_ct_event: can't register to sysctl.\n");
		goto out_register;
	}
	return 0;

out_register:
	kfree(table);
out:
	return -ENOMEM;
}

static void nf_conntrack_event_fini_sysctl(struct net *net)
{
	struct ctl_table *table;

	table = net->ct.event_sysctl_header->ctl_table_arg;
	unregister_net_sysctl_table(net->ct.event_sysctl_header);
	kfree(table);
}
#else
static int nf_conntrack_event_init_sysctl(struct net *net)
{
	return 0;
}

static void nf_conntrack_event_fini_sysctl(struct net *net)
{
}
#endif /* CONFIG_SYSCTL */

int nf_conntrack_ecache_init(struct net *net)
{
	int ret;

	net->ct.sysctl_events = nf_ct_events;
237
	net->ct.sysctl_events_retry_timeout = nf_ct_events_retry_timeout;
238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266

	if (net_eq(net, &init_net)) {
		ret = nf_ct_extend_register(&event_extend);
		if (ret < 0) {
			printk(KERN_ERR "nf_ct_event: Unable to register "
					"event extension.\n");
			goto out_extend_register;
		}
	}

	ret = nf_conntrack_event_init_sysctl(net);
	if (ret < 0)
		goto out_sysctl;

	return 0;

out_sysctl:
	if (net_eq(net, &init_net))
		nf_ct_extend_unregister(&event_extend);
out_extend_register:
	return ret;
}

void nf_conntrack_ecache_fini(struct net *net)
{
	nf_conntrack_event_fini_sysctl(net);
	if (net_eq(net, &init_net))
		nf_ct_extend_unregister(&event_extend);
}