Commit d862a662 authored by Patrick McHardy

netfilter: nf_conntrack: use is_vmalloc_addr()

Use is_vmalloc_addr() in nf_ct_free_hashtable() and get rid of
the vmalloc flags used to indicate that a hash table has been
allocated using vmalloc().
Signed-off-by: Patrick McHardy <kaber@trash.net>
Parent 0134e89c
@@ -202,9 +202,9 @@ extern void nf_ct_l3proto_module_put(unsigned short l3proto);
  * Allocate a hashtable of hlist_head (if nulls == 0),
  * or hlist_nulls_head (if nulls == 1)
  */
-extern void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls);
-extern void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size);
+extern void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls);
+extern void nf_ct_free_hashtable(void *hash, unsigned int size);
 
 extern struct nf_conntrack_tuple_hash *
 __nf_conntrack_find(struct net *net, u16 zone,

@@ -28,8 +28,6 @@ struct netns_ct {
 	struct ctl_table_header *acct_sysctl_header;
 	struct ctl_table_header *event_sysctl_header;
 #endif
-	int			hash_vmalloc;
-	int			expect_vmalloc;
 	char			*slabname;
 };
 #endif

@@ -43,7 +43,6 @@ struct netns_ipv4 {
 	struct xt_table		*nat_table;
 	struct hlist_head	*nat_bysource;
 	unsigned int		nat_htable_size;
-	int			nat_vmalloced;
 #endif
 	int sysctl_icmp_echo_ignore_all;

@@ -682,8 +682,7 @@ static int __net_init nf_nat_net_init(struct net *net)
 {
 	/* Leave them the same for the moment. */
 	net->ipv4.nat_htable_size = net->ct.htable_size;
-	net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size,
-						       &net->ipv4.nat_vmalloced, 0);
+	net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size, 0);
 	if (!net->ipv4.nat_bysource)
 		return -ENOMEM;
 	return 0;

@@ -705,8 +704,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
 {
 	nf_ct_iterate_cleanup(net, &clean_nat, NULL);
 	synchronize_rcu();
-	nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced,
-			     net->ipv4.nat_htable_size);
+	nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_htable_size);
 }
 
 static struct pernet_operations nf_nat_net_ops = {

@@ -1202,9 +1202,9 @@ static int kill_all(struct nf_conn *i, void *data)
 	return 1;
 }
 
-void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size)
+void nf_ct_free_hashtable(void *hash, unsigned int size)
 {
-	if (vmalloced)
+	if (is_vmalloc_addr(hash))
 		vfree(hash);
 	else
 		free_pages((unsigned long)hash,

@@ -1271,8 +1271,7 @@ static void nf_conntrack_cleanup_net(struct net *net)
 		goto i_see_dead_people;
 	}
 
-	nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
-			     net->ct.htable_size);
+	nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
 	nf_conntrack_ecache_fini(net);
 	nf_conntrack_acct_fini(net);
 	nf_conntrack_expect_fini(net);

@@ -1301,21 +1300,18 @@ void nf_conntrack_cleanup(struct net *net)
 	}
 }
 
-void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls)
+void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
 {
 	struct hlist_nulls_head *hash;
 	unsigned int nr_slots, i;
 	size_t sz;
 
-	*vmalloced = 0;
-
 	BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
 	nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
 	sz = nr_slots * sizeof(struct hlist_nulls_head);
 	hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
 					get_order(sz));
 	if (!hash) {
-		*vmalloced = 1;
 		printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
 		hash = __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
 				 PAGE_KERNEL);

@@ -1331,7 +1327,7 @@ EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
 
 int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
 {
-	int i, bucket, vmalloced, old_vmalloced;
+	int i, bucket;
 	unsigned int hashsize, old_size;
 	struct hlist_nulls_head *hash, *old_hash;
 	struct nf_conntrack_tuple_hash *h;

@@ -1348,7 +1344,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
 	if (!hashsize)
 		return -EINVAL;
 
-	hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced, 1);
+	hash = nf_ct_alloc_hashtable(&hashsize, 1);
 	if (!hash)
 		return -ENOMEM;

@@ -1370,15 +1366,13 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
 		}
 	}
 	old_size = init_net.ct.htable_size;
-	old_vmalloced = init_net.ct.hash_vmalloc;
 	old_hash = init_net.ct.hash;
 
 	init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
-	init_net.ct.hash_vmalloc = vmalloced;
 	init_net.ct.hash = hash;
 	spin_unlock_bh(&nf_conntrack_lock);
 
-	nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
+	nf_ct_free_hashtable(old_hash, old_size);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);

@@ -1491,8 +1485,7 @@ static int nf_conntrack_init_net(struct net *net)
 	}
 
 	net->ct.htable_size = nf_conntrack_htable_size;
-	net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size,
-					     &net->ct.hash_vmalloc, 1);
+	net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, 1);
 	if (!net->ct.hash) {
 		ret = -ENOMEM;
 		printk(KERN_ERR "Unable to create nf_conntrack_hash\n");

@@ -1515,8 +1508,7 @@ static int nf_conntrack_init_net(struct net *net)
 err_acct:
 	nf_conntrack_expect_fini(net);
 err_expect:
-	nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
-			     net->ct.htable_size);
+	nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
 err_hash:
 	kmem_cache_destroy(net->ct.nf_conntrack_cachep);
 err_cache:

@@ -639,8 +639,7 @@ int nf_conntrack_expect_init(struct net *net)
 	}
 
 	net->ct.expect_count = 0;
-	net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize,
-						    &net->ct.expect_vmalloc, 0);
+	net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
 	if (net->ct.expect_hash == NULL)
 		goto err1;

@@ -662,8 +661,7 @@ int nf_conntrack_expect_init(struct net *net)
 	if (net_eq(net, &init_net))
 		kmem_cache_destroy(nf_ct_expect_cachep);
 err2:
-	nf_ct_free_hashtable(net->ct.expect_hash, net->ct.expect_vmalloc,
-			     nf_ct_expect_hsize);
+	nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
 err1:
 	return err;
 }

@@ -675,6 +673,5 @@ void nf_conntrack_expect_fini(struct net *net)
 		rcu_barrier(); /* Wait for call_rcu() before destroy */
 		kmem_cache_destroy(nf_ct_expect_cachep);
 	}
-	nf_ct_free_hashtable(net->ct.expect_hash, net->ct.expect_vmalloc,
-			     nf_ct_expect_hsize);
+	nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
 }

@@ -33,7 +33,6 @@ static DEFINE_MUTEX(nf_ct_helper_mutex);
 static struct hlist_head *nf_ct_helper_hash __read_mostly;
 static unsigned int nf_ct_helper_hsize __read_mostly;
 static unsigned int nf_ct_helper_count __read_mostly;
-static int nf_ct_helper_vmalloc;
 
 /* Stupid hash, but collision free for the default registrations of the

@@ -267,8 +266,7 @@ int nf_conntrack_helper_init(void)
 	int err;
 
 	nf_ct_helper_hsize = 1; /* gets rounded up to use one page */
-	nf_ct_helper_hash = nf_ct_alloc_hashtable(&nf_ct_helper_hsize,
-						  &nf_ct_helper_vmalloc, 0);
+	nf_ct_helper_hash = nf_ct_alloc_hashtable(&nf_ct_helper_hsize, 0);
 	if (!nf_ct_helper_hash)
 		return -ENOMEM;

@@ -279,14 +277,12 @@ int nf_conntrack_helper_init(void)
 	return 0;
 
 err1:
-	nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_vmalloc,
-			     nf_ct_helper_hsize);
+	nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize);
 	return err;
 }
 
 void nf_conntrack_helper_fini(void)
 {
 	nf_ct_extend_unregister(&helper_extend);
-	nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_vmalloc,
-			     nf_ct_helper_hsize);
+	nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize);
 }