提交 a173e550 编写于 作者: D David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next

Pablo Neira Ayuso says:

====================
Netfilter updates for net-next

The following patchset contains netfilter updates for net-next, they are:

1) Add the reject expression for the nf_tables bridge family, this
   allows us to send explicit reject (TCP RST / ICMP dest unreach) to
   the packets matching a rule.

2) Simplify and consolidate the nf_tables set dumping logic. This uses
   netlink control->data to filter out depending on the request.

3) Perform garbage collection in xt_hashlimit using a workqueue instead
   of a timer, which is problematic when many entries are in place in
   the tables, from Eric Dumazet.

4) Remove leftover code from the removed ulog target support, from
   Paul Bolle.

5) Dump unmodified flags in the netfilter packet accounting when resetting
   counters, so userspace knows that a counter was in overquota situation,
   from Alexey Perevalov.

6) Fix wrong usage of the bitwise functions in nfnetlink_acct, also from
   Alexey.

7) Fix a crash when adding new set element with an empty NFTA_SET_ELEM_LIST
   attribute.

This patchset also includes a couple of cleanups for xt_LED from
Duan Jiong and for nf_conntrack_ipv4 (using coccinelle) from
Himangi Saraogi.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
...@@ -15,11 +15,5 @@ struct netns_xt { ...@@ -15,11 +15,5 @@ struct netns_xt {
struct ebt_table *frame_filter; struct ebt_table *frame_filter;
struct ebt_table *frame_nat; struct ebt_table *frame_nat;
#endif #endif
#if IS_ENABLED(CONFIG_IP_NF_TARGET_ULOG)
bool ulog_warn_deprecated;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_EBT_ULOG)
bool ebt_ulog_warn_deprecated;
#endif
}; };
#endif #endif
...@@ -14,6 +14,12 @@ config NFT_BRIDGE_META ...@@ -14,6 +14,12 @@ config NFT_BRIDGE_META
help help
Add support for bridge dedicated meta key. Add support for bridge dedicated meta key.
config NFT_BRIDGE_REJECT
tristate "Netfilter nf_tables bridge reject support"
depends on NFT_REJECT && NFT_REJECT_IPV4 && NFT_REJECT_IPV6
help
Add support to reject packets.
config NF_LOG_BRIDGE config NF_LOG_BRIDGE
tristate "Bridge packet logging" tristate "Bridge packet logging"
......
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
obj-$(CONFIG_NF_TABLES_BRIDGE) += nf_tables_bridge.o obj-$(CONFIG_NF_TABLES_BRIDGE) += nf_tables_bridge.o
obj-$(CONFIG_NFT_BRIDGE_META) += nft_meta_bridge.o obj-$(CONFIG_NFT_BRIDGE_META) += nft_meta_bridge.o
obj-$(CONFIG_NFT_BRIDGE_REJECT) += nft_reject_bridge.o
# packet logging # packet logging
obj-$(CONFIG_NF_LOG_BRIDGE) += nf_log_bridge.o obj-$(CONFIG_NF_LOG_BRIDGE) += nf_log_bridge.o
...@@ -36,5 +37,4 @@ obj-$(CONFIG_BRIDGE_EBT_SNAT) += ebt_snat.o ...@@ -36,5 +37,4 @@ obj-$(CONFIG_BRIDGE_EBT_SNAT) += ebt_snat.o
# watchers # watchers
obj-$(CONFIG_BRIDGE_EBT_LOG) += ebt_log.o obj-$(CONFIG_BRIDGE_EBT_LOG) += ebt_log.o
obj-$(CONFIG_BRIDGE_EBT_ULOG) += ebt_ulog.o
obj-$(CONFIG_BRIDGE_EBT_NFLOG) += ebt_nflog.o obj-$(CONFIG_BRIDGE_EBT_NFLOG) += ebt_nflog.o
/*
* Copyright (c) 2014 Pablo Neira Ayuso <pablo@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nft_reject.h>
/* Evaluate the "reject" expression for the bridge family: dispatch to
 * the IPv4 or IPv6 reject handler based on the Ethernet protocol field,
 * and silently drop anything we have no explicit reject mechanism for.
 */
static void nft_reject_bridge_eval(const struct nft_expr *expr,
				   struct nft_data data[NFT_REG_MAX + 1],
				   const struct nft_pktinfo *pkt)
{
	if (eth_hdr(pkt->skb)->h_proto == htons(ETH_P_IP))
		nft_reject_ipv4_eval(expr, data, pkt);
	else if (eth_hdr(pkt->skb)->h_proto == htons(ETH_P_IPV6))
		nft_reject_ipv6_eval(expr, data, pkt);
	else
		/* No explicit way to reject this protocol, drop it. */
		data[NFT_REG_VERDICT].verdict = NF_DROP;
}
/* Forward declaration: the ops table below points at the expr type,
 * which in turn points back at the ops table. */
static struct nft_expr_type nft_reject_bridge_type;
/* Expression operations: bridge-specific eval above, with init/dump
 * reused from the generic nft_reject helpers. */
static const struct nft_expr_ops nft_reject_bridge_ops = {
.type = &nft_reject_bridge_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
.eval = nft_reject_bridge_eval,
.init = nft_reject_init,
.dump = nft_reject_dump,
};
/* Registration record for the "reject" expression in the bridge
 * (NFPROTO_BRIDGE) family; attribute policy and max attribute are
 * shared with the generic nft_reject implementation. */
static struct nft_expr_type nft_reject_bridge_type __read_mostly = {
.family = NFPROTO_BRIDGE,
.name = "reject",
.ops = &nft_reject_bridge_ops,
.policy = nft_reject_policy,
.maxattr = NFTA_REJECT_MAX,
.owner = THIS_MODULE,
};
/* Module init: register the bridge "reject" expression with nf_tables.
 * Returns 0 on success or a negative errno from nft_register_expr(). */
static int __init nft_reject_bridge_module_init(void)
{
	int err;

	err = nft_register_expr(&nft_reject_bridge_type);
	return err;
}
/* Module exit: unregister the expression type registered at init. */
static void __exit nft_reject_bridge_module_exit(void)
{
nft_unregister_expr(&nft_reject_bridge_type);
}
/* Module entry/exit hooks. */
module_init(nft_reject_bridge_module_init);
module_exit(nft_reject_bridge_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
/* Alias so the module can be auto-loaded when a "reject" expression is
 * requested for the bridge (AF_BRIDGE) family. */
MODULE_ALIAS_NFT_AF_EXPR(AF_BRIDGE, "reject");
...@@ -57,7 +57,6 @@ obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o ...@@ -57,7 +57,6 @@ obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o
obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o
obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
obj-$(CONFIG_IP_NF_TARGET_SYNPROXY) += ipt_SYNPROXY.o obj-$(CONFIG_IP_NF_TARGET_SYNPROXY) += ipt_SYNPROXY.o
obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
# generic ARP tables # generic ARP tables
obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o
......
...@@ -358,7 +358,7 @@ static struct nf_sockopt_ops so_getorigdst = { ...@@ -358,7 +358,7 @@ static struct nf_sockopt_ops so_getorigdst = {
.pf = PF_INET, .pf = PF_INET,
.get_optmin = SO_ORIGINAL_DST, .get_optmin = SO_ORIGINAL_DST,
.get_optmax = SO_ORIGINAL_DST+1, .get_optmax = SO_ORIGINAL_DST+1,
.get = &getorigdst, .get = getorigdst,
.owner = THIS_MODULE, .owner = THIS_MODULE,
}; };
......
...@@ -2247,80 +2247,7 @@ static int nf_tables_set_notify(const struct nft_ctx *ctx, ...@@ -2247,80 +2247,7 @@ static int nf_tables_set_notify(const struct nft_ctx *ctx,
return err; return err;
} }
static int nf_tables_dump_sets_table(struct nft_ctx *ctx, struct sk_buff *skb, static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb)
struct netlink_callback *cb)
{
const struct nft_set *set;
unsigned int idx = 0, s_idx = cb->args[0];
if (cb->args[1])
return skb->len;
rcu_read_lock();
cb->seq = ctx->net->nft.base_seq;
list_for_each_entry_rcu(set, &ctx->table->sets, list) {
if (idx < s_idx)
goto cont;
if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET,
NLM_F_MULTI) < 0) {
cb->args[0] = idx;
goto done;
}
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
idx++;
}
cb->args[1] = 1;
done:
rcu_read_unlock();
return skb->len;
}
static int nf_tables_dump_sets_family(struct nft_ctx *ctx, struct sk_buff *skb,
struct netlink_callback *cb)
{
const struct nft_set *set;
unsigned int idx, s_idx = cb->args[0];
struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2];
if (cb->args[1])
return skb->len;
rcu_read_lock();
cb->seq = ctx->net->nft.base_seq;
list_for_each_entry_rcu(table, &ctx->afi->tables, list) {
if (cur_table) {
if (cur_table != table)
continue;
cur_table = NULL;
}
ctx->table = table;
idx = 0;
list_for_each_entry_rcu(set, &ctx->table->sets, list) {
if (idx < s_idx)
goto cont;
if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET,
NLM_F_MULTI) < 0) {
cb->args[0] = idx;
cb->args[2] = (unsigned long) table;
goto done;
}
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
idx++;
}
}
cb->args[1] = 1;
done:
rcu_read_unlock();
return skb->len;
}
static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
struct netlink_callback *cb)
{ {
const struct nft_set *set; const struct nft_set *set;
unsigned int idx, s_idx = cb->args[0]; unsigned int idx, s_idx = cb->args[0];
...@@ -2328,6 +2255,7 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb, ...@@ -2328,6 +2255,7 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2]; struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2];
struct net *net = sock_net(skb->sk); struct net *net = sock_net(skb->sk);
int cur_family = cb->args[3]; int cur_family = cb->args[3];
struct nft_ctx *ctx = cb->data, ctx_set;
if (cb->args[1]) if (cb->args[1])
return skb->len; return skb->len;
...@@ -2336,28 +2264,34 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb, ...@@ -2336,28 +2264,34 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
cb->seq = net->nft.base_seq; cb->seq = net->nft.base_seq;
list_for_each_entry_rcu(afi, &net->nft.af_info, list) { list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
if (ctx->afi && ctx->afi != afi)
continue;
if (cur_family) { if (cur_family) {
if (afi->family != cur_family) if (afi->family != cur_family)
continue; continue;
cur_family = 0; cur_family = 0;
} }
list_for_each_entry_rcu(table, &afi->tables, list) { list_for_each_entry_rcu(table, &afi->tables, list) {
if (ctx->table && ctx->table != table)
continue;
if (cur_table) { if (cur_table) {
if (cur_table != table) if (cur_table != table)
continue; continue;
cur_table = NULL; cur_table = NULL;
} }
ctx->table = table;
ctx->afi = afi;
idx = 0; idx = 0;
list_for_each_entry_rcu(set, &ctx->table->sets, list) { list_for_each_entry_rcu(set, &table->sets, list) {
if (idx < s_idx) if (idx < s_idx)
goto cont; goto cont;
if (nf_tables_fill_set(skb, ctx, set,
ctx_set = *ctx;
ctx_set.table = table;
ctx_set.afi = afi;
if (nf_tables_fill_set(skb, &ctx_set, set,
NFT_MSG_NEWSET, NFT_MSG_NEWSET,
NLM_F_MULTI) < 0) { NLM_F_MULTI) < 0) {
cb->args[0] = idx; cb->args[0] = idx;
...@@ -2379,31 +2313,10 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb, ...@@ -2379,31 +2313,10 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
return skb->len; return skb->len;
} }
static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb) static int nf_tables_dump_sets_done(struct netlink_callback *cb)
{ {
const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); kfree(cb->data);
struct nlattr *nla[NFTA_SET_MAX + 1]; return 0;
struct nft_ctx ctx;
int err, ret;
err = nlmsg_parse(cb->nlh, sizeof(*nfmsg), nla, NFTA_SET_MAX,
nft_set_policy);
if (err < 0)
return err;
err = nft_ctx_init_from_setattr(&ctx, cb->skb, cb->nlh, (void *)nla);
if (err < 0)
return err;
if (ctx.table == NULL) {
if (ctx.afi == NULL)
ret = nf_tables_dump_sets_all(&ctx, skb, cb);
else
ret = nf_tables_dump_sets_family(&ctx, skb, cb);
} else
ret = nf_tables_dump_sets_table(&ctx, skb, cb);
return ret;
} }
#define NFT_SET_INACTIVE (1 << 15) /* Internal set flag */ #define NFT_SET_INACTIVE (1 << 15) /* Internal set flag */
...@@ -2426,7 +2339,17 @@ static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb, ...@@ -2426,7 +2339,17 @@ static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb,
if (nlh->nlmsg_flags & NLM_F_DUMP) { if (nlh->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = { struct netlink_dump_control c = {
.dump = nf_tables_dump_sets, .dump = nf_tables_dump_sets,
.done = nf_tables_dump_sets_done,
}; };
struct nft_ctx *ctx_dump;
ctx_dump = kmalloc(sizeof(*ctx_dump), GFP_KERNEL);
if (ctx_dump == NULL)
return -ENOMEM;
*ctx_dump = ctx;
c.data = ctx_dump;
return netlink_dump_start(nlsk, skb, nlh, &c); return netlink_dump_start(nlsk, skb, nlh, &c);
} }
...@@ -3150,6 +3073,9 @@ static int nf_tables_newsetelem(struct sock *nlsk, struct sk_buff *skb, ...@@ -3150,6 +3073,9 @@ static int nf_tables_newsetelem(struct sock *nlsk, struct sk_buff *skb,
struct nft_ctx ctx; struct nft_ctx ctx;
int rem, err = 0; int rem, err = 0;
if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL)
return -EINVAL;
err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, true); err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, true);
if (err < 0) if (err < 0)
return err; return err;
...@@ -3233,6 +3159,9 @@ static int nf_tables_delsetelem(struct sock *nlsk, struct sk_buff *skb, ...@@ -3233,6 +3159,9 @@ static int nf_tables_delsetelem(struct sock *nlsk, struct sk_buff *skb,
struct nft_ctx ctx; struct nft_ctx ctx;
int rem, err = 0; int rem, err = 0;
if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL)
return -EINVAL;
err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, false); err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, false);
if (err < 0) if (err < 0)
return err; return err;
......
...@@ -41,6 +41,7 @@ struct nf_acct { ...@@ -41,6 +41,7 @@ struct nf_acct {
}; };
#define NFACCT_F_QUOTA (NFACCT_F_QUOTA_PKTS | NFACCT_F_QUOTA_BYTES) #define NFACCT_F_QUOTA (NFACCT_F_QUOTA_PKTS | NFACCT_F_QUOTA_BYTES)
#define NFACCT_OVERQUOTA_BIT 2 /* NFACCT_F_OVERQUOTA */
static int static int
nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb, nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,
...@@ -77,7 +78,8 @@ nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb, ...@@ -77,7 +78,8 @@ nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,
smp_mb__before_atomic(); smp_mb__before_atomic();
/* reset overquota flag if quota is enabled. */ /* reset overquota flag if quota is enabled. */
if ((matching->flags & NFACCT_F_QUOTA)) if ((matching->flags & NFACCT_F_QUOTA))
clear_bit(NFACCT_F_OVERQUOTA, &matching->flags); clear_bit(NFACCT_OVERQUOTA_BIT,
&matching->flags);
return 0; return 0;
} }
return -EBUSY; return -EBUSY;
...@@ -129,6 +131,7 @@ nfnl_acct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type, ...@@ -129,6 +131,7 @@ nfnl_acct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
struct nfgenmsg *nfmsg; struct nfgenmsg *nfmsg;
unsigned int flags = portid ? NLM_F_MULTI : 0; unsigned int flags = portid ? NLM_F_MULTI : 0;
u64 pkts, bytes; u64 pkts, bytes;
u32 old_flags;
event |= NFNL_SUBSYS_ACCT << 8; event |= NFNL_SUBSYS_ACCT << 8;
nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags); nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
...@@ -143,12 +146,13 @@ nfnl_acct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type, ...@@ -143,12 +146,13 @@ nfnl_acct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
if (nla_put_string(skb, NFACCT_NAME, acct->name)) if (nla_put_string(skb, NFACCT_NAME, acct->name))
goto nla_put_failure; goto nla_put_failure;
old_flags = acct->flags;
if (type == NFNL_MSG_ACCT_GET_CTRZERO) { if (type == NFNL_MSG_ACCT_GET_CTRZERO) {
pkts = atomic64_xchg(&acct->pkts, 0); pkts = atomic64_xchg(&acct->pkts, 0);
bytes = atomic64_xchg(&acct->bytes, 0); bytes = atomic64_xchg(&acct->bytes, 0);
smp_mb__before_atomic(); smp_mb__before_atomic();
if (acct->flags & NFACCT_F_QUOTA) if (acct->flags & NFACCT_F_QUOTA)
clear_bit(NFACCT_F_OVERQUOTA, &acct->flags); clear_bit(NFACCT_OVERQUOTA_BIT, &acct->flags);
} else { } else {
pkts = atomic64_read(&acct->pkts); pkts = atomic64_read(&acct->pkts);
bytes = atomic64_read(&acct->bytes); bytes = atomic64_read(&acct->bytes);
...@@ -160,7 +164,7 @@ nfnl_acct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type, ...@@ -160,7 +164,7 @@ nfnl_acct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
if (acct->flags & NFACCT_F_QUOTA) { if (acct->flags & NFACCT_F_QUOTA) {
u64 *quota = (u64 *)acct->data; u64 *quota = (u64 *)acct->data;
if (nla_put_be32(skb, NFACCT_FLAGS, htonl(acct->flags)) || if (nla_put_be32(skb, NFACCT_FLAGS, htonl(old_flags)) ||
nla_put_be64(skb, NFACCT_QUOTA, cpu_to_be64(*quota))) nla_put_be64(skb, NFACCT_QUOTA, cpu_to_be64(*quota)))
goto nla_put_failure; goto nla_put_failure;
} }
...@@ -412,7 +416,7 @@ int nfnl_acct_overquota(const struct sk_buff *skb, struct nf_acct *nfacct) ...@@ -412,7 +416,7 @@ int nfnl_acct_overquota(const struct sk_buff *skb, struct nf_acct *nfacct)
ret = now > *quota; ret = now > *quota;
if (now >= *quota && if (now >= *quota &&
!test_and_set_bit(NFACCT_F_OVERQUOTA, &nfacct->flags)) { !test_and_set_bit(NFACCT_OVERQUOTA_BIT, &nfacct->flags)) {
nfnl_overquota_report(nfacct); nfnl_overquota_report(nfacct);
} }
......
...@@ -133,9 +133,7 @@ static int led_tg_check(const struct xt_tgchk_param *par) ...@@ -133,9 +133,7 @@ static int led_tg_check(const struct xt_tgchk_param *par)
err = led_trigger_register(&ledinternal->netfilter_led_trigger); err = led_trigger_register(&ledinternal->netfilter_led_trigger);
if (err) { if (err) {
pr_warning("led_trigger_register() failed\n"); pr_err("Trigger name is already in use.\n");
if (err == -EEXIST)
pr_warning("Trigger name is already in use.\n");
goto exit_alloc; goto exit_alloc;
} }
......
...@@ -104,7 +104,7 @@ struct xt_hashlimit_htable { ...@@ -104,7 +104,7 @@ struct xt_hashlimit_htable {
spinlock_t lock; /* lock for list_head */ spinlock_t lock; /* lock for list_head */
u_int32_t rnd; /* random seed for hash */ u_int32_t rnd; /* random seed for hash */
unsigned int count; /* number entries in table */ unsigned int count; /* number entries in table */
struct timer_list timer; /* timer for gc */ struct delayed_work gc_work;
/* seq_file stuff */ /* seq_file stuff */
struct proc_dir_entry *pde; struct proc_dir_entry *pde;
...@@ -213,7 +213,7 @@ dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent) ...@@ -213,7 +213,7 @@ dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent)
call_rcu_bh(&ent->rcu, dsthash_free_rcu); call_rcu_bh(&ent->rcu, dsthash_free_rcu);
ht->count--; ht->count--;
} }
static void htable_gc(unsigned long htlong); static void htable_gc(struct work_struct *work);
static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo, static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo,
u_int8_t family) u_int8_t family)
...@@ -273,9 +273,9 @@ static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo, ...@@ -273,9 +273,9 @@ static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo,
} }
hinfo->net = net; hinfo->net = net;
setup_timer(&hinfo->timer, htable_gc, (unsigned long)hinfo); INIT_DEFERRABLE_WORK(&hinfo->gc_work, htable_gc);
hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval); queue_delayed_work(system_power_efficient_wq, &hinfo->gc_work,
add_timer(&hinfo->timer); msecs_to_jiffies(hinfo->cfg.gc_interval));
hlist_add_head(&hinfo->node, &hashlimit_net->htables); hlist_add_head(&hinfo->node, &hashlimit_net->htables);
...@@ -300,29 +300,30 @@ static void htable_selective_cleanup(struct xt_hashlimit_htable *ht, ...@@ -300,29 +300,30 @@ static void htable_selective_cleanup(struct xt_hashlimit_htable *ht,
{ {
unsigned int i; unsigned int i;
/* lock hash table and iterate over it */
spin_lock_bh(&ht->lock);
for (i = 0; i < ht->cfg.size; i++) { for (i = 0; i < ht->cfg.size; i++) {
struct dsthash_ent *dh; struct dsthash_ent *dh;
struct hlist_node *n; struct hlist_node *n;
spin_lock_bh(&ht->lock);
hlist_for_each_entry_safe(dh, n, &ht->hash[i], node) { hlist_for_each_entry_safe(dh, n, &ht->hash[i], node) {
if ((*select)(ht, dh)) if ((*select)(ht, dh))
dsthash_free(ht, dh); dsthash_free(ht, dh);
} }
spin_unlock_bh(&ht->lock);
cond_resched();
} }
spin_unlock_bh(&ht->lock);
} }
/* hash table garbage collector, run by timer */ static void htable_gc(struct work_struct *work)
static void htable_gc(unsigned long htlong)
{ {
struct xt_hashlimit_htable *ht = (struct xt_hashlimit_htable *)htlong; struct xt_hashlimit_htable *ht;
ht = container_of(work, struct xt_hashlimit_htable, gc_work.work);
htable_selective_cleanup(ht, select_gc); htable_selective_cleanup(ht, select_gc);
/* re-add the timer accordingly */ queue_delayed_work(system_power_efficient_wq,
ht->timer.expires = jiffies + msecs_to_jiffies(ht->cfg.gc_interval); &ht->gc_work, msecs_to_jiffies(ht->cfg.gc_interval));
add_timer(&ht->timer);
} }
static void htable_remove_proc_entry(struct xt_hashlimit_htable *hinfo) static void htable_remove_proc_entry(struct xt_hashlimit_htable *hinfo)
...@@ -341,7 +342,7 @@ static void htable_remove_proc_entry(struct xt_hashlimit_htable *hinfo) ...@@ -341,7 +342,7 @@ static void htable_remove_proc_entry(struct xt_hashlimit_htable *hinfo)
static void htable_destroy(struct xt_hashlimit_htable *hinfo) static void htable_destroy(struct xt_hashlimit_htable *hinfo)
{ {
del_timer_sync(&hinfo->timer); cancel_delayed_work_sync(&hinfo->gc_work);
htable_remove_proc_entry(hinfo); htable_remove_proc_entry(hinfo);
htable_selective_cleanup(hinfo, select_all); htable_selective_cleanup(hinfo, select_all);
kfree(hinfo->name); kfree(hinfo->name);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册