Commit 807192de authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf

Pablo Neira Ayuso says:

====================
Netfilter fixes for net

The following patchset contains Netfilter fixes for your net tree:

1) rbtree lookup from the control plane returns the left-hand side element
   of the range when the interval end flag is set.

2) The osf extension is not supported from the output path, reject this
   from the control plane, from Fernando Fernandez Mancera.

3) xt_TEE leaves the output interface unset due to a recent incorrect
   netns rework, from Taehee Yoo.

4) xt_TEE allows selecting an interface that does not belong to this
   network namespace, from Taehee Yoo.

5) Zero the private extension area in nft_compat, just like we do in
   x_tables, otherwise we leak kernel memory to userspace.

6) Missing .checkentry and .destroy entries in the new DNAT extensions
   break them, since we never load the nf_conntrack dependencies, from
   Paolo Abeni.

7) Do not remove flowtable hooks from the netns exit path, the netdevice
   handler already deals with this, also from Taehee Yoo.

8) Only clean up flowtable entries that reside in this network namespace,
   also from Taehee Yoo.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -465,14 +465,17 @@ EXPORT_SYMBOL_GPL(nf_flow_table_init);
 static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
 {
 	struct net_device *dev = data;
+	struct flow_offload_entry *e;
 
+	e = container_of(flow, struct flow_offload_entry, flow);
 	if (!dev) {
 		flow_offload_teardown(flow);
 		return;
 	}
 
-	if (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
-	    flow->tuplehash[1].tuple.iifidx == dev->ifindex)
+	if (net_eq(nf_ct_net(e->ct), dev_net(dev)) &&
+	    (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
+	     flow->tuplehash[1].tuple.iifidx == dev->ifindex))
 		flow_offload_dead(flow);
 }
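The cleanup handler above only receives a struct flow_offload pointer, so the fix recovers the enclosing flow_offload_entry with container_of() and compares the conntrack's namespace against the device's via net_eq() before killing the flow. Below is a small, self-contained userspace sketch of the container_of() idiom; the structure names are hypothetical stand-ins, not the kernel's:

/* Illustrative userspace sketch (not kernel code) of the container_of
 * idiom used above: given a pointer to a member embedded in a larger
 * structure, recover a pointer to the enclosing structure. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct flow {			/* stands in for struct flow_offload */
	int iifidx;
};

struct flow_entry {		/* stands in for struct flow_offload_entry */
	int netns_id;		/* stands in for the conntrack's netns */
	struct flow flow;	/* embedded member, as in the kernel */
};

int main(void)
{
	struct flow_entry e = { .netns_id = 42, .flow = { .iifidx = 3 } };
	struct flow *f = &e.flow;

	/* Recover the enclosing entry from the embedded flow pointer. */
	struct flow_entry *owner = container_of(f, struct flow_entry, flow);

	printf("netns_id=%d iifidx=%d\n", owner->netns_id, f->iifidx);
	return 0;
}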
@@ -7280,9 +7280,6 @@ static void __nft_release_tables(struct net *net)
 
 		list_for_each_entry(chain, &table->chains, list)
 			nf_tables_unregister_hook(net, table, chain);
-		list_for_each_entry(flowtable, &table->flowtables, list)
-			nf_unregister_net_hooks(net, flowtable->ops,
-						flowtable->ops_len);
 		/* No packets are walking on these chains anymore. */
 		ctx.table = table;
 		list_for_each_entry(chain, &table->chains, list) {
@@ -290,6 +290,24 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
 	module_put(target->me);
 }
 
+static int nft_extension_dump_info(struct sk_buff *skb, int attr,
+				   const void *info,
+				   unsigned int size, unsigned int user_size)
+{
+	unsigned int info_size, aligned_size = XT_ALIGN(size);
+	struct nlattr *nla;
+
+	nla = nla_reserve(skb, attr, aligned_size);
+	if (!nla)
+		return -1;
+
+	info_size = user_size ? : size;
+	memcpy(nla_data(nla), info, info_size);
+	memset(nla_data(nla) + info_size, 0, aligned_size - info_size);
+
+	return 0;
+}
+
 static int nft_target_dump(struct sk_buff *skb, const struct nft_expr *expr)
 {
 	const struct xt_target *target = expr->ops->data;
@@ -297,7 +315,8 @@ static int nft_target_dump(struct sk_buff *skb, const struct nft_expr *expr)
 
 	if (nla_put_string(skb, NFTA_TARGET_NAME, target->name) ||
 	    nla_put_be32(skb, NFTA_TARGET_REV, htonl(target->revision)) ||
-	    nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(target->targetsize), info))
+	    nft_extension_dump_info(skb, NFTA_TARGET_INFO, info,
+				    target->targetsize, target->usersize))
 		goto nla_put_failure;
 
 	return 0;
@@ -532,7 +551,8 @@ static int __nft_match_dump(struct sk_buff *skb, const struct nft_expr *expr,
 
 	if (nla_put_string(skb, NFTA_MATCH_NAME, match->name) ||
 	    nla_put_be32(skb, NFTA_MATCH_REV, htonl(match->revision)) ||
-	    nla_put(skb, NFTA_MATCH_INFO, XT_ALIGN(match->matchsize), info))
+	    nft_extension_dump_info(skb, NFTA_MATCH_INFO, info,
+				    match->matchsize, match->usersize))
 		goto nla_put_failure;
 
 	return 0;
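nft_extension_dump_info() reserves the full XT_ALIGN()ed attribute, copies only the user-visible part of the private area (usersize, falling back to the full size) and zeroes the tail, so stale kernel bytes in the padding never reach userspace. Here is a minimal userspace sketch of that copy-then-zero pattern; ALIGN_UP() and the buffer sizes are illustrative assumptions standing in for XT_ALIGN() and the netlink attribute:

/* Illustrative userspace sketch (not the kernel function) of the
 * copy-then-zero pattern used by nft_extension_dump_info(): only
 * user_size bytes of the private area are exposed, and the rest of
 * the aligned buffer is cleared instead of leaking stale bytes. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static void dump_info(uint8_t *dst, const void *info,
		      size_t size, size_t user_size)
{
	size_t aligned_size = ALIGN_UP(size, 8);
	size_t info_size = user_size ? user_size : size;

	memcpy(dst, info, info_size);
	/* Zero the tail so uninitialised padding never reaches the caller. */
	memset(dst + info_size, 0, aligned_size - info_size);
}

int main(void)
{
	uint8_t priv[12] = "private";	/* pretend kernel-side private area */
	uint8_t out[16];

	dump_info(out, priv, sizeof(priv), 7);	/* expose only 7 bytes */
	for (size_t i = 0; i < sizeof(out); i++)
		printf("%02x ", out[i]);
	printf("\n");
	return 0;
}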
@@ -82,6 +82,15 @@ static int nft_osf_dump(struct sk_buff *skb, const struct nft_expr *expr)
 	return -1;
 }
 
+static int nft_osf_validate(const struct nft_ctx *ctx,
+			    const struct nft_expr *expr,
+			    const struct nft_data **data)
+{
+	return nft_chain_validate_hooks(ctx->chain, (1 << NF_INET_LOCAL_IN) |
+						    (1 << NF_INET_PRE_ROUTING) |
+						    (1 << NF_INET_FORWARD));
+}
+
 static struct nft_expr_type nft_osf_type;
 static const struct nft_expr_ops nft_osf_op = {
 	.eval		= nft_osf_eval,
@@ -89,6 +98,7 @@ static const struct nft_expr_ops nft_osf_op = {
 	.init		= nft_osf_init,
 	.dump		= nft_osf_dump,
 	.type		= &nft_osf_type,
+	.validate	= nft_osf_validate,
 };
 
 static struct nft_expr_type nft_osf_type __read_mostly = {
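nft_osf_validate() lets the expression bind only to chains attached to the prerouting, input and forward hooks by testing the chain's hook against an allowed bitmask, which is what nft_chain_validate_hooks() does. A hedged userspace sketch of that bitmask check follows; the enum mirrors the kernel's nf_inet_hooks ordering but is redefined locally so the snippet builds on its own:

/* Illustrative userspace sketch (not the kernel helper) of the hook
 * bitmask check behind nft_chain_validate_hooks(): the expression is
 * only accepted if the chain's hook bit is present in the allowed mask. */
#include <stdbool.h>
#include <stdio.h>

/* Mirrors the kernel's enum nf_inet_hooks ordering. */
enum {
	NF_INET_PRE_ROUTING,
	NF_INET_LOCAL_IN,
	NF_INET_FORWARD,
	NF_INET_LOCAL_OUT,
	NF_INET_POST_ROUTING,
};

static bool hook_allowed(unsigned int hooknum, unsigned int allowed_mask)
{
	return allowed_mask & (1 << hooknum);
}

int main(void)
{
	unsigned int osf_hooks = (1 << NF_INET_LOCAL_IN) |
				 (1 << NF_INET_PRE_ROUTING) |
				 (1 << NF_INET_FORWARD);

	printf("input:  %s\n", hook_allowed(NF_INET_LOCAL_IN, osf_hooks) ? "ok" : "rejected");
	printf("output: %s\n", hook_allowed(NF_INET_LOCAL_OUT, osf_hooks) ? "ok" : "rejected");
	return 0;
}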
@@ -135,9 +135,12 @@ static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
 		d = memcmp(this, key, set->klen);
 		if (d < 0) {
 			parent = rcu_dereference_raw(parent->rb_left);
-			interval = rbe;
+			if (!(flags & NFT_SET_ELEM_INTERVAL_END))
+				interval = rbe;
 		} else if (d > 0) {
 			parent = rcu_dereference_raw(parent->rb_right);
+			if (flags & NFT_SET_ELEM_INTERVAL_END)
+				interval = rbe;
 		} else {
 			if (!nft_set_elem_active(&rbe->ext, genmask))
 				parent = rcu_dereference_raw(parent->rb_left);
@@ -154,7 +157,10 @@ static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
 
 	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
 	    nft_set_elem_active(&interval->ext, genmask) &&
-	    !nft_rbtree_interval_end(interval)) {
+	    ((!nft_rbtree_interval_end(interval) &&
+	      !(flags & NFT_SET_ELEM_INTERVAL_END)) ||
+	     (nft_rbtree_interval_end(interval) &&
+	      (flags & NFT_SET_ELEM_INTERVAL_END)))) {
 		*elem = interval;
 		return true;
 	}
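The control-plane lookup has to honour NFT_SET_ELEM_INTERVAL_END because the closing boundary of one interval and the opening boundary of the next can carry the same key and differ only by that flag. A simplified userspace sketch of that matching rule; the flat array is an illustrative assumption, not the kernel's rbtree:

/* Illustrative userspace sketch (not the kernel rbtree code) of the
 * "get element" semantics restored above: an interval [start, end) is
 * stored as two boundary elements, and the lookup must return the
 * candidate whose end flag matches the flag requested by userspace. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct elem {
	unsigned int key;
	bool interval_end;	/* right-hand boundary of an interval */
};

/* Boundary elements for the adjacent intervals [10,20) and [20,30). */
static const struct elem set[] = {
	{ 10, false }, { 20, true }, { 20, false }, { 30, true },
};

static const struct elem *get_elem(unsigned int key, bool want_end)
{
	for (size_t i = 0; i < sizeof(set) / sizeof(set[0]); i++) {
		if (set[i].key == key && set[i].interval_end == want_end)
			return &set[i];
	}
	return NULL;
}

int main(void)
{
	const struct elem *end = get_elem(20, true);
	const struct elem *start = get_elem(20, false);

	printf("end boundary:   key=%u end=%d\n", end->key, end->interval_end);
	printf("start boundary: key=%u end=%d\n", start->key, start->interval_end);
	return 0;
}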
@@ -14,6 +14,8 @@
 #include <linux/skbuff.h>
 #include <linux/route.h>
 #include <linux/netfilter/x_tables.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
 #include <net/route.h>
 #include <net/netfilter/ipv4/nf_dup_ipv4.h>
 #include <net/netfilter/ipv6/nf_dup_ipv6.h>
@@ -25,8 +27,15 @@ struct xt_tee_priv {
 	int			oif;
 };
 
+static unsigned int tee_net_id __read_mostly;
 static const union nf_inet_addr tee_zero_address;
 
+struct tee_net {
+	struct list_head priv_list;
+	/* lock protects the priv_list */
+	struct mutex lock;
+};
+
 static unsigned int
 tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
 {
@@ -51,17 +60,16 @@ tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 }
 #endif
 
-static DEFINE_MUTEX(priv_list_mutex);
-static LIST_HEAD(priv_list);
-
 static int tee_netdev_event(struct notifier_block *this, unsigned long event,
 			    void *ptr)
 {
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct net *net = dev_net(dev);
+	struct tee_net *tn = net_generic(net, tee_net_id);
 	struct xt_tee_priv *priv;
 
-	mutex_lock(&priv_list_mutex);
-	list_for_each_entry(priv, &priv_list, list) {
+	mutex_lock(&tn->lock);
+	list_for_each_entry(priv, &tn->priv_list, list) {
 		switch (event) {
 		case NETDEV_REGISTER:
 			if (!strcmp(dev->name, priv->tginfo->oif))
@@ -79,13 +87,14 @@ static int tee_netdev_event(struct notifier_block *this, unsigned long event,
 			break;
 		}
 	}
-	mutex_unlock(&priv_list_mutex);
+	mutex_unlock(&tn->lock);
 
 	return NOTIFY_DONE;
 }
 
 static int tee_tg_check(const struct xt_tgchk_param *par)
 {
+	struct tee_net *tn = net_generic(par->net, tee_net_id);
 	struct xt_tee_tginfo *info = par->targinfo;
 	struct xt_tee_priv *priv;
 
@@ -95,6 +104,8 @@ static int tee_tg_check(const struct xt_tgchk_param *par)
 		return -EINVAL;
 
 	if (info->oif[0]) {
+		struct net_device *dev;
+
 		if (info->oif[sizeof(info->oif)-1] != '\0')
 			return -EINVAL;
 
@@ -106,9 +117,14 @@ static int tee_tg_check(const struct xt_tgchk_param *par)
 		priv->oif = -1;
 		info->priv = priv;
 
-		mutex_lock(&priv_list_mutex);
-		list_add(&priv->list, &priv_list);
-		mutex_unlock(&priv_list_mutex);
+		dev = dev_get_by_name(par->net, info->oif);
+		if (dev) {
+			priv->oif = dev->ifindex;
+			dev_put(dev);
+		}
+		mutex_lock(&tn->lock);
+		list_add(&priv->list, &tn->priv_list);
+		mutex_unlock(&tn->lock);
 	} else
 		info->priv = NULL;
 
@@ -118,12 +134,13 @@ static int tee_tg_check(const struct xt_tgchk_param *par)
 
 static void tee_tg_destroy(const struct xt_tgdtor_param *par)
 {
+	struct tee_net *tn = net_generic(par->net, tee_net_id);
 	struct xt_tee_tginfo *info = par->targinfo;
 
 	if (info->priv) {
-		mutex_lock(&priv_list_mutex);
+		mutex_lock(&tn->lock);
 		list_del(&info->priv->list);
-		mutex_unlock(&priv_list_mutex);
+		mutex_unlock(&tn->lock);
 		kfree(info->priv);
 	}
 	static_key_slow_dec(&xt_tee_enabled);
@@ -156,6 +173,21 @@ static struct xt_target tee_tg_reg[] __read_mostly = {
 #endif
 };
 
+static int __net_init tee_net_init(struct net *net)
+{
+	struct tee_net *tn = net_generic(net, tee_net_id);
+
+	INIT_LIST_HEAD(&tn->priv_list);
+	mutex_init(&tn->lock);
+	return 0;
+}
+
+static struct pernet_operations tee_net_ops = {
+	.init = tee_net_init,
+	.id   = &tee_net_id,
+	.size = sizeof(struct tee_net),
+};
+
 static struct notifier_block tee_netdev_notifier = {
 	.notifier_call = tee_netdev_event,
 };
@@ -164,22 +196,32 @@ static int __init tee_tg_init(void)
 {
 	int ret;
 
-	ret = xt_register_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
-	if (ret)
+	ret = register_pernet_subsys(&tee_net_ops);
+	if (ret < 0)
 		return ret;
+
+	ret = xt_register_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
+	if (ret < 0)
+		goto cleanup_subsys;
+
 	ret = register_netdevice_notifier(&tee_netdev_notifier);
-	if (ret) {
-		xt_unregister_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
-		return ret;
-	}
+	if (ret < 0)
+		goto unregister_targets;
 
 	return 0;
+
+unregister_targets:
+	xt_unregister_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
+cleanup_subsys:
+	unregister_pernet_subsys(&tee_net_ops);
+	return ret;
 }
 
 static void __exit tee_tg_exit(void)
 {
 	unregister_netdevice_notifier(&tee_netdev_notifier);
 	xt_unregister_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
+	unregister_pernet_subsys(&tee_net_ops);
 }
 
 module_init(tee_tg_init);
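The xt_TEE changes replace the global priv_list with storage allocated per network namespace: setting .id and .size in struct pernet_operations makes the core allocate one private blob per netns, retrievable through net_generic(). Below is a minimal, hedged module sketch of that pattern, boiled down from the diff; the demo_* names are hypothetical and it is not the xt_TEE code itself:

/* Minimal sketch of per-netns private data via register_pernet_subsys().
 * With .id and .size set, the core allocates one struct demo_net per
 * network namespace and hands it back through net_generic(). */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

static unsigned int demo_net_id __read_mostly;

struct demo_net {
	struct list_head priv_list;
	struct mutex lock;	/* protects priv_list */
};

static int __net_init demo_net_init(struct net *net)
{
	struct demo_net *dn = net_generic(net, demo_net_id);

	INIT_LIST_HEAD(&dn->priv_list);
	mutex_init(&dn->lock);
	return 0;
}

static struct pernet_operations demo_net_ops = {
	.init = demo_net_init,
	.id   = &demo_net_id,
	.size = sizeof(struct demo_net),
};

static int __init demo_init(void)
{
	return register_pernet_subsys(&demo_net_ops);
}

static void __exit demo_exit(void)
{
	unregister_pernet_subsys(&demo_net_ops);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");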
@@ -216,6 +216,8 @@ static struct xt_target xt_nat_target_reg[] __read_mostly = {
 	{
 		.name		= "DNAT",
 		.revision	= 2,
+		.checkentry	= xt_nat_checkentry,
+		.destroy	= xt_nat_destroy,
 		.target		= xt_dnat_target_v2,
 		.targetsize	= sizeof(struct nf_nat_range2),
 		.table		= "nat",