提交 e3b37f11 编写于 作者: A Aaron Conole 提交者: Pablo Neira Ayuso

netfilter: replace list_head with single linked list

The netfilter hook list never uses the prev pointer, and so can be trimmed to
be a simple singly-linked list.

In addition to having a more lightweight structure for hook traversal,
struct net becomes 5568 bytes (down from 6400) and struct net_device becomes
2176 bytes (down from 2240).
Signed-off-by: Aaron Conole <aconole@bytheb.org>
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
上级 54f17bbc
...@@ -1783,7 +1783,7 @@ struct net_device { ...@@ -1783,7 +1783,7 @@ struct net_device {
#endif #endif
struct netdev_queue __rcu *ingress_queue; struct netdev_queue __rcu *ingress_queue;
#ifdef CONFIG_NETFILTER_INGRESS #ifdef CONFIG_NETFILTER_INGRESS
struct list_head nf_hooks_ingress; struct nf_hook_entry __rcu *nf_hooks_ingress;
#endif #endif
unsigned char broadcast[MAX_ADDR_LEN]; unsigned char broadcast[MAX_ADDR_LEN];
......
...@@ -55,12 +55,34 @@ struct nf_hook_state { ...@@ -55,12 +55,34 @@ struct nf_hook_state {
struct net_device *out; struct net_device *out;
struct sock *sk; struct sock *sk;
struct net *net; struct net *net;
struct list_head *hook_list; struct nf_hook_entry __rcu *hook_entries;
int (*okfn)(struct net *, struct sock *, struct sk_buff *); int (*okfn)(struct net *, struct sock *, struct sk_buff *);
}; };
typedef unsigned int nf_hookfn(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state);
struct nf_hook_ops {
struct list_head list;
/* User fills in from here down. */
nf_hookfn *hook;
struct net_device *dev;
void *priv;
u_int8_t pf;
unsigned int hooknum;
/* Hooks are ordered in ascending priority. */
int priority;
};
struct nf_hook_entry {
struct nf_hook_entry __rcu *next;
struct nf_hook_ops ops;
const struct nf_hook_ops *orig_ops;
};
static inline void nf_hook_state_init(struct nf_hook_state *p, static inline void nf_hook_state_init(struct nf_hook_state *p,
struct list_head *hook_list, struct nf_hook_entry *hook_entry,
unsigned int hook, unsigned int hook,
int thresh, u_int8_t pf, int thresh, u_int8_t pf,
struct net_device *indev, struct net_device *indev,
...@@ -76,26 +98,11 @@ static inline void nf_hook_state_init(struct nf_hook_state *p, ...@@ -76,26 +98,11 @@ static inline void nf_hook_state_init(struct nf_hook_state *p,
p->out = outdev; p->out = outdev;
p->sk = sk; p->sk = sk;
p->net = net; p->net = net;
p->hook_list = hook_list; RCU_INIT_POINTER(p->hook_entries, hook_entry);
p->okfn = okfn; p->okfn = okfn;
} }
typedef unsigned int nf_hookfn(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state);
struct nf_hook_ops {
struct list_head list;
/* User fills in from here down. */
nf_hookfn *hook;
struct net_device *dev;
void *priv;
u_int8_t pf;
unsigned int hooknum;
/* Hooks are ordered in ascending priority. */
int priority;
};
struct nf_sockopt_ops { struct nf_sockopt_ops {
struct list_head list; struct list_head list;
...@@ -161,7 +168,8 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook, ...@@ -161,7 +168,8 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
int (*okfn)(struct net *, struct sock *, struct sk_buff *), int (*okfn)(struct net *, struct sock *, struct sk_buff *),
int thresh) int thresh)
{ {
struct list_head *hook_list; struct nf_hook_entry *hook_head;
int ret = 1;
#ifdef HAVE_JUMP_LABEL #ifdef HAVE_JUMP_LABEL
if (__builtin_constant_p(pf) && if (__builtin_constant_p(pf) &&
...@@ -170,22 +178,19 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook, ...@@ -170,22 +178,19 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
return 1; return 1;
#endif #endif
hook_list = &net->nf.hooks[pf][hook]; rcu_read_lock();
hook_head = rcu_dereference(net->nf.hooks[pf][hook]);
if (!list_empty(hook_list)) { if (hook_head) {
struct nf_hook_state state; struct nf_hook_state state;
int ret;
/* We may already have this, but read-locks nest anyway */ nf_hook_state_init(&state, hook_head, hook, thresh,
rcu_read_lock();
nf_hook_state_init(&state, hook_list, hook, thresh,
pf, indev, outdev, sk, net, okfn); pf, indev, outdev, sk, net, okfn);
ret = nf_hook_slow(skb, &state); ret = nf_hook_slow(skb, &state);
rcu_read_unlock();
return ret;
} }
return 1; rcu_read_unlock();
return ret;
} }
static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
......
...@@ -11,23 +11,30 @@ static inline bool nf_hook_ingress_active(const struct sk_buff *skb) ...@@ -11,23 +11,30 @@ static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS])) if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS]))
return false; return false;
#endif #endif
return !list_empty(&skb->dev->nf_hooks_ingress); return rcu_access_pointer(skb->dev->nf_hooks_ingress);
} }
/* caller must hold rcu_read_lock */ /* caller must hold rcu_read_lock */
static inline int nf_hook_ingress(struct sk_buff *skb) static inline int nf_hook_ingress(struct sk_buff *skb)
{ {
struct nf_hook_entry *e = rcu_dereference(skb->dev->nf_hooks_ingress);
struct nf_hook_state state; struct nf_hook_state state;
nf_hook_state_init(&state, &skb->dev->nf_hooks_ingress, /* Must recheck the ingress hook head, in the event it became NULL
NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV, * after the check in nf_hook_ingress_active evaluated to true.
skb->dev, NULL, NULL, dev_net(skb->dev), NULL); */
if (unlikely(!e))
return 0;
nf_hook_state_init(&state, e, NF_NETDEV_INGRESS, INT_MIN,
NFPROTO_NETDEV, skb->dev, NULL, NULL,
dev_net(skb->dev), NULL);
return nf_hook_slow(skb, &state); return nf_hook_slow(skb, &state);
} }
static inline void nf_hook_ingress_init(struct net_device *dev) static inline void nf_hook_ingress_init(struct net_device *dev)
{ {
INIT_LIST_HEAD(&dev->nf_hooks_ingress); RCU_INIT_POINTER(dev->nf_hooks_ingress, NULL);
} }
#else /* CONFIG_NETFILTER_INGRESS */ #else /* CONFIG_NETFILTER_INGRESS */
static inline int nf_hook_ingress_active(struct sk_buff *skb) static inline int nf_hook_ingress_active(struct sk_buff *skb)
......
...@@ -11,7 +11,6 @@ struct nf_queue_entry { ...@@ -11,7 +11,6 @@ struct nf_queue_entry {
struct sk_buff *skb; struct sk_buff *skb;
unsigned int id; unsigned int id;
struct nf_hook_ops *elem;
struct nf_hook_state state; struct nf_hook_state state;
u16 size; /* sizeof(entry) + saved route keys */ u16 size; /* sizeof(entry) + saved route keys */
...@@ -25,7 +24,7 @@ struct nf_queue_handler { ...@@ -25,7 +24,7 @@ struct nf_queue_handler {
int (*outfn)(struct nf_queue_entry *entry, int (*outfn)(struct nf_queue_entry *entry,
unsigned int queuenum); unsigned int queuenum);
void (*nf_hook_drop)(struct net *net, void (*nf_hook_drop)(struct net *net,
struct nf_hook_ops *ops); const struct nf_hook_entry *hooks);
}; };
void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh); void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh);
......
...@@ -16,6 +16,6 @@ struct netns_nf { ...@@ -16,6 +16,6 @@ struct netns_nf {
#ifdef CONFIG_SYSCTL #ifdef CONFIG_SYSCTL
struct ctl_table_header *nf_log_dir_header; struct ctl_table_header *nf_log_dir_header;
#endif #endif
struct list_head hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; struct nf_hook_entry __rcu *hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
}; };
#endif #endif
...@@ -1002,28 +1002,21 @@ int br_nf_hook_thresh(unsigned int hook, struct net *net, ...@@ -1002,28 +1002,21 @@ int br_nf_hook_thresh(unsigned int hook, struct net *net,
int (*okfn)(struct net *, struct sock *, int (*okfn)(struct net *, struct sock *,
struct sk_buff *)) struct sk_buff *))
{ {
struct nf_hook_ops *elem; struct nf_hook_entry *elem;
struct nf_hook_state state; struct nf_hook_state state;
struct list_head *head;
int ret; int ret;
head = &net->nf.hooks[NFPROTO_BRIDGE][hook]; elem = rcu_dereference(net->nf.hooks[NFPROTO_BRIDGE][hook]);
list_for_each_entry_rcu(elem, head, list) { while (elem && (elem->ops.priority <= NF_BR_PRI_BRNF))
struct nf_hook_ops *next; elem = rcu_dereference(elem->next);
next = list_entry_rcu(list_next_rcu(&elem->list), if (!elem)
struct nf_hook_ops, list);
if (next->priority <= NF_BR_PRI_BRNF)
continue;
}
if (&elem->list == head)
return okfn(net, sk, skb); return okfn(net, sk, skb);
/* We may already have this, but read-locks nest anyway */ /* We may already have this, but read-locks nest anyway */
rcu_read_lock(); rcu_read_lock();
nf_hook_state_init(&state, head, hook, NF_BR_PRI_BRNF + 1, nf_hook_state_init(&state, elem, hook, NF_BR_PRI_BRNF + 1,
NFPROTO_BRIDGE, indev, outdev, sk, net, okfn); NFPROTO_BRIDGE, indev, outdev, sk, net, okfn);
ret = nf_hook_slow(skb, &state); ret = nf_hook_slow(skb, &state);
......
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
#include <linux/proc_fs.h> #include <linux/proc_fs.h>
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h> #include <net/net_namespace.h>
#include <net/sock.h> #include <net/sock.h>
...@@ -61,33 +62,50 @@ EXPORT_SYMBOL(nf_hooks_needed); ...@@ -61,33 +62,50 @@ EXPORT_SYMBOL(nf_hooks_needed);
#endif #endif
static DEFINE_MUTEX(nf_hook_mutex); static DEFINE_MUTEX(nf_hook_mutex);
#define nf_entry_dereference(e) \
rcu_dereference_protected(e, lockdep_is_held(&nf_hook_mutex))
static struct list_head *nf_find_hook_list(struct net *net, static struct nf_hook_entry *nf_hook_entry_head(struct net *net,
const struct nf_hook_ops *reg) const struct nf_hook_ops *reg)
{ {
struct list_head *hook_list = NULL; struct nf_hook_entry *hook_head = NULL;
if (reg->pf != NFPROTO_NETDEV) if (reg->pf != NFPROTO_NETDEV)
hook_list = &net->nf.hooks[reg->pf][reg->hooknum]; hook_head = nf_entry_dereference(net->nf.hooks[reg->pf]
[reg->hooknum]);
else if (reg->hooknum == NF_NETDEV_INGRESS) { else if (reg->hooknum == NF_NETDEV_INGRESS) {
#ifdef CONFIG_NETFILTER_INGRESS #ifdef CONFIG_NETFILTER_INGRESS
if (reg->dev && dev_net(reg->dev) == net) if (reg->dev && dev_net(reg->dev) == net)
hook_list = &reg->dev->nf_hooks_ingress; hook_head =
nf_entry_dereference(
reg->dev->nf_hooks_ingress);
#endif #endif
} }
return hook_list; return hook_head;
} }
struct nf_hook_entry { /* must hold nf_hook_mutex */
const struct nf_hook_ops *orig_ops; static void nf_set_hooks_head(struct net *net, const struct nf_hook_ops *reg,
struct nf_hook_ops ops; struct nf_hook_entry *entry)
}; {
switch (reg->pf) {
case NFPROTO_NETDEV:
/* We already checked in nf_register_net_hook() that this is
* used from ingress.
*/
rcu_assign_pointer(reg->dev->nf_hooks_ingress, entry);
break;
default:
rcu_assign_pointer(net->nf.hooks[reg->pf][reg->hooknum],
entry);
break;
}
}
int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg) int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
{ {
struct list_head *hook_list; struct nf_hook_entry *hooks_entry;
struct nf_hook_entry *entry; struct nf_hook_entry *entry;
struct nf_hook_ops *elem;
if (reg->pf == NFPROTO_NETDEV && if (reg->pf == NFPROTO_NETDEV &&
(reg->hooknum != NF_NETDEV_INGRESS || (reg->hooknum != NF_NETDEV_INGRESS ||
...@@ -100,19 +118,30 @@ int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg) ...@@ -100,19 +118,30 @@ int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
entry->orig_ops = reg; entry->orig_ops = reg;
entry->ops = *reg; entry->ops = *reg;
entry->next = NULL;
mutex_lock(&nf_hook_mutex);
hooks_entry = nf_hook_entry_head(net, reg);
hook_list = nf_find_hook_list(net, reg); if (hooks_entry && hooks_entry->orig_ops->priority > reg->priority) {
if (!hook_list) { /* This is the case where we need to insert at the head */
kfree(entry); entry->next = hooks_entry;
return -ENOENT; hooks_entry = NULL;
} }
mutex_lock(&nf_hook_mutex); while (hooks_entry &&
list_for_each_entry(elem, hook_list, list) { reg->priority >= hooks_entry->orig_ops->priority &&
if (reg->priority < elem->priority) nf_entry_dereference(hooks_entry->next)) {
break; hooks_entry = nf_entry_dereference(hooks_entry->next);
}
if (hooks_entry) {
entry->next = nf_entry_dereference(hooks_entry->next);
rcu_assign_pointer(hooks_entry->next, entry);
} else {
nf_set_hooks_head(net, reg, entry);
} }
list_add_rcu(&entry->ops.list, elem->list.prev);
mutex_unlock(&nf_hook_mutex); mutex_unlock(&nf_hook_mutex);
#ifdef CONFIG_NETFILTER_INGRESS #ifdef CONFIG_NETFILTER_INGRESS
if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS) if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
...@@ -127,24 +156,33 @@ EXPORT_SYMBOL(nf_register_net_hook); ...@@ -127,24 +156,33 @@ EXPORT_SYMBOL(nf_register_net_hook);
void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg) void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
{ {
struct list_head *hook_list; struct nf_hook_entry *hooks_entry;
struct nf_hook_entry *entry;
struct nf_hook_ops *elem;
hook_list = nf_find_hook_list(net, reg);
if (!hook_list)
return;
mutex_lock(&nf_hook_mutex); mutex_lock(&nf_hook_mutex);
list_for_each_entry(elem, hook_list, list) { hooks_entry = nf_hook_entry_head(net, reg);
entry = container_of(elem, struct nf_hook_entry, ops); if (hooks_entry->orig_ops == reg) {
if (entry->orig_ops == reg) { nf_set_hooks_head(net, reg,
list_del_rcu(&entry->ops.list); nf_entry_dereference(hooks_entry->next));
break; goto unlock;
}
while (hooks_entry && nf_entry_dereference(hooks_entry->next)) {
struct nf_hook_entry *next =
nf_entry_dereference(hooks_entry->next);
struct nf_hook_entry *nnext;
if (next->orig_ops != reg) {
hooks_entry = next;
continue;
} }
nnext = nf_entry_dereference(next->next);
rcu_assign_pointer(hooks_entry->next, nnext);
hooks_entry = next;
break;
} }
unlock:
mutex_unlock(&nf_hook_mutex); mutex_unlock(&nf_hook_mutex);
if (&elem->list == hook_list) { if (!hooks_entry) {
WARN(1, "nf_unregister_net_hook: hook not found!\n"); WARN(1, "nf_unregister_net_hook: hook not found!\n");
return; return;
} }
...@@ -156,10 +194,10 @@ void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg) ...@@ -156,10 +194,10 @@ void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]); static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
#endif #endif
synchronize_net(); synchronize_net();
nf_queue_nf_hook_drop(net, &entry->ops); nf_queue_nf_hook_drop(net, hooks_entry);
/* other cpu might still process nfqueue verdict that used reg */ /* other cpu might still process nfqueue verdict that used reg */
synchronize_net(); synchronize_net();
kfree(entry); kfree(hooks_entry);
} }
EXPORT_SYMBOL(nf_unregister_net_hook); EXPORT_SYMBOL(nf_unregister_net_hook);
...@@ -258,10 +296,9 @@ void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n) ...@@ -258,10 +296,9 @@ void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n)
} }
EXPORT_SYMBOL(nf_unregister_hooks); EXPORT_SYMBOL(nf_unregister_hooks);
unsigned int nf_iterate(struct list_head *head, unsigned int nf_iterate(struct sk_buff *skb,
struct sk_buff *skb,
struct nf_hook_state *state, struct nf_hook_state *state,
struct nf_hook_ops **elemp) struct nf_hook_entry **entryp)
{ {
unsigned int verdict; unsigned int verdict;
...@@ -269,20 +306,23 @@ unsigned int nf_iterate(struct list_head *head, ...@@ -269,20 +306,23 @@ unsigned int nf_iterate(struct list_head *head,
* The caller must not block between calls to this * The caller must not block between calls to this
* function because of risk of continuing from deleted element. * function because of risk of continuing from deleted element.
*/ */
list_for_each_entry_continue_rcu((*elemp), head, list) { while (*entryp) {
if (state->thresh > (*elemp)->priority) if (state->thresh > (*entryp)->ops.priority) {
*entryp = rcu_dereference((*entryp)->next);
continue; continue;
}
/* Optimization: we don't need to hold module /* Optimization: we don't need to hold module
reference here, since function can't sleep. --RR */ reference here, since function can't sleep. --RR */
repeat: repeat:
verdict = (*elemp)->hook((*elemp)->priv, skb, state); verdict = (*entryp)->ops.hook((*entryp)->ops.priv, skb, state);
if (verdict != NF_ACCEPT) { if (verdict != NF_ACCEPT) {
#ifdef CONFIG_NETFILTER_DEBUG #ifdef CONFIG_NETFILTER_DEBUG
if (unlikely((verdict & NF_VERDICT_MASK) if (unlikely((verdict & NF_VERDICT_MASK)
> NF_MAX_VERDICT)) { > NF_MAX_VERDICT)) {
NFDEBUG("Evil return from %p(%u).\n", NFDEBUG("Evil return from %p(%u).\n",
(*elemp)->hook, state->hook); (*entryp)->ops.hook, state->hook);
*entryp = rcu_dereference((*entryp)->next);
continue; continue;
} }
#endif #endif
...@@ -290,6 +330,7 @@ unsigned int nf_iterate(struct list_head *head, ...@@ -290,6 +330,7 @@ unsigned int nf_iterate(struct list_head *head,
return verdict; return verdict;
goto repeat; goto repeat;
} }
*entryp = rcu_dereference((*entryp)->next);
} }
return NF_ACCEPT; return NF_ACCEPT;
} }
...@@ -299,13 +340,13 @@ unsigned int nf_iterate(struct list_head *head, ...@@ -299,13 +340,13 @@ unsigned int nf_iterate(struct list_head *head,
* -EPERM for NF_DROP, 0 otherwise. Caller must hold rcu_read_lock. */ * -EPERM for NF_DROP, 0 otherwise. Caller must hold rcu_read_lock. */
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state) int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state)
{ {
struct nf_hook_ops *elem; struct nf_hook_entry *entry;
unsigned int verdict; unsigned int verdict;
int ret = 0; int ret = 0;
elem = list_entry_rcu(state->hook_list, struct nf_hook_ops, list); entry = rcu_dereference(state->hook_entries);
next_hook: next_hook:
verdict = nf_iterate(state->hook_list, skb, state, &elem); verdict = nf_iterate(skb, state, &entry);
if (verdict == NF_ACCEPT || verdict == NF_STOP) { if (verdict == NF_ACCEPT || verdict == NF_STOP) {
ret = 1; ret = 1;
} else if ((verdict & NF_VERDICT_MASK) == NF_DROP) { } else if ((verdict & NF_VERDICT_MASK) == NF_DROP) {
...@@ -314,8 +355,10 @@ int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state) ...@@ -314,8 +355,10 @@ int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state)
if (ret == 0) if (ret == 0)
ret = -EPERM; ret = -EPERM;
} else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) { } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
int err = nf_queue(skb, elem, state, int err;
verdict >> NF_VERDICT_QBITS);
RCU_INIT_POINTER(state->hook_entries, entry);
err = nf_queue(skb, state, verdict >> NF_VERDICT_QBITS);
if (err < 0) { if (err < 0) {
if (err == -ESRCH && if (err == -ESRCH &&
(verdict & NF_VERDICT_FLAG_QUEUE_BYPASS)) (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
...@@ -442,7 +485,7 @@ static int __net_init netfilter_net_init(struct net *net) ...@@ -442,7 +485,7 @@ static int __net_init netfilter_net_init(struct net *net)
for (i = 0; i < ARRAY_SIZE(net->nf.hooks); i++) { for (i = 0; i < ARRAY_SIZE(net->nf.hooks); i++) {
for (h = 0; h < NF_MAX_HOOKS; h++) for (h = 0; h < NF_MAX_HOOKS; h++)
INIT_LIST_HEAD(&net->nf.hooks[i][h]); RCU_INIT_POINTER(net->nf.hooks[i][h], NULL);
} }
#ifdef CONFIG_PROC_FS #ifdef CONFIG_PROC_FS
......
...@@ -13,13 +13,13 @@ ...@@ -13,13 +13,13 @@
/* core.c */ /* core.c */
unsigned int nf_iterate(struct list_head *head, struct sk_buff *skb, unsigned int nf_iterate(struct sk_buff *skb, struct nf_hook_state *state,
struct nf_hook_state *state, struct nf_hook_ops **elemp); struct nf_hook_entry **entryp);
/* nf_queue.c */ /* nf_queue.c */
int nf_queue(struct sk_buff *skb, struct nf_hook_ops *elem, int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
struct nf_hook_state *state, unsigned int queuenum); unsigned int queuenum);
void nf_queue_nf_hook_drop(struct net *net, struct nf_hook_ops *ops); void nf_queue_nf_hook_drop(struct net *net, const struct nf_hook_entry *entry);
int __init netfilter_queue_init(void); int __init netfilter_queue_init(void);
/* nf_log.c */ /* nf_log.c */
......
...@@ -96,14 +96,14 @@ void nf_queue_entry_get_refs(struct nf_queue_entry *entry) ...@@ -96,14 +96,14 @@ void nf_queue_entry_get_refs(struct nf_queue_entry *entry)
} }
EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs); EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
void nf_queue_nf_hook_drop(struct net *net, struct nf_hook_ops *ops) void nf_queue_nf_hook_drop(struct net *net, const struct nf_hook_entry *entry)
{ {
const struct nf_queue_handler *qh; const struct nf_queue_handler *qh;
rcu_read_lock(); rcu_read_lock();
qh = rcu_dereference(net->nf.queue_handler); qh = rcu_dereference(net->nf.queue_handler);
if (qh) if (qh)
qh->nf_hook_drop(net, ops); qh->nf_hook_drop(net, entry);
rcu_read_unlock(); rcu_read_unlock();
} }
...@@ -112,7 +112,6 @@ void nf_queue_nf_hook_drop(struct net *net, struct nf_hook_ops *ops) ...@@ -112,7 +112,6 @@ void nf_queue_nf_hook_drop(struct net *net, struct nf_hook_ops *ops)
* through nf_reinject(). * through nf_reinject().
*/ */
int nf_queue(struct sk_buff *skb, int nf_queue(struct sk_buff *skb,
struct nf_hook_ops *elem,
struct nf_hook_state *state, struct nf_hook_state *state,
unsigned int queuenum) unsigned int queuenum)
{ {
...@@ -141,7 +140,6 @@ int nf_queue(struct sk_buff *skb, ...@@ -141,7 +140,6 @@ int nf_queue(struct sk_buff *skb,
*entry = (struct nf_queue_entry) { *entry = (struct nf_queue_entry) {
.skb = skb, .skb = skb,
.elem = elem,
.state = *state, .state = *state,
.size = sizeof(*entry) + afinfo->route_key_size, .size = sizeof(*entry) + afinfo->route_key_size,
}; };
...@@ -165,11 +163,15 @@ int nf_queue(struct sk_buff *skb, ...@@ -165,11 +163,15 @@ int nf_queue(struct sk_buff *skb,
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict) void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{ {
struct nf_hook_entry *hook_entry;
struct sk_buff *skb = entry->skb; struct sk_buff *skb = entry->skb;
struct nf_hook_ops *elem = entry->elem;
const struct nf_afinfo *afinfo; const struct nf_afinfo *afinfo;
struct nf_hook_ops *elem;
int err; int err;
hook_entry = rcu_dereference(entry->state.hook_entries);
elem = &hook_entry->ops;
nf_queue_entry_release_refs(entry); nf_queue_entry_release_refs(entry);
/* Continue traversal iff userspace said ok... */ /* Continue traversal iff userspace said ok... */
...@@ -186,8 +188,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict) ...@@ -186,8 +188,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
if (verdict == NF_ACCEPT) { if (verdict == NF_ACCEPT) {
next_hook: next_hook:
verdict = nf_iterate(entry->state.hook_list, verdict = nf_iterate(skb, &entry->state, &hook_entry);
skb, &entry->state, &elem);
} }
switch (verdict & NF_VERDICT_MASK) { switch (verdict & NF_VERDICT_MASK) {
...@@ -198,7 +199,8 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict) ...@@ -198,7 +199,8 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
local_bh_enable(); local_bh_enable();
break; break;
case NF_QUEUE: case NF_QUEUE:
err = nf_queue(skb, elem, &entry->state, RCU_INIT_POINTER(entry->state.hook_entries, hook_entry);
err = nf_queue(skb, &entry->state,
verdict >> NF_VERDICT_QBITS); verdict >> NF_VERDICT_QBITS);
if (err < 0) { if (err < 0) {
if (err == -ESRCH && if (err == -ESRCH &&
......
...@@ -917,12 +917,14 @@ static struct notifier_block nfqnl_dev_notifier = { ...@@ -917,12 +917,14 @@ static struct notifier_block nfqnl_dev_notifier = {
.notifier_call = nfqnl_rcv_dev_event, .notifier_call = nfqnl_rcv_dev_event,
}; };
static int nf_hook_cmp(struct nf_queue_entry *entry, unsigned long ops_ptr) static int nf_hook_cmp(struct nf_queue_entry *entry, unsigned long entry_ptr)
{ {
return entry->elem == (struct nf_hook_ops *)ops_ptr; return rcu_access_pointer(entry->state.hook_entries) ==
(struct nf_hook_entry *)entry_ptr;
} }
static void nfqnl_nf_hook_drop(struct net *net, struct nf_hook_ops *hook) static void nfqnl_nf_hook_drop(struct net *net,
const struct nf_hook_entry *hook)
{ {
struct nfnl_queue_net *q = nfnl_queue_pernet(net); struct nfnl_queue_net *q = nfnl_queue_pernet(net);
int i; int i;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册