Commit 3e86638e authored by Florian Westphal, committed by Pablo Neira Ayuso

netfilter: conntrack: consider ct netns in early_drop logic

When iterating, skip conntrack entries living in a different netns.

We could ignore the netns and kill some other non-assured entry, but that
has two problems:

- a netns could kill non-assured conntracks belonging to another namespace
- we would start to 'over-subscribe' the affected/overlimit netns.

Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Parent 56d52d48
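The heart of the change is the per-entry test in the scan loop: an entry is now skipped unless it is non-assured, not dying, and owned by the requesting netns; only then does early_drop try to take a reference. Excerpted from the diff below (tmp is the entry under inspection, net is the overlimit namespace):

	if (test_bit(IPS_ASSURED_BIT, &tmp->status) ||
	    !net_eq(nf_ct_net(tmp), net) ||
	    nf_ct_is_dying(tmp))
		continue;

	if (atomic_inc_not_zero(&tmp->ct_general.use)) {
		ct = tmp;	/* non-assured, live, same-netns candidate */
		break;
	}

The scan is also rebounded: it now walks at most NF_CT_EVICTION_RANGE buckets directly, instead of walking up to the whole table while counting inspected entries in cnt.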
@@ -764,18 +764,20 @@ static noinline int early_drop(struct net *net, unsigned int _hash)
 {
 	/* Use oldest entry, which is roughly LRU */
 	struct nf_conntrack_tuple_hash *h;
-	struct nf_conn *ct = NULL, *tmp;
+	struct nf_conn *tmp;
 	struct hlist_nulls_node *n;
-	unsigned int i = 0, cnt = 0;
-	int dropped = 0;
-	unsigned int hash, sequence;
+	unsigned int i, hash, sequence;
+	struct nf_conn *ct = NULL;
 	spinlock_t *lockp;
+	bool ret = false;
+
+	i = 0;

 	local_bh_disable();
 restart:
 	sequence = read_seqcount_begin(&nf_conntrack_generation);
-	hash = scale_hash(_hash);
-	for (; i < nf_conntrack_htable_size; i++) {
+	for (; i < NF_CT_EVICTION_RANGE; i++) {
+		hash = scale_hash(_hash++);
 		lockp = &nf_conntrack_locks[hash % CONNTRACK_LOCKS];
 		nf_conntrack_lock(lockp);
 		if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
@@ -785,35 +787,40 @@ static noinline int early_drop(struct net *net, unsigned int _hash)
 		hlist_nulls_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash],
 					       hnnode) {
 			tmp = nf_ct_tuplehash_to_ctrack(h);
-			if (!test_bit(IPS_ASSURED_BIT, &tmp->status) &&
-			    !nf_ct_is_dying(tmp) &&
-			    atomic_inc_not_zero(&tmp->ct_general.use)) {
+
+			if (test_bit(IPS_ASSURED_BIT, &tmp->status) ||
+			    !net_eq(nf_ct_net(tmp), net) ||
+			    nf_ct_is_dying(tmp))
+				continue;
+
+			if (atomic_inc_not_zero(&tmp->ct_general.use)) {
 				ct = tmp;
 				break;
 			}
-			cnt++;
 		}

-		hash = (hash + 1) % nf_conntrack_htable_size;
 		spin_unlock(lockp);
-
-		if (ct || cnt >= NF_CT_EVICTION_RANGE)
+		if (ct)
 			break;
 	}
+
 	local_bh_enable();

 	if (!ct)
-		return dropped;
+		return false;

-	if (del_timer(&ct->timeout)) {
+	/* kill only if in same netns -- might have moved due to
+	 * SLAB_DESTROY_BY_RCU rules
+	 */
+	if (net_eq(nf_ct_net(ct), net) && del_timer(&ct->timeout)) {
 		if (nf_ct_delete(ct, 0, 0)) {
-			dropped = 1;
 			NF_CT_STAT_INC_ATOMIC(net, early_drop);
+			ret = true;
 		}
 	}

 	nf_ct_put(ct);
-	return dropped;
+	return ret;
 }

 static struct nf_conn *
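Note the repeated net_eq() check before del_timer(): conntrack objects come from a SLAB_DESTROY_BY_RCU slab cache, so the memory of the selected entry can be freed and immediately reused for a new conntrack, possibly in a different namespace, while we still hold a pointer to it. atomic_inc_not_zero() only proves the slot currently holds a live object, not that it is still the object that was selected, so identity must be re-validated after the reference is taken. A minimal sketch of this generic lookup pattern, with hypothetical helpers lookup(), matches() and put_ref() (not kernel APIs):

	rcu_read_lock();
	obj = lookup(key);			/* may race with free + reuse */
	if (obj) {
		if (!atomic_inc_not_zero(&obj->refcnt)) {
			obj = NULL;		/* object was being freed */
		} else if (!matches(obj, key)) {
			put_ref(obj);		/* slot was reused for another object */
			obj = NULL;
		}
	}
	rcu_read_unlock();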