Commit 743b8bb6 authored by David S. Miller

Merge branch 'act-ife-misc'

Alexander Aring says:

====================
sched: act: ife: UAPI checks and performance tweaks

this patch series starts with a patch that adds a check for IFE_ENCODE
and IFE_DECODE when an ife act gets created or updated, and moves the
handling of these cases into the act callback only.

The second patch switches to per-cpu counters and moves the spinlock
around so that it is held for less time in the act callback.

The last patch uses RCU for parameter updates and also moves the
spinlock, for the same purpose as in patch 2.

Notes:
 - There is still a spinlock around for protecting the metalist and a
   rw-lock for another list. These should be migrated to RCU lists, if
   possible.

 - The dump callback still uses rtnl_dereference. What I was not sure
   about is what rcu_assign_pointer does while the rcu read lock is
   held; I assume the pointer simply gets updated, so we don't have any
   issue here.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
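
For the first patch, the UAPI check itself is small; the following is condensed from the tcf_ife_init() hunk below, where parm holds the netlink TCA_IFE_PARMS payload:

    /* IFE_DECODE is 0, i.e. implied by the absence of IFE_ENCODE, so any
     * other flag bit is unsupported and rejected before the action is
     * created or updated.
     */
    if (parm->flags & ~IFE_ENCODE)
            return -EINVAL;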
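
The per-cpu counter pattern of the second patch, as used in the decode/encode hunks below. ife_count_packet() is a hypothetical wrapper written only to illustrate the pattern; the helper calls inside it are the ones appearing in the diff:

    /* Stats no longer need ife->tcf_lock: each CPU bumps its own
     * counters, which are allocated by passing cpustats=true to
     * tcf_idr_create().
     */
    static int ife_count_packet(struct tcf_ife_info *ife,
                                struct sk_buff *skb, bool drop)
    {
            bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
            tcf_lastuse_update(&ife->tcf_tm);

            if (drop) {
                    qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
                    return TC_ACT_SHOT;
            }

            return ife->tcf_action;
    }

This is also why the tcf_idr_create() call in tcf_ife_init() changes its last argument from false to true.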
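
The RCU pattern of the third patch, and the answer to the note above: a reader that entered rcu_read_lock() before rcu_assign_pointer() keeps seeing the old tcf_ife_params, and kfree_rcu() defers the free until all such readers have left their read-side critical sections. A minimal sketch with hypothetical helper names; the individual calls are the ones from the hunks below:

    /* updater side: runs under RTNL, hence rtnl_dereference() */
    static void ife_swap_params(struct tcf_ife_info *ife,
                                struct tcf_ife_params *p)
    {
            struct tcf_ife_params *p_old;

            p_old = rtnl_dereference(ife->params);
            rcu_assign_pointer(ife->params, p);
            if (p_old)
                    kfree_rcu(p_old, rcu);
    }

    /* reader side: the act fast path, softirq context */
    static u16 ife_read_flags(struct tcf_ife_info *ife)
    {
            u16 flags;

            rcu_read_lock();
            flags = rcu_dereference(ife->params)->flags;
            rcu_read_unlock();

            return flags;
    }
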
@@ -6,12 +6,18 @@
#include <linux/rtnetlink.h>
#include <linux/module.h>
struct tcf_ife_info {
struct tc_action common;
struct tcf_ife_params {
u8 eth_dst[ETH_ALEN];
u8 eth_src[ETH_ALEN];
u16 eth_type;
u16 flags;
struct rcu_head rcu;
};
struct tcf_ife_info {
struct tc_action common;
struct tcf_ife_params __rcu *params;
/* list of metaids allowed */
struct list_head metalist;
};
@@ -406,10 +406,14 @@ static void _tcf_ife_cleanup(struct tc_action *a, int bind)
static void tcf_ife_cleanup(struct tc_action *a, int bind)
{
struct tcf_ife_info *ife = to_ife(a);
struct tcf_ife_params *p;
spin_lock_bh(&ife->tcf_lock);
_tcf_ife_cleanup(a, bind);
spin_unlock_bh(&ife->tcf_lock);
p = rcu_dereference_protected(ife->params, 1);
kfree_rcu(p, rcu);
}
/* under ife->tcf_lock for existing action */
@@ -446,6 +450,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
struct tc_action_net *tn = net_generic(net, ife_net_id);
struct nlattr *tb[TCA_IFE_MAX + 1];
struct nlattr *tb2[IFE_META_MAX + 1];
struct tcf_ife_params *p, *p_old;
struct tcf_ife_info *ife;
u16 ife_type = ETH_P_IFE;
struct tc_ife *parm;
@@ -464,24 +469,41 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
parm = nla_data(tb[TCA_IFE_PARMS]);
/* IFE_DECODE is 0 and indicates the opposite of IFE_ENCODE because
* they cannot run as the same time. Check on all other values which
* are not supported right now.
*/
if (parm->flags & ~IFE_ENCODE)
return -EINVAL;
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p)
return -ENOMEM;
exists = tcf_idr_check(tn, parm->index, a, bind);
if (exists && bind)
if (exists && bind) {
kfree(p);
return 0;
}
if (!exists) {
ret = tcf_idr_create(tn, parm->index, est, a, &act_ife_ops,
bind, false);
if (ret)
bind, true);
if (ret) {
kfree(p);
return ret;
}
ret = ACT_P_CREATED;
} else {
tcf_idr_release(*a, bind);
if (!ovr)
if (!ovr) {
kfree(p);
return -EEXIST;
}
}
ife = to_ife(*a);
ife->flags = parm->flags;
p->flags = parm->flags;
if (parm->flags & IFE_ENCODE) {
if (tb[TCA_IFE_TYPE])
@@ -492,24 +514,25 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
saddr = nla_data(tb[TCA_IFE_SMAC]);
}
if (exists)
spin_lock_bh(&ife->tcf_lock);
ife->tcf_action = parm->action;
if (parm->flags & IFE_ENCODE) {
if (daddr)
ether_addr_copy(ife->eth_dst, daddr);
ether_addr_copy(p->eth_dst, daddr);
else
eth_zero_addr(ife->eth_dst);
eth_zero_addr(p->eth_dst);
if (saddr)
ether_addr_copy(ife->eth_src, saddr);
ether_addr_copy(p->eth_src, saddr);
else
eth_zero_addr(ife->eth_src);
eth_zero_addr(p->eth_src);
ife->eth_type = ife_type;
p->eth_type = ife_type;
}
if (exists)
spin_lock_bh(&ife->tcf_lock);
if (ret == ACT_P_CREATED)
INIT_LIST_HEAD(&ife->metalist);
@@ -525,6 +548,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
if (exists)
spin_unlock_bh(&ife->tcf_lock);
kfree(p);
return err;
}
@@ -545,6 +569,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
if (exists)
spin_unlock_bh(&ife->tcf_lock);
kfree(p);
return err;
}
}
@@ -552,6 +577,11 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
if (exists)
spin_unlock_bh(&ife->tcf_lock);
p_old = rtnl_dereference(ife->params);
rcu_assign_pointer(ife->params, p);
if (p_old)
kfree_rcu(p_old, rcu);
if (ret == ACT_P_CREATED)
tcf_idr_insert(tn, *a);
@@ -563,12 +593,13 @@ static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_ife_info *ife = to_ife(a);
struct tcf_ife_params *p = rtnl_dereference(ife->params);
struct tc_ife opt = {
.index = ife->tcf_index,
.refcnt = ife->tcf_refcnt - ref,
.bindcnt = ife->tcf_bindcnt - bind,
.action = ife->tcf_action,
.flags = ife->flags,
.flags = p->flags,
};
struct tcf_t t;
@@ -579,17 +610,17 @@ static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
if (nla_put_64bit(skb, TCA_IFE_TM, sizeof(t), &t, TCA_IFE_PAD))
goto nla_put_failure;
if (!is_zero_ether_addr(ife->eth_dst)) {
if (nla_put(skb, TCA_IFE_DMAC, ETH_ALEN, ife->eth_dst))
if (!is_zero_ether_addr(p->eth_dst)) {
if (nla_put(skb, TCA_IFE_DMAC, ETH_ALEN, p->eth_dst))
goto nla_put_failure;
}
if (!is_zero_ether_addr(ife->eth_src)) {
if (nla_put(skb, TCA_IFE_SMAC, ETH_ALEN, ife->eth_src))
if (!is_zero_ether_addr(p->eth_src)) {
if (nla_put(skb, TCA_IFE_SMAC, ETH_ALEN, p->eth_src))
goto nla_put_failure;
}
if (nla_put(skb, TCA_IFE_TYPE, 2, &ife->eth_type))
if (nla_put(skb, TCA_IFE_TYPE, 2, &p->eth_type))
goto nla_put_failure;
if (dump_metalist(skb, ife)) {
@@ -631,19 +662,15 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
u8 *tlv_data;
u16 metalen;
spin_lock(&ife->tcf_lock);
bstats_update(&ife->tcf_bstats, skb);
bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
tcf_lastuse_update(&ife->tcf_tm);
spin_unlock(&ife->tcf_lock);
if (skb_at_tc_ingress(skb))
skb_push(skb, skb->dev->hard_header_len);
tlv_data = ife_decode(skb, &metalen);
if (unlikely(!tlv_data)) {
spin_lock(&ife->tcf_lock);
ife->tcf_qstats.drops++;
spin_unlock(&ife->tcf_lock);
qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
return TC_ACT_SHOT;
}
@@ -661,14 +688,12 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
*/
pr_info_ratelimited("Unknown metaid %d dlen %d\n",
mtype, dlen);
ife->tcf_qstats.overlimits++;
qstats_overlimit_inc(this_cpu_ptr(ife->common.cpu_qstats));
}
}
if (WARN_ON(tlv_data != ifehdr_end)) {
spin_lock(&ife->tcf_lock);
ife->tcf_qstats.drops++;
spin_unlock(&ife->tcf_lock);
qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
return TC_ACT_SHOT;
}
@@ -697,7 +722,7 @@ static int ife_get_sz(struct sk_buff *skb, struct tcf_ife_info *ife)
}
static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
struct tcf_result *res)
struct tcf_result *res, struct tcf_ife_params *p)
{
struct tcf_ife_info *ife = to_ife(a);
int action = ife->tcf_action;
@@ -720,23 +745,20 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
exceed_mtu = true;
}
spin_lock(&ife->tcf_lock);
bstats_update(&ife->tcf_bstats, skb);
bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
tcf_lastuse_update(&ife->tcf_tm);
if (!metalen) { /* no metadata to send */
/* abuse overlimits to count when we allow packet
* with no metadata
*/
ife->tcf_qstats.overlimits++;
spin_unlock(&ife->tcf_lock);
qstats_overlimit_inc(this_cpu_ptr(ife->common.cpu_qstats));
return action;
}
/* could be stupid policy setup or mtu config
* so lets be conservative.. */
if ((action == TC_ACT_SHOT) || exceed_mtu) {
ife->tcf_qstats.drops++;
spin_unlock(&ife->tcf_lock);
qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
return TC_ACT_SHOT;
}
@@ -745,6 +767,8 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
ife_meta = ife_encode(skb, metalen);
spin_lock(&ife->tcf_lock);
/* XXX: we dont have a clever way of telling encode to
* not repeat some of the computations that are done by
* ops->presence_check...
@@ -756,25 +780,24 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
}
if (err < 0) {
/* too corrupt to keep around if overwritten */
ife->tcf_qstats.drops++;
spin_unlock(&ife->tcf_lock);
qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
return TC_ACT_SHOT;
}
skboff += err;
}
spin_unlock(&ife->tcf_lock);
oethh = (struct ethhdr *)skb->data;
if (!is_zero_ether_addr(ife->eth_src))
ether_addr_copy(oethh->h_source, ife->eth_src);
if (!is_zero_ether_addr(ife->eth_dst))
ether_addr_copy(oethh->h_dest, ife->eth_dst);
oethh->h_proto = htons(ife->eth_type);
if (!is_zero_ether_addr(p->eth_src))
ether_addr_copy(oethh->h_source, p->eth_src);
if (!is_zero_ether_addr(p->eth_dst))
ether_addr_copy(oethh->h_dest, p->eth_dst);
oethh->h_proto = htons(p->eth_type);
if (skb_at_tc_ingress(skb))
skb_pull(skb, skb->dev->hard_header_len);
spin_unlock(&ife->tcf_lock);
return action;
}
@@ -782,21 +805,19 @@ static int tcf_ife_act(struct sk_buff *skb, const struct tc_action *a,
struct tcf_result *res)
{
struct tcf_ife_info *ife = to_ife(a);
struct tcf_ife_params *p;
int ret;
rcu_read_lock();
p = rcu_dereference(ife->params);
if (p->flags & IFE_ENCODE) {
ret = tcf_ife_encode(skb, a, res, p);
rcu_read_unlock();
return ret;
}
rcu_read_unlock();
if (ife->flags & IFE_ENCODE)
return tcf_ife_encode(skb, a, res);
if (!(ife->flags & IFE_ENCODE))
return tcf_ife_decode(skb, a, res);
pr_info_ratelimited("unknown failure(policy neither de/encode\n");
spin_lock(&ife->tcf_lock);
bstats_update(&ife->tcf_bstats, skb);
tcf_lastuse_update(&ife->tcf_tm);
ife->tcf_qstats.drops++;
spin_unlock(&ife->tcf_lock);
return TC_ACT_SHOT;
return tcf_ife_decode(skb, a, res);
}
static int tcf_ife_walker(struct net *net, struct sk_buff *skb,