Commit 73ca4918 authored by Patrick McHardy, committed by David S. Miller

[NET_SCHED]: act_api: qdisc internal reclassify support

The behaviour of NET_CLS_POLICE for TC_POLICE_RECLASSIFY was to return
it to the qdisc, which could handle it internally or ignore it. With
NET_CLS_ACT however, tc_classify starts over at the first classifier
and never returns it to the qdisc. This makes it impossible to support
qdisc-internal reclassification, which in turn makes it impossible to
remove the old NET_CLS_POLICE code without breaking compatibility since
we have two qdiscs (CBQ and ATM) that support this.

This patch adds a tc_classify_compat function that handles
reclassification the old way and changes CBQ and ATM to use it.

This again is of course not fully backwards compatible with the previous
NET_CLS_ACT behaviour. Unfortunately there is no way to fully maintain
compatibility *and* support qdisc internal reclassification with
NET_CLS_ACT, but this seems like the better choice over keeping the two
incompatible options around forever.
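
For illustration only (not part of the patch): a minimal sketch of the
enqueue-side pattern this enables. A qdisc calls tc_classify_compat() and
decides for itself what a reclassify verdict means, instead of having
tc_classify() silently restart at the first filter. The identifiers
my_classify, my_sched_data, my_reclassify and default_class are made up
for the example; the real users are the CBQ and ATM hunks below.

static struct my_class *my_classify(struct sk_buff *skb, struct Qdisc *sch)
{
	struct my_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	int result;

	/* tc_classify_compat() hands the raw verdict back to the caller
	 * instead of restarting at the first classifier. */
	result = tc_classify_compat(skb, q->filter_list, &res);
	if (result < 0)
		return q->default_class;	/* no filter matched */

#ifdef CONFIG_NET_CLS_ACT
	switch (result) {
	case TC_ACT_SHOT:
		return NULL;			/* caller drops the skb */
	case TC_ACT_RECLASSIFY:
		/* qdisc-internal handling, e.g. fall back to an "excess"
		 * class, as CBQ and ATM do below (hypothetical helper). */
		return my_reclassify(skb, (struct my_class *)res.class);
	}
#endif
	return (struct my_class *)res.class;
}
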
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent f6853e2d
@@ -89,8 +89,10 @@ static inline void qdisc_run(struct net_device *dev)
__qdisc_run(dev);
}
extern int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp,
struct tcf_result *res);
extern int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
struct tcf_result *res);
struct tcf_result *res);
/* Calculate maximal size of packet seen by hard_start_xmit
routine of this device.
@@ -290,7 +290,7 @@ static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
sch->qstats.drops++;
#ifdef CONFIG_NET_CLS_POLICE
#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
goto drop;
@@ -1145,47 +1145,57 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
to this qdisc, (optionally) tests for protocol and asks
specific classifiers.
*/
int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp,
struct tcf_result *res)
{
__be16 protocol = skb->protocol;
int err = 0;
for (; tp; tp = tp->next) {
if ((tp->protocol == protocol ||
tp->protocol == htons(ETH_P_ALL)) &&
(err = tp->classify(skb, tp, res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
#endif
return err;
}
}
return -1;
}
EXPORT_SYMBOL(tc_classify_compat);
int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
struct tcf_result *res)
struct tcf_result *res)
{
int err = 0;
__be16 protocol = skb->protocol;
__be16 protocol;
#ifdef CONFIG_NET_CLS_ACT
struct tcf_proto *otp = tp;
reclassify:
#endif
protocol = skb->protocol;
for ( ; tp; tp = tp->next) {
if ((tp->protocol == protocol ||
tp->protocol == htons(ETH_P_ALL)) &&
(err = tp->classify(skb, tp, res)) >= 0) {
err = tc_classify_compat(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
if ( TC_ACT_RECLASSIFY == err) {
__u32 verd = (__u32) G_TC_VERD(skb->tc_verd);
tp = otp;
if (MAX_REC_LOOP < verd++) {
printk("rule prio %d protocol %02x reclassify is buggy packet dropped\n",
tp->prio&0xffff, ntohs(tp->protocol));
return TC_ACT_SHOT;
}
skb->tc_verd = SET_TC_VERD(skb->tc_verd,verd);
goto reclassify;
} else {
if (skb->tc_verd)
skb->tc_verd = SET_TC_VERD(skb->tc_verd,0);
return err;
}
#else
return err;
#endif
if (err == TC_ACT_RECLASSIFY) {
u32 verd = G_TC_VERD(skb->tc_verd);
tp = otp;
if (verd++ >= MAX_REC_LOOP) {
printk("rule prio %u protocol %02x reclassify loop, "
"packet dropped\n",
tp->prio&0xffff, ntohs(tp->protocol));
return TC_ACT_SHOT;
}
skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
goto reclassify;
}
return -1;
#endif
return err;
}
EXPORT_SYMBOL(tc_classify);
void tcf_destroy(struct tcf_proto *tp)
{
@@ -1252,4 +1262,3 @@ EXPORT_SYMBOL(qdisc_get_rtab);
EXPORT_SYMBOL(qdisc_put_rtab);
EXPORT_SYMBOL(register_qdisc);
EXPORT_SYMBOL(unregister_qdisc);
EXPORT_SYMBOL(tc_classify);
@@ -396,8 +396,9 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
!(flow = (struct atm_flow_data *)atm_tc_get(sch, skb->priority)))
for (flow = p->flows; flow; flow = flow->next)
if (flow->filter_list) {
result = tc_classify(skb, flow->filter_list,
&res);
result = tc_classify_compat(skb,
flow->filter_list,
&res);
if (result < 0)
continue;
flow = (struct atm_flow_data *)res.class;
@@ -420,6 +421,12 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
case TC_ACT_SHOT:
kfree_skb(skb);
goto drop;
case TC_POLICE_RECLASSIFY:
if (flow->excess)
flow = flow->excess;
else
ATM_SKB(skb)->atm_options |= ATM_ATMOPT_CLP;
break;
}
#elif defined(CONFIG_NET_CLS_POLICE)
switch (result) {
@@ -82,7 +82,7 @@ struct cbq_class
unsigned char priority2; /* priority to be used after overlimit */
unsigned char ewma_log; /* time constant for idle time calculation */
unsigned char ovl_strategy;
#ifdef CONFIG_NET_CLS_POLICE
#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
unsigned char police;
#endif
@@ -154,7 +154,7 @@ struct cbq_sched_data
struct cbq_class *active[TC_CBQ_MAXPRIO+1]; /* List of all classes
with backlog */
#ifdef CONFIG_NET_CLS_POLICE
#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
struct cbq_class *rx_class;
#endif
struct cbq_class *tx_class;
@@ -196,7 +196,7 @@ cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
return NULL;
}
#ifdef CONFIG_NET_CLS_POLICE
#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
static struct cbq_class *
cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
@@ -247,7 +247,8 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
/*
* Step 2+n. Apply classifier.
*/
if (!head->filter_list || (result = tc_classify(skb, head->filter_list, &res)) < 0)
if (!head->filter_list ||
(result = tc_classify_compat(skb, head->filter_list, &res)) < 0)
goto fallback;
if ((cl = (void*)res.class) == NULL) {
@@ -267,6 +268,8 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
*qerr = NET_XMIT_SUCCESS;
case TC_ACT_SHOT:
return NULL;
case TC_ACT_RECLASSIFY:
return cbq_reclassify(skb, cl);
}
#elif defined(CONFIG_NET_CLS_POLICE)
switch (result) {
@@ -389,7 +392,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
int ret;
struct cbq_class *cl = cbq_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_POLICE
#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
q->rx_class = cl;
#endif
if (cl == NULL) {
@@ -399,7 +402,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return ret;
}
#ifdef CONFIG_NET_CLS_POLICE
#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
cl->q->__parent = sch;
#endif
if ((ret = cl->q->enqueue(skb, cl->q)) == NET_XMIT_SUCCESS) {
@@ -434,7 +437,7 @@ cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
cbq_mark_toplevel(q, cl);
#ifdef CONFIG_NET_CLS_POLICE
#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
q->rx_class = cl;
cl->q->__parent = sch;
#endif
@@ -670,7 +673,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
}
#ifdef CONFIG_NET_CLS_POLICE
#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
{
@@ -1364,7 +1367,7 @@ static int cbq_set_overlimit(struct cbq_class *cl, struct tc_cbq_ovl *ovl)
return 0;
}
#ifdef CONFIG_NET_CLS_POLICE
#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
static int cbq_set_police(struct cbq_class *cl, struct tc_cbq_police *p)
{
cl->police = p->police;
@@ -1532,7 +1535,7 @@ static __inline__ int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
return -1;
}
#ifdef CONFIG_NET_CLS_POLICE
#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
{
unsigned char *b = skb_tail_pointer(skb);
@@ -1558,7 +1561,7 @@ static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
cbq_dump_rate(skb, cl) < 0 ||
cbq_dump_wrr(skb, cl) < 0 ||
cbq_dump_ovl(skb, cl) < 0 ||
#ifdef CONFIG_NET_CLS_POLICE
#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
cbq_dump_police(skb, cl) < 0 ||
#endif
cbq_dump_fopt(skb, cl) < 0)
@@ -1653,7 +1656,7 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
cl->classid)) == NULL)
return -ENOBUFS;
} else {
#ifdef CONFIG_NET_CLS_POLICE
#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
if (cl->police == TC_POLICE_RECLASSIFY)
new->reshape_fail = cbq_reshape_fail;
#endif
@@ -1718,7 +1721,7 @@ cbq_destroy(struct Qdisc* sch)
struct cbq_class *cl;
unsigned h;
#ifdef CONFIG_NET_CLS_POLICE
#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
q->rx_class = NULL;
#endif
/*
@@ -1747,7 +1750,7 @@ static void cbq_put(struct Qdisc *sch, unsigned long arg)
struct cbq_class *cl = (struct cbq_class*)arg;
if (--cl->refcnt == 0) {
#ifdef CONFIG_NET_CLS_POLICE
#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
struct cbq_sched_data *q = qdisc_priv(sch);
spin_lock_bh(&sch->dev->queue_lock);
@@ -1795,7 +1798,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
RTA_PAYLOAD(tb[TCA_CBQ_WRROPT-1]) < sizeof(struct tc_cbq_wrropt))
return -EINVAL;
#ifdef CONFIG_NET_CLS_POLICE
#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
if (tb[TCA_CBQ_POLICE-1] &&
RTA_PAYLOAD(tb[TCA_CBQ_POLICE-1]) < sizeof(struct tc_cbq_police))
return -EINVAL;
@@ -1838,7 +1841,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
if (tb[TCA_CBQ_OVL_STRATEGY-1])
cbq_set_overlimit(cl, RTA_DATA(tb[TCA_CBQ_OVL_STRATEGY-1]));
#ifdef CONFIG_NET_CLS_POLICE
#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
if (tb[TCA_CBQ_POLICE-1])
cbq_set_police(cl, RTA_DATA(tb[TCA_CBQ_POLICE-1]));
#endif
@@ -1931,7 +1934,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
cl->overlimit = cbq_ovl_classic;
if (tb[TCA_CBQ_OVL_STRATEGY-1])
cbq_set_overlimit(cl, RTA_DATA(tb[TCA_CBQ_OVL_STRATEGY-1]));
#ifdef CONFIG_NET_CLS_POLICE
#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
if (tb[TCA_CBQ_POLICE-1])
cbq_set_police(cl, RTA_DATA(tb[TCA_CBQ_POLICE-1]));
#endif
@@ -1975,7 +1978,7 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
q->tx_class = NULL;
q->tx_borrowed = NULL;
}
#ifdef CONFIG_NET_CLS_POLICE
#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
if (q->rx_class == cl)
q->rx_class = NULL;
#endif
@@ -125,7 +125,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
if (skb->len > q->max_size) {
sch->qstats.drops++;
#ifdef CONFIG_NET_CLS_POLICE
#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
#endif
kfree_skb(skb);
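
For illustration only (not part of the patch): the reshape_fail guards
changed above (qdisc_reshape_fail() and tbf_enqueue()) now also cover
CONFIG_NET_CLS_ACT because a reclassifying parent such as CBQ installs its
handler (cbq_reshape_fail) on the child qdisc when the class is policed
with TC_POLICE_RECLASSIFY. A child that cannot accept a packet first
offers it to that hook and only drops it if the hook is absent or refuses
the packet. Rough sketch with made-up names (my_enqueue, my_sched_data,
my_limit):

static int my_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct my_sched_data *q = qdisc_priv(sch);

	if (skb->len > q->my_limit) {
		sch->qstats.drops++;
#if defined(CONFIG_NET_CLS_ACT) || defined(CONFIG_NET_CLS_POLICE)
		/* reshape_fail is installed by a reclassifying parent;
		 * a zero return means the parent took the packet back
		 * and reclassified it, so do not free it here. */
		if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
#endif
			kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	/* ... normal enqueue path ... */
	return NET_XMIT_SUCCESS;
}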