Commit 7d2681a6 authored by Patrick McHardy, committed by David S. Miller

[NET_SCHED]: sch_sfq: add support for external classifiers

Add support for external classifiers to allow using different flow
hash functions similar to ESFQ. When no classifier is attached the
built-in hash is used as before.
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>

Parent 5239008b
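
For orientation, here is an editor's sketch (not part of the patch below) of the bucket-selection convention that the new sfq_classify() establishes: a return value of 0 means the packet is dropped, a value in 1..SFQ_HASH_DIVISOR selects bucket value - 1, and when no filter is attached the built-in hash plus one is used, as before. The struct and function names here are hypothetical user-space stand-ins; only SFQ_HASH_DIVISOR (1024) and the numbering convention come from sch_sfq itself.

/*
 * Editor's illustrative sketch of the bucket-numbering convention used by
 * sfq_classify() in the patch below.  Names here are hypothetical user-space
 * stand-ins, not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

#define SFQ_HASH_DIVISOR 1024	/* same divisor sch_sfq uses */

struct fake_skb {
	unsigned int prio_minor;	/* stands in for TC_H_MIN(skb->priority) */
	unsigned int flow_hash;		/* stands in for the built-in sfq hash */
};

/* Returns 0 to drop, or 1..SFQ_HASH_DIVISOR meaning "use bucket value - 1". */
static unsigned int pick_bucket(const struct fake_skb *skb, bool have_filter,
				unsigned int filter_minor)
{
	/* A matching skb->priority minor number maps directly to a bucket. */
	if (skb->prio_minor > 0 && skb->prio_minor <= SFQ_HASH_DIVISOR)
		return skb->prio_minor;

	/* No classifier attached: fall back to the built-in hash, as before. */
	if (!have_filter)
		return (skb->flow_hash % SFQ_HASH_DIVISOR) + 1;

	/* Classifier attached: accept an in-range class id, otherwise drop. */
	if (filter_minor <= SFQ_HASH_DIVISOR)
		return filter_minor;
	return 0;
}

int main(void)
{
	struct fake_skb skb = { .prio_minor = 0, .flow_hash = 12345 };
	unsigned int h = pick_bucket(&skb, false, 0);

	if (h == 0)
		printf("drop\n");
	else
		printf("enqueue on bucket %u\n", h - 1);
	return 0;
}

In the patch itself, sfq_enqueue() and sfq_requeue() apply exactly this convention: hash 0 drops the packet (counted as a drop when the classifier signalled NET_XMIT_BYPASS), otherwise hash - 1 indexes q->ht[].
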
@@ -95,6 +95,7 @@ struct sfq_sched_data
 	int limit;
 /* Variables */
+	struct tcf_proto *filter_list;
 	struct timer_list perturb_timer;
 	u32 perturbation;
 	sfq_index tail;		/* Index of current slot in round */
@@ -155,6 +156,39 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
 	return sfq_fold_hash(q, h, h2);
 }
+static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
+				 int *qerr)
+{
+	struct sfq_sched_data *q = qdisc_priv(sch);
+	struct tcf_result res;
+	int result;
+	if (TC_H_MAJ(skb->priority) == sch->handle &&
+	    TC_H_MIN(skb->priority) > 0 &&
+	    TC_H_MIN(skb->priority) <= SFQ_HASH_DIVISOR)
+		return TC_H_MIN(skb->priority);
+	if (!q->filter_list)
+		return sfq_hash(q, skb) + 1;
+	*qerr = NET_XMIT_BYPASS;
+	result = tc_classify(skb, q->filter_list, &res);
+	if (result >= 0) {
+#ifdef CONFIG_NET_CLS_ACT
+		switch (result) {
+		case TC_ACT_STOLEN:
+		case TC_ACT_QUEUED:
+			*qerr = NET_XMIT_SUCCESS;
+		case TC_ACT_SHOT:
+			return 0;
+		}
+#endif
+		if (TC_H_MIN(res.classid) <= SFQ_HASH_DIVISOR)
+			return TC_H_MIN(res.classid);
+	}
+	return 0;
+}
 static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
 {
 	sfq_index p, n;
@@ -245,8 +279,18 @@ static int
 sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct sfq_sched_data *q = qdisc_priv(sch);
-	unsigned hash = sfq_hash(q, skb);
+	unsigned int hash;
 	sfq_index x;
+	int ret;
+	hash = sfq_classify(skb, sch, &ret);
+	if (hash == 0) {
+		if (ret == NET_XMIT_BYPASS)
+			sch->qstats.drops++;
+		kfree_skb(skb);
+		return ret;
+	}
+	hash--;
 	x = q->ht[hash];
 	if (x == SFQ_DEPTH) {
@@ -289,8 +333,18 @@ static int
 sfq_requeue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct sfq_sched_data *q = qdisc_priv(sch);
-	unsigned hash = sfq_hash(q, skb);
+	unsigned int hash;
 	sfq_index x;
+	int ret;
+	hash = sfq_classify(skb, sch, &ret);
+	if (hash == 0) {
+		if (ret == NET_XMIT_BYPASS)
+			sch->qstats.drops++;
+		kfree_skb(skb);
+		return ret;
+	}
+	hash--;
 	x = q->ht[hash];
 	if (x == SFQ_DEPTH) {
@@ -465,6 +519,8 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
 static void sfq_destroy(struct Qdisc *sch)
 {
 	struct sfq_sched_data *q = qdisc_priv(sch);
+	tcf_destroy_chain(q->filter_list);
 	del_timer(&q->perturb_timer);
 }
@@ -490,9 +546,40 @@ static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
 	return -1;
 }
+static int sfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+			    struct nlattr **tca, unsigned long *arg)
+{
+	return -EOPNOTSUPP;
+}
+static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
+{
+	return 0;
+}
+static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl)
+{
+	struct sfq_sched_data *q = qdisc_priv(sch);
+	if (cl)
+		return NULL;
+	return &q->filter_list;
+}
+static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
+	return;
+}
+static const struct Qdisc_class_ops sfq_class_ops = {
+	.get = sfq_get,
+	.change = sfq_change_class,
+	.tcf_chain = sfq_find_tcf,
+	.walk = sfq_walk,
+};
 static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
 	.next = NULL,
-	.cl_ops = NULL,
+	.cl_ops = &sfq_class_ops,
 	.id = "sfq",
 	.priv_size = sizeof(struct sfq_sched_data),
 	.enqueue = sfq_enqueue,