Commit b8d99ba0 authored by David S. Miller

Merge branch 'cbq-kill-drop'

Florian Westphal says:

====================
sched, cbq: remove OVL_STRATEGY/POLICE support

iproute2 does not implement any options that result in the
TCA_CBQ_OVL_STRATEGY/TCA_CBQ_POLICE attributes being set/used.

This series removes these two attributes from cbq and makes the kernel
reject them with EOPNOTSUPP in case they are present.
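
For reference, the rejection is a simple presence check on the parsed
netlink attribute table; a minimal sketch of the check added in
cbq_change_class() (it appears verbatim in the hunk further down):

  /* Refuse the deprecated attributes outright rather than silently
   * accepting them; iproute2 never emits either one.
   */
  if (tb[TCA_CBQ_OVL_STRATEGY] || tb[TCA_CBQ_POLICE])
          return -EOPNOTSUPP;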

The two followup changes then remove several features from the qdisc
infrastructure that are no longer used or needed:
 - the 'drop' method provided by most qdiscs (see the sketch after this list)
 - the 'reshape_fail' function used by some qdiscs
 - the __parent member in struct Qdisc
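
For context, a hedged sketch of the interface being removed: the per-qdisc
'drop' op returned the number of bytes freed, or 0 when nothing could be
dropped, and callers walked their child qdiscs invoking it. Condensed from
the call sites deleted below (e.g. the removed red_drop()/tbf_drop(); 'child'
here stands for the inner qdisc):

  /* Old calling convention: ask a child qdisc to drop one packet and
   * account for it in the parent's own counters on success.
   */
  if (child->ops->drop) {
          unsigned int len = child->ops->drop(child);
          if (len > 0) {
                  sch->q.qlen--;
                  qdisc_qstats_drop(sch);
          }
  }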

I tested this with allmodconfig and allyesconfig builds and also with
a brief cbq script:

  tc qdisc add dev eth0 root handle 1:0 cbq bandwidth 10Mbit avpkt 1000 cell 8
  tc class add dev eth0 parent 1:0 classid 1:1 est 1sec 8sec cbq bandwidth 10Mbit rate 5Mbit prio 1 allot 1514 maxburst 20 cell 8 avpkt 1000 bounded split 1:0 defmap 3f
  tc class add dev eth0 parent 1:0 classid 1:2 est 1sec 8sec cbq bandwidth 10Mbit rate 5Mbit prio 1 allot 1514 maxburst 20 cell 8 avpkt 1000 bounded split 1:0 defmap 3f
  tc filter add dev eth0 parent 1:0 protocol ip prio 1 u32 match ip tos 0x10 0xff classid 1:1 police rate 2Mbit burst 10K reclassify
  tc filter add dev eth0 parent 1:0 protocol ip prio 1 u32 match ip tos 0x0c 0xff classid 1:2
  tc filter add dev eth0 parent 1:0 protocol ip prio 2 u32 match ip tos 0x10 0xff classid 1:2
  tc filter add dev eth0 parent 1:0 protocol ip prio 3 u32 match ip tos 0x0 0x0 classid 1:2

No changes since v1 except patch #5 to fix up struct Qdisc layout.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -63,26 +63,19 @@ struct Qdisc {
struct list_head list;
u32 handle;
u32 parent;
int (*reshape_fail)(struct sk_buff *skb,
struct Qdisc *q);
void *u32_node;
/* This field is deprecated, but it is still used by CBQ
* and it will live until better solution will be invented.
*/
struct Qdisc *__parent;
struct netdev_queue *dev_queue;
struct gnet_stats_rate_est64 rate_est;
struct gnet_stats_basic_cpu __percpu *cpu_bstats;
struct gnet_stats_queue __percpu *cpu_qstats;
struct Qdisc *next_sched;
struct sk_buff *gso_skb;
/*
* For performance sake on SMP, we put highly modified fields at the end
*/
struct Qdisc *next_sched ____cacheline_aligned_in_smp;
struct sk_buff *gso_skb;
unsigned long state;
struct sk_buff_head q;
struct gnet_stats_basic_packed bstats;
@@ -181,7 +174,6 @@ struct Qdisc_ops {
int (*enqueue)(struct sk_buff *, struct Qdisc *);
struct sk_buff * (*dequeue)(struct Qdisc *);
struct sk_buff * (*peek)(struct Qdisc *);
unsigned int (*drop)(struct Qdisc *);
int (*init)(struct Qdisc *, struct nlattr *arg);
void (*reset)(struct Qdisc *);
@@ -665,22 +657,6 @@ static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch)
return __qdisc_queue_drop_head(sch, &sch->q);
}
static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
struct sk_buff_head *list)
{
struct sk_buff *skb = __skb_dequeue_tail(list);
if (likely(skb != NULL))
qdisc_qstats_backlog_dec(sch, skb);
return skb;
}
static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
{
return __qdisc_dequeue_tail(sch, &sch->q);
}
static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
return skb_peek(&sch->q);
@@ -748,25 +724,6 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
return old;
}
static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
struct sk_buff_head *list)
{
struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);
if (likely(skb != NULL)) {
unsigned int len = qdisc_pkt_len(skb);
kfree_skb(skb);
return len;
}
return 0;
}
static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
{
return __qdisc_queue_drop(sch, &sch->q);
}
static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
kfree_skb(skb);
@@ -775,22 +732,6 @@ static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
return NET_XMIT_DROP;
}
static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
qdisc_qstats_drop(sch);
#ifdef CONFIG_NET_CLS_ACT
if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
goto drop;
return NET_XMIT_SUCCESS;
drop:
#endif
kfree_skb(skb);
return NET_XMIT_DROP;
}
/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
long it will take to send a packet given its size.
*/
@@ -519,20 +519,6 @@ static struct sk_buff *atm_tc_peek(struct Qdisc *sch)
return p->link.q->ops->peek(p->link.q);
}
static unsigned int atm_tc_drop(struct Qdisc *sch)
{
struct atm_qdisc_data *p = qdisc_priv(sch);
struct atm_flow_data *flow;
unsigned int len;
pr_debug("atm_tc_drop(sch %p,[qdisc %p])\n", sch, p);
list_for_each_entry(flow, &p->flows, list) {
if (flow->q->ops->drop && (len = flow->q->ops->drop(flow->q)))
return len;
}
return 0;
}
static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt)
{
struct atm_qdisc_data *p = qdisc_priv(sch);
@@ -672,7 +658,6 @@ static struct Qdisc_ops atm_qdisc_ops __read_mostly = {
.enqueue = atm_tc_enqueue,
.dequeue = atm_tc_dequeue,
.peek = atm_tc_peek,
.drop = atm_tc_drop,
.init = atm_tc_init,
.reset = atm_tc_reset,
.destroy = atm_tc_destroy,
@@ -80,10 +80,6 @@ struct cbq_class {
unsigned char priority; /* class priority */
unsigned char priority2; /* priority to be used after overlimit */
unsigned char ewma_log; /* time constant for idle time calculation */
unsigned char ovl_strategy;
#ifdef CONFIG_NET_CLS_ACT
unsigned char police;
#endif
u32 defmap;
@@ -94,10 +90,6 @@ struct cbq_class {
u32 avpkt;
struct qdisc_rate_table *R_tab;
/* Overlimit strategy parameters */
void (*overlimit)(struct cbq_class *cl);
psched_tdiff_t penalty;
/* General scheduler (WRR) parameters */
long allot;
long quantum; /* Allotment per WRR round */
@@ -382,9 +374,6 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return ret;
}
#ifdef CONFIG_NET_CLS_ACT
cl->q->__parent = sch;
#endif
ret = qdisc_enqueue(skb, cl->q);
if (ret == NET_XMIT_SUCCESS) {
sch->q.qlen++;
@@ -402,11 +391,8 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return ret;
}
/* Overlimit actions */
/* TC_CBQ_OVL_CLASSIC: (default) penalize leaf class by adding offtime */
static void cbq_ovl_classic(struct cbq_class *cl)
/* Overlimit action: penalize leaf class by adding offtime */
static void cbq_overlimit(struct cbq_class *cl)
{
struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
psched_tdiff_t delay = cl->undertime - q->now;
@@ -456,99 +442,6 @@ static void cbq_ovl_classic(struct cbq_class *cl)
}
}
/* TC_CBQ_OVL_RCLASSIC: penalize by offtime classes in hierarchy, when
* they go overlimit
*/
static void cbq_ovl_rclassic(struct cbq_class *cl)
{
struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
struct cbq_class *this = cl;
do {
if (cl->level > q->toplevel) {
cl = NULL;
break;
}
} while ((cl = cl->borrow) != NULL);
if (cl == NULL)
cl = this;
cbq_ovl_classic(cl);
}
/* TC_CBQ_OVL_DELAY: delay until it will go to underlimit */
static void cbq_ovl_delay(struct cbq_class *cl)
{
struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
psched_tdiff_t delay = cl->undertime - q->now;
if (test_bit(__QDISC_STATE_DEACTIVATED,
&qdisc_root_sleeping(cl->qdisc)->state))
return;
if (!cl->delayed) {
psched_time_t sched = q->now;
ktime_t expires;
delay += cl->offtime;
if (cl->avgidle < 0)
delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
if (cl->avgidle < cl->minidle)
cl->avgidle = cl->minidle;
cl->undertime = q->now + delay;
if (delay > 0) {
sched += delay + cl->penalty;
cl->penalized = sched;
cl->cpriority = TC_CBQ_MAXPRIO;
q->pmask |= (1<<TC_CBQ_MAXPRIO);
expires = ns_to_ktime(PSCHED_TICKS2NS(sched));
if (hrtimer_try_to_cancel(&q->delay_timer) &&
ktime_to_ns(ktime_sub(
hrtimer_get_expires(&q->delay_timer),
expires)) > 0)
hrtimer_set_expires(&q->delay_timer, expires);
hrtimer_restart(&q->delay_timer);
cl->delayed = 1;
cl->xstats.overactions++;
return;
}
delay = 1;
}
if (q->wd_expires == 0 || q->wd_expires > delay)
q->wd_expires = delay;
}
/* TC_CBQ_OVL_LOWPRIO: penalize class by lowering its priority band */
static void cbq_ovl_lowprio(struct cbq_class *cl)
{
struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
cl->penalized = q->now + cl->penalty;
if (cl->cpriority != cl->priority2) {
cl->cpriority = cl->priority2;
q->pmask |= (1<<cl->cpriority);
cl->xstats.overactions++;
}
cbq_ovl_classic(cl);
}
/* TC_CBQ_OVL_DROP: penalize class by dropping */
static void cbq_ovl_drop(struct cbq_class *cl)
{
if (cl->q->ops->drop)
if (cl->q->ops->drop(cl->q))
cl->qdisc->q.qlen--;
cl->xstats.overactions++;
cbq_ovl_classic(cl);
}
static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
psched_time_t now)
{
@@ -625,40 +518,6 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
return HRTIMER_NORESTART;
}
#ifdef CONFIG_NET_CLS_ACT
static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
{
struct Qdisc *sch = child->__parent;
struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl = q->rx_class;
q->rx_class = NULL;
if (cl && (cl = cbq_reclassify(skb, cl)) != NULL) {
int ret;
cbq_mark_toplevel(q, cl);
q->rx_class = cl;
cl->q->__parent = sch;
ret = qdisc_enqueue(skb, cl->q);
if (ret == NET_XMIT_SUCCESS) {
sch->q.qlen++;
if (!cl->next_alive)
cbq_activate_class(cl);
return 0;
}
if (net_xmit_drop_count(ret))
qdisc_qstats_drop(sch);
return 0;
}
qdisc_qstats_drop(sch);
return -1;
}
#endif
/*
* It is mission critical procedure.
*
@@ -807,7 +666,7 @@ cbq_under_limit(struct cbq_class *cl)
cl = cl->borrow;
if (!cl) {
this_cl->qstats.overlimits++;
this_cl->overlimit(this_cl);
cbq_overlimit(this_cl);
return NULL;
}
if (cl->level > q->toplevel)
@@ -1166,31 +1025,6 @@ static void cbq_link_class(struct cbq_class *this)
}
}
static unsigned int cbq_drop(struct Qdisc *sch)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl, *cl_head;
int prio;
unsigned int len;
for (prio = TC_CBQ_MAXPRIO; prio >= 0; prio--) {
cl_head = q->active[prio];
if (!cl_head)
continue;
cl = cl_head;
do {
if (cl->q->ops->drop && (len = cl->q->ops->drop(cl->q))) {
sch->q.qlen--;
if (!cl->q->q.qlen)
cbq_deactivate_class(cl);
return len;
}
} while ((cl = cl->next_alive) != cl_head);
}
return 0;
}
static void
cbq_reset(struct Qdisc *sch)
{
@@ -1280,50 +1114,6 @@ static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
return 0;
}
static int cbq_set_overlimit(struct cbq_class *cl, struct tc_cbq_ovl *ovl)
{
switch (ovl->strategy) {
case TC_CBQ_OVL_CLASSIC:
cl->overlimit = cbq_ovl_classic;
break;
case TC_CBQ_OVL_DELAY:
cl->overlimit = cbq_ovl_delay;
break;
case TC_CBQ_OVL_LOWPRIO:
if (ovl->priority2 - 1 >= TC_CBQ_MAXPRIO ||
ovl->priority2 - 1 <= cl->priority)
return -EINVAL;
cl->priority2 = ovl->priority2 - 1;
cl->overlimit = cbq_ovl_lowprio;
break;
case TC_CBQ_OVL_DROP:
cl->overlimit = cbq_ovl_drop;
break;
case TC_CBQ_OVL_RCLASSIC:
cl->overlimit = cbq_ovl_rclassic;
break;
default:
return -EINVAL;
}
cl->penalty = ovl->penalty;
return 0;
}
#ifdef CONFIG_NET_CLS_ACT
static int cbq_set_police(struct cbq_class *cl, struct tc_cbq_police *p)
{
cl->police = p->police;
if (cl->q->handle) {
if (p->police == TC_POLICE_RECLASSIFY)
cl->q->reshape_fail = cbq_reshape_fail;
else
cl->q->reshape_fail = NULL;
}
return 0;
}
#endif
static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt)
{
cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange);
@@ -1375,8 +1165,6 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
q->link.priority = TC_CBQ_MAXPRIO - 1;
q->link.priority2 = TC_CBQ_MAXPRIO - 1;
q->link.cpriority = TC_CBQ_MAXPRIO - 1;
q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC;
q->link.overlimit = cbq_ovl_classic;
q->link.allot = psched_mtu(qdisc_dev(sch));
q->link.quantum = q->link.allot;
q->link.weight = q->link.R_tab->rate.rate;
@@ -1463,24 +1251,6 @@ static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
return -1;
}
static int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
{
unsigned char *b = skb_tail_pointer(skb);
struct tc_cbq_ovl opt;
opt.strategy = cl->ovl_strategy;
opt.priority2 = cl->priority2 + 1;
opt.pad = 0;
opt.penalty = cl->penalty;
if (nla_put(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt))
goto nla_put_failure;
return skb->len;
nla_put_failure:
nlmsg_trim(skb, b);
return -1;
}
static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
{
unsigned char *b = skb_tail_pointer(skb);
@@ -1500,36 +1270,11 @@ static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
return -1;
}
#ifdef CONFIG_NET_CLS_ACT
static int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
{
unsigned char *b = skb_tail_pointer(skb);
struct tc_cbq_police opt;
if (cl->police) {
opt.police = cl->police;
opt.__res1 = 0;
opt.__res2 = 0;
if (nla_put(skb, TCA_CBQ_POLICE, sizeof(opt), &opt))
goto nla_put_failure;
}
return skb->len;
nla_put_failure:
nlmsg_trim(skb, b);
return -1;
}
#endif
static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
{
if (cbq_dump_lss(skb, cl) < 0 ||
cbq_dump_rate(skb, cl) < 0 ||
cbq_dump_wrr(skb, cl) < 0 ||
cbq_dump_ovl(skb, cl) < 0 ||
#ifdef CONFIG_NET_CLS_ACT
cbq_dump_police(skb, cl) < 0 ||
#endif
cbq_dump_fopt(skb, cl) < 0)
return -1;
return 0;
@@ -1619,11 +1364,6 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
&pfifo_qdisc_ops, cl->common.classid);
if (new == NULL)
return -ENOBUFS;
} else {
#ifdef CONFIG_NET_CLS_ACT
if (cl->police == TC_POLICE_RECLASSIFY)
new->reshape_fail = cbq_reshape_fail;
#endif
}
*old = qdisc_replace(sch, new, &cl->q);
@@ -1736,6 +1476,9 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
if (err < 0)
return err;
if (tb[TCA_CBQ_OVL_STRATEGY] || tb[TCA_CBQ_POLICE])
return -EOPNOTSUPP;
if (cl) {
/* Check parent */
if (parentid) {
@@ -1784,14 +1527,6 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
}
if (tb[TCA_CBQ_OVL_STRATEGY])
cbq_set_overlimit(cl, nla_data(tb[TCA_CBQ_OVL_STRATEGY]));
#ifdef CONFIG_NET_CLS_ACT
if (tb[TCA_CBQ_POLICE])
cbq_set_police(cl, nla_data(tb[TCA_CBQ_POLICE]));
#endif
if (tb[TCA_CBQ_FOPT])
cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
@@ -1887,13 +1622,6 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
cl->maxidle = q->link.maxidle;
if (cl->avpkt == 0)
cl->avpkt = q->link.avpkt;
cl->overlimit = cbq_ovl_classic;
if (tb[TCA_CBQ_OVL_STRATEGY])
cbq_set_overlimit(cl, nla_data(tb[TCA_CBQ_OVL_STRATEGY]));
#ifdef CONFIG_NET_CLS_ACT
if (tb[TCA_CBQ_POLICE])
cbq_set_police(cl, nla_data(tb[TCA_CBQ_POLICE]));
#endif
if (tb[TCA_CBQ_FOPT])
cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
sch_tree_unlock(sch);
@@ -2038,7 +1766,6 @@ static struct Qdisc_ops cbq_qdisc_ops __read_mostly = {
.enqueue = cbq_enqueue,
.dequeue = cbq_dequeue,
.peek = qdisc_peek_dequeued,
.drop = cbq_drop,
.init = cbq_init,
.reset = cbq_reset,
.destroy = cbq_destroy,
@@ -365,22 +365,6 @@ static struct sk_buff *choke_dequeue(struct Qdisc *sch)
return skb;
}
static unsigned int choke_drop(struct Qdisc *sch)
{
struct choke_sched_data *q = qdisc_priv(sch);
unsigned int len;
len = qdisc_queue_drop(sch);
if (len > 0)
q->stats.other++;
else {
if (!red_is_idling(&q->vars))
red_start_of_idle_period(&q->vars);
}
return len;
}
static void choke_reset(struct Qdisc *sch)
{
struct choke_sched_data *q = qdisc_priv(sch);
@@ -569,7 +553,6 @@ static struct Qdisc_ops choke_qdisc_ops __read_mostly = {
.enqueue = choke_enqueue,
.dequeue = choke_dequeue,
.peek = choke_peek_head,
.drop = choke_drop,
.init = choke_init,
.destroy = choke_destroy,
.reset = choke_reset,
@@ -421,26 +421,6 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch)
return NULL;
}
static unsigned int drr_drop(struct Qdisc *sch)
{
struct drr_sched *q = qdisc_priv(sch);
struct drr_class *cl;
unsigned int len;
list_for_each_entry(cl, &q->active, alist) {
if (cl->qdisc->ops->drop) {
len = cl->qdisc->ops->drop(cl->qdisc);
if (len > 0) {
sch->q.qlen--;
if (cl->qdisc->q.qlen == 0)
list_del(&cl->alist);
return len;
}
}
}
return 0;
}
static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
struct drr_sched *q = qdisc_priv(sch);
@@ -509,7 +489,6 @@ static struct Qdisc_ops drr_qdisc_ops __read_mostly = {
.enqueue = drr_enqueue,
.dequeue = drr_dequeue,
.peek = qdisc_peek_dequeued,
.drop = drr_drop,
.init = drr_init_qdisc,
.reset = drr_reset_qdisc,
.destroy = drr_destroy_qdisc,
@@ -320,23 +320,6 @@ static struct sk_buff *dsmark_peek(struct Qdisc *sch)
return p->q->ops->peek(p->q);
}
static unsigned int dsmark_drop(struct Qdisc *sch)
{
struct dsmark_qdisc_data *p = qdisc_priv(sch);
unsigned int len;
pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
if (p->q->ops->drop == NULL)
return 0;
len = p->q->ops->drop(p->q);
if (len)
sch->q.qlen--;
return len;
}
static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
{
struct dsmark_qdisc_data *p = qdisc_priv(sch);
@@ -489,7 +472,6 @@ static struct Qdisc_ops dsmark_qdisc_ops __read_mostly = {
.enqueue = dsmark_enqueue,
.dequeue = dsmark_dequeue,
.peek = dsmark_peek,
.drop = dsmark_drop,
.init = dsmark_init,
.reset = dsmark_reset,
.destroy = dsmark_destroy,
@@ -24,7 +24,7 @@ static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))
return qdisc_enqueue_tail(skb, sch);
return qdisc_reshape_fail(skb, sch);
return qdisc_drop(skb, sch);
}
static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
@@ -32,7 +32,7 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (likely(skb_queue_len(&sch->q) < sch->limit))
return qdisc_enqueue_tail(skb, sch);
return qdisc_reshape_fail(skb, sch);
return qdisc_drop(skb, sch);
}
static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch)
@@ -99,7 +99,6 @@ struct Qdisc_ops pfifo_qdisc_ops __read_mostly = {
.enqueue = pfifo_enqueue,
.dequeue = qdisc_dequeue_head,
.peek = qdisc_peek_head,
.drop = qdisc_queue_drop,
.init = fifo_init,
.reset = qdisc_reset_queue,
.change = fifo_init,
@@ -114,7 +113,6 @@ struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
.enqueue = bfifo_enqueue,
.dequeue = qdisc_dequeue_head,
.peek = qdisc_peek_head,
.drop = qdisc_queue_drop,
.init = fifo_init,
.reset = qdisc_reset_queue,
.change = fifo_init,
@@ -129,7 +127,6 @@ struct Qdisc_ops pfifo_head_drop_qdisc_ops __read_mostly = {
.enqueue = pfifo_tail_enqueue,
.dequeue = qdisc_dequeue_head,
.peek = qdisc_peek_head,
.drop = qdisc_queue_drop_head,
.init = fifo_init,
.reset = qdisc_reset_queue,
.change = fifo_init,
@@ -184,15 +184,6 @@ static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets)
return idx;
}
static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch)
{
unsigned int prev_backlog;
prev_backlog = sch->qstats.backlog;
fq_codel_drop(sch, 1U);
return prev_backlog - sch->qstats.backlog;
}
static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct fq_codel_sched_data *q = qdisc_priv(sch);
@@ -704,7 +695,6 @@ static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
.enqueue = fq_codel_enqueue,
.dequeue = fq_codel_dequeue,
.peek = qdisc_peek_dequeued,
.drop = fq_codel_qdisc_drop,
.init = fq_codel_init,
.reset = fq_codel_reset,
.destroy = fq_codel_destroy,
@@ -276,40 +276,6 @@ static struct sk_buff *gred_dequeue(struct Qdisc *sch)
return NULL;
}
static unsigned int gred_drop(struct Qdisc *sch)
{
struct sk_buff *skb;
struct gred_sched *t = qdisc_priv(sch);
skb = qdisc_dequeue_tail(sch);
if (skb) {
unsigned int len = qdisc_pkt_len(skb);
struct gred_sched_data *q;
u16 dp = tc_index_to_dp(skb);
if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x while dropping, screwing up backlog\n",
tc_index_to_dp(skb));
} else {
q->backlog -= len;
q->stats.other++;
if (gred_wred_mode(t)) {
if (!sch->qstats.backlog)
red_start_of_idle_period(&t->wred_set);
} else {
if (!q->backlog)
red_start_of_idle_period(&q->vars);
}
}
qdisc_drop(skb, sch);
return len;
}
return 0;
}
static void gred_reset(struct Qdisc *sch)
{
int i;
@@ -623,7 +589,6 @@ static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
.enqueue = gred_enqueue,
.dequeue = gred_dequeue,
.peek = qdisc_peek_head,
.drop = gred_drop,
.init = gred_init,
.reset = gred_reset,
.destroy = gred_destroy,
@@ -1677,31 +1677,6 @@ hfsc_dequeue(struct Qdisc *sch)
return skb;
}
static unsigned int
hfsc_drop(struct Qdisc *sch)
{
struct hfsc_sched *q = qdisc_priv(sch);
struct hfsc_class *cl;
unsigned int len;
list_for_each_entry(cl, &q->droplist, dlist) {
if (cl->qdisc->ops->drop != NULL &&
(len = cl->qdisc->ops->drop(cl->qdisc)) > 0) {
if (cl->qdisc->q.qlen == 0) {
update_vf(cl, 0, 0);
set_passive(cl);
} else {
list_move_tail(&cl->dlist, &q->droplist);
}
cl->qstats.drops++;
qdisc_qstats_drop(sch);
sch->q.qlen--;
return len;
}
}
return 0;
}
static const struct Qdisc_class_ops hfsc_class_ops = {
.change = hfsc_change_class,
.delete = hfsc_delete_class,
@@ -1728,7 +1703,6 @@ static struct Qdisc_ops hfsc_qdisc_ops __read_mostly = {
.enqueue = hfsc_enqueue,
.dequeue = hfsc_dequeue,
.peek = qdisc_peek_dequeued,
.drop = hfsc_drop,
.cl_ops = &hfsc_class_ops,
.priv_size = sizeof(struct hfsc_sched),
.owner = THIS_MODULE
@@ -368,15 +368,6 @@ static unsigned int hhf_drop(struct Qdisc *sch)
return bucket - q->buckets;
}
static unsigned int hhf_qdisc_drop(struct Qdisc *sch)
{
unsigned int prev_backlog;
prev_backlog = sch->qstats.backlog;
hhf_drop(sch);
return prev_backlog - sch->qstats.backlog;
}
static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct hhf_sched_data *q = qdisc_priv(sch);
@@ -709,7 +700,6 @@ static struct Qdisc_ops hhf_qdisc_ops __read_mostly = {
.enqueue = hhf_enqueue,
.dequeue = hhf_dequeue,
.peek = qdisc_peek_dequeued,
.drop = hhf_qdisc_drop,
.init = hhf_init,
.reset = hhf_reset,
.destroy = hhf_destroy,
@@ -936,31 +936,6 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
return skb;
}
/* try to drop from each class (by prio) until one succeed */
static unsigned int htb_drop(struct Qdisc *sch)
{
struct htb_sched *q = qdisc_priv(sch);
int prio;
for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
struct list_head *p;
list_for_each(p, q->drops + prio) {
struct htb_class *cl = list_entry(p, struct htb_class,
un.leaf.drop_list);
unsigned int len;
if (cl->un.leaf.q->ops->drop &&
(len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
sch->qstats.backlog -= len;
sch->q.qlen--;
if (!cl->un.leaf.q->q.qlen)
htb_deactivate(q, cl);
return len;
}
}
}
return 0;
}
/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc *sch)
@@ -1600,7 +1575,6 @@ static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
.enqueue = htb_enqueue,
.dequeue = htb_dequeue,
.peek = qdisc_peek_dequeued,
.drop = htb_drop,
.init = htb_init,
.reset = htb_reset,
.destroy = htb_destroy,
@@ -151,27 +151,6 @@ static struct sk_buff *multiq_peek(struct Qdisc *sch)
}
static unsigned int multiq_drop(struct Qdisc *sch)
{
struct multiq_sched_data *q = qdisc_priv(sch);
int band;
unsigned int len;
struct Qdisc *qdisc;
for (band = q->bands - 1; band >= 0; band--) {
qdisc = q->queues[band];
if (qdisc->ops->drop) {
len = qdisc->ops->drop(qdisc);
if (len != 0) {
sch->q.qlen--;
return len;
}
}
}
return 0;
}
static void
multiq_reset(struct Qdisc *sch)
{
@@ -416,7 +395,6 @@ static struct Qdisc_ops multiq_qdisc_ops __read_mostly = {
.enqueue = multiq_enqueue,
.dequeue = multiq_dequeue,
.peek = multiq_peek,
.drop = multiq_drop,
.init = multiq_init,
.reset = multiq_reset,
.destroy = multiq_destroy,
@@ -407,7 +407,7 @@ static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch)
segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
if (IS_ERR_OR_NULL(segs)) {
qdisc_reshape_fail(skb, sch);
qdisc_drop(skb, sch);
return NULL;
}
consume_skb(skb);
@@ -499,7 +499,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
}
if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
return qdisc_reshape_fail(skb, sch);
return qdisc_drop(skb, sch);
qdisc_qstats_backlog_inc(sch, skb);
@@ -576,35 +576,6 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return NET_XMIT_SUCCESS;
}
static unsigned int netem_drop(struct Qdisc *sch)
{
struct netem_sched_data *q = qdisc_priv(sch);
unsigned int len;
len = qdisc_queue_drop(sch);
if (!len) {
struct rb_node *p = rb_first(&q->t_root);
if (p) {
struct sk_buff *skb = netem_rb_to_skb(p);
rb_erase(p, &q->t_root);
sch->q.qlen--;
skb->next = NULL;
skb->prev = NULL;
qdisc_qstats_backlog_dec(sch, skb);
kfree_skb(skb);
}
}
if (!len && q->qdisc && q->qdisc->ops->drop)
len = q->qdisc->ops->drop(q->qdisc);
if (len)
qdisc_qstats_drop(sch);
return len;
}
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
struct netem_sched_data *q = qdisc_priv(sch);
@@ -1143,7 +1114,6 @@ static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
.enqueue = netem_enqueue,
.dequeue = netem_dequeue,
.peek = qdisc_peek_dequeued,
.drop = netem_drop,
.init = netem_init,
.reset = netem_reset,
.destroy = netem_destroy,
@@ -96,7 +96,7 @@ static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return qdisc_enqueue_tail(skb, sch);
}
return qdisc_reshape_fail(skb, sch);
return qdisc_drop(skb, sch);
}
static struct sk_buff *plug_dequeue(struct Qdisc *sch)
@@ -125,24 +125,6 @@ static struct sk_buff *prio_dequeue(struct Qdisc *sch)
}
static unsigned int prio_drop(struct Qdisc *sch)
{
struct prio_sched_data *q = qdisc_priv(sch);
int prio;
unsigned int len;
struct Qdisc *qdisc;
for (prio = q->bands-1; prio >= 0; prio--) {
qdisc = q->queues[prio];
if (qdisc->ops->drop && (len = qdisc->ops->drop(qdisc)) != 0) {
sch->q.qlen--;
return len;
}
}
return 0;
}
static void
prio_reset(struct Qdisc *sch)
{
@@ -379,7 +361,6 @@ static struct Qdisc_ops prio_qdisc_ops __read_mostly = {
.enqueue = prio_enqueue,
.dequeue = prio_dequeue,
.peek = prio_peek,
.drop = prio_drop,
.init = prio_init,
.reset = prio_reset,
.destroy = prio_destroy,
@@ -1423,52 +1423,6 @@ static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
qfq_deactivate_class(q, cl);
}
static unsigned int qfq_drop_from_slot(struct qfq_sched *q,
struct hlist_head *slot)
{
struct qfq_aggregate *agg;
struct qfq_class *cl;
unsigned int len;
hlist_for_each_entry(agg, slot, next) {
list_for_each_entry(cl, &agg->active, alist) {
if (!cl->qdisc->ops->drop)
continue;
len = cl->qdisc->ops->drop(cl->qdisc);
if (len > 0) {
if (cl->qdisc->q.qlen == 0)
qfq_deactivate_class(q, cl);
return len;
}
}
}
return 0;
}
static unsigned int qfq_drop(struct Qdisc *sch)
{
struct qfq_sched *q = qdisc_priv(sch);
struct qfq_group *grp;
unsigned int i, j, len;
for (i = 0; i <= QFQ_MAX_INDEX; i++) {
grp = &q->groups[i];
for (j = 0; j < QFQ_MAX_SLOTS; j++) {
len = qfq_drop_from_slot(q, &grp->slots[j]);
if (len > 0) {
sch->q.qlen--;
return len;
}
}
}
return 0;
}
static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
struct qfq_sched *q = qdisc_priv(sch);
@@ -1563,7 +1517,6 @@ static struct Qdisc_ops qfq_qdisc_ops __read_mostly = {
.enqueue = qfq_enqueue,
.dequeue = qfq_dequeue,
.peek = qdisc_peek_dequeued,
.drop = qfq_drop,
.init = qfq_init_qdisc,
.reset = qfq_reset_qdisc,
.destroy = qfq_destroy_qdisc,
@@ -134,25 +134,6 @@ static struct sk_buff *red_peek(struct Qdisc *sch)
return child->ops->peek(child);
}
static unsigned int red_drop(struct Qdisc *sch)
{
struct red_sched_data *q = qdisc_priv(sch);
struct Qdisc *child = q->qdisc;
unsigned int len;
if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
q->stats.other++;
qdisc_qstats_drop(sch);
sch->q.qlen--;
return len;
}
if (!red_is_idling(&q->vars))
red_start_of_idle_period(&q->vars);
return 0;
}
static void red_reset(struct Qdisc *sch)
{
struct red_sched_data *q = qdisc_priv(sch);
@@ -361,7 +342,6 @@ static struct Qdisc_ops red_qdisc_ops __read_mostly = {
.enqueue = red_enqueue,
.dequeue = red_dequeue,
.peek = red_peek,
.drop = red_drop,
.init = red_init,
.reset = red_reset,
.destroy = red_destroy,
@@ -896,7 +896,6 @@ static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
.enqueue = sfq_enqueue,
.dequeue = sfq_dequeue,
.peek = qdisc_peek_dequeued,
.drop = sfq_drop,
.init = sfq_init,
.reset = sfq_reset,
.destroy = sfq_destroy,
@@ -166,7 +166,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
if (IS_ERR_OR_NULL(segs))
return qdisc_reshape_fail(skb, sch);
return qdisc_drop(skb, sch);
nb = 0;
while (segs) {
@@ -198,7 +198,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (qdisc_pkt_len(skb) > q->max_size) {
if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size)
return tbf_segment(skb, sch);
return qdisc_reshape_fail(skb, sch);
return qdisc_drop(skb, sch);
}
ret = qdisc_enqueue(skb, q->qdisc);
if (ret != NET_XMIT_SUCCESS) {
@@ -211,18 +211,6 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return NET_XMIT_SUCCESS;
}
static unsigned int tbf_drop(struct Qdisc *sch)
{
struct tbf_sched_data *q = qdisc_priv(sch);
unsigned int len = 0;
if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
sch->q.qlen--;
qdisc_qstats_drop(sch);
}
return len;
}
static bool tbf_peak_present(const struct tbf_sched_data *q)
{
return q->peak.rate_bytes_ps;
@@ -555,7 +543,6 @@ static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
.enqueue = tbf_enqueue,
.dequeue = tbf_dequeue,
.peek = qdisc_peek_dequeued,
.drop = tbf_drop,
.init = tbf_init,
.reset = tbf_reset,
.destroy = tbf_destroy,