Commit 865ec552 authored by Eric Dumazet, committed by David S. Miller

fq_codel: should use qdisc backlog as threshold

The codel_should_drop() logic allows a packet to escape dropping when the
queue size is under one max packet size.

In fq_codel, we have two possible backlogs: the qdisc-global one and the
per-flow one.

The meaningful one for codel_should_drop() is the global backlog, not the
per-flow one, so that thin flows can have a non-zero drop/mark probability.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Dave Taht <dave.taht@bufferbloat.net>
Cc: Kathleen Nichols <nichols@pollere.com>
Cc: Van Jacobson <van@pollere.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent c27b46e7
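
To illustrate the point of the patch before reading the diff below, here is a
small user-space sketch (not kernel code; the function name may_drop() and all
numeric values are made up for illustration) of the early-exit check that
codel_should_drop() performs: no drop or ECN mark is considered while the
backlog it is handed is at or below one max-size packet. A thin flow's own
backlog rarely exceeds one MTU, so feeding the per-flow backlog to that check
disables dropping for the flow entirely, whereas the qdisc-wide backlog does
not.

#include <stdbool.h>
#include <stdio.h>

/* Simplified user-space model of codel_should_drop()'s early exit:
 * dropping/marking is not considered while the backlog we look at
 * is at or below one maximum-size packet.
 */
static bool may_drop(unsigned int sojourn_us, unsigned int target_us,
                     unsigned int backlog_bytes, unsigned int maxpacket)
{
        if (sojourn_us < target_us || backlog_bytes <= maxpacket)
                return false;   /* "went below - stay below for at least interval" */
        return true;
}

int main(void)
{
        unsigned int maxpacket = 1514;          /* largest packet seen so far */
        unsigned int target_us = 5000;          /* CoDel target: 5 ms */
        unsigned int sojourn_us = 50000;        /* this packet queued for 50 ms */

        unsigned int flow_backlog = 120;        /* thin flow: one small packet queued */
        unsigned int qdisc_backlog = 300000;    /* whole fq_codel qdisc is congested */

        /* Per-flow backlog: 120 <= 1514, so the thin flow is never a candidate. */
        printf("per-flow threshold -> may drop: %d\n",
               may_drop(sojourn_us, target_us, flow_backlog, maxpacket));
        /* Qdisc-wide backlog: the same packet now becomes a drop/mark candidate. */
        printf("qdisc threshold    -> may drop: %d\n",
               may_drop(sojourn_us, target_us, qdisc_backlog, maxpacket));
        return 0;
}
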
@@ -205,7 +205,7 @@ static codel_time_t codel_control_law(codel_time_t t,
 static bool codel_should_drop(const struct sk_buff *skb,
-                              unsigned int *backlog,
+                              struct Qdisc *sch,
                               struct codel_vars *vars,
                               struct codel_params *params,
                               struct codel_stats *stats,
@@ -219,13 +219,13 @@ static bool codel_should_drop(const struct sk_buff *skb,
        }
        vars->ldelay = now - codel_get_enqueue_time(skb);
-       *backlog -= qdisc_pkt_len(skb);
+       sch->qstats.backlog -= qdisc_pkt_len(skb);
        if (unlikely(qdisc_pkt_len(skb) > stats->maxpacket))
                stats->maxpacket = qdisc_pkt_len(skb);
        if (codel_time_before(vars->ldelay, params->target) ||
-           *backlog <= stats->maxpacket) {
+           sch->qstats.backlog <= stats->maxpacket) {
                /* went below - stay below for at least interval */
                vars->first_above_time = 0;
                return false;
@@ -249,8 +249,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
                                     struct codel_params *params,
                                     struct codel_vars *vars,
                                     struct codel_stats *stats,
-                                    codel_skb_dequeue_t dequeue_func,
-                                    u32 *backlog)
+                                    codel_skb_dequeue_t dequeue_func)
 {
        struct sk_buff *skb = dequeue_func(vars, sch);
        codel_time_t now;
@@ -261,7 +260,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
                return skb;
        }
        now = codel_get_time();
-       drop = codel_should_drop(skb, backlog, vars, params, stats, now);
+       drop = codel_should_drop(skb, sch, vars, params, stats, now);
        if (vars->dropping) {
                if (!drop) {
                        /* sojourn time below target - leave dropping state */
@@ -292,7 +291,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
                                qdisc_drop(skb, sch);
                                stats->drop_count++;
                                skb = dequeue_func(vars, sch);
-                               if (!codel_should_drop(skb, backlog,
+                               if (!codel_should_drop(skb, sch,
                                                       vars, params, stats, now)) {
                                        /* leave dropping state */
                                        vars->dropping = false;
@@ -313,7 +312,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
                        stats->drop_count++;
                        skb = dequeue_func(vars, sch);
-                       drop = codel_should_drop(skb, backlog, vars, params,
+                       drop = codel_should_drop(skb, sch, vars, params,
                                                 stats, now);
                }
                vars->dropping = true;
...
@@ -77,8 +77,8 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
        struct codel_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;
-       skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats,
-                           dequeue, &sch->qstats.backlog);
+       skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);
        /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
         * or HTB crashes. Defer it for next round.
         */
...
@@ -217,13 +217,14 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
  */
 static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
 {
+       struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct fq_codel_flow *flow;
        struct sk_buff *skb = NULL;
        flow = container_of(vars, struct fq_codel_flow, cvars);
        if (flow->head) {
                skb = dequeue_head(flow);
-               sch->qstats.backlog -= qdisc_pkt_len(skb);
+               q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
                sch->q.qlen--;
        }
        return skb;
@@ -256,7 +257,7 @@ static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
        prev_ecn_mark = q->cstats.ecn_mark;
        skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
-                           dequeue, &q->backlogs[flow - q->flows]);
+                           dequeue);
        flow->dropped += q->cstats.drop_count - prev_drop_count;
        flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;
...
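
One subtlety in the fq_codel hunks above: since codel_should_drop() now
subtracts the packet from the qdisc-wide sch->qstats.backlog, the per-flow
dequeue() callback switches to maintaining the per-flow counter
(q->backlogs[]), so neither counter is decremented twice. A minimal user-space
sketch of that split bookkeeping (struct toy_fq, the helper names and the
values are illustrative, not the kernel structures):

#include <stdio.h>

/* Toy model: one qdisc-wide byte count plus per-flow byte counts. */
struct toy_fq {
        unsigned int qdisc_backlog;     /* stands in for sch->qstats.backlog */
        unsigned int flow_backlog[2];   /* stands in for q->backlogs[] */
};

/* Models the per-flow dequeue callback after the patch: it only
 * updates the counter of the flow it pulled the packet from ...
 */
static void flow_dequeue(struct toy_fq *q, int flow, unsigned int len)
{
        q->flow_backlog[flow] -= len;
}

/* ... while the codel core now subtracts the same packet from the
 * qdisc-wide backlog when it evaluates it.
 */
static void codel_account(struct toy_fq *q, unsigned int len)
{
        q->qdisc_backlog -= len;
}

int main(void)
{
        struct toy_fq q = {
                .qdisc_backlog = 1500 + 120,
                .flow_backlog  = { 1500, 120 },
        };

        flow_dequeue(&q, 1, 120);       /* pull the 120-byte packet of flow 1 */
        codel_account(&q, 120);

        printf("qdisc=%u flow0=%u flow1=%u\n",
               q.qdisc_backlog, q.flow_backlog[0], q.flow_backlog[1]);
        return 0;
}
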