/*
 * net/sched/sch_sfq.c	Stochastic Fairness Queueing discipline.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>


/*	Stochastic Fairness Queuing algorithm.
	=======================================

	Source:
	Paul E. McKenney "Stochastic Fairness Queuing",
	IEEE INFOCOMM'90 Proceedings, San Francisco, 1990.

	Paul E. McKenney "Stochastic Fairness Queuing",
	"Interworking: Research and Experience", v.2, 1991, p.113-131.


	See also:
	M. Shreedhar and George Varghese "Efficient Fair
	Queuing using Deficit Round Robin", Proc. SIGCOMM 95.


	This is not the thing that is usually called (W)FQ nowadays.
	It does not use any timestamp mechanism, but instead
	processes queues in round-robin order.

	ADVANTAGE:

	- It is very cheap. Both CPU and memory requirements are minimal.

	DRAWBACKS:

	- "Stochastic" -> It is not 100% fair.
	When hash collisions occur, several flows are considered as one.

	- "Round-robin" -> It introduces larger delays than virtual clock
	based schemes, and should not be used for isolating interactive
	traffic from non-interactive. This means that this scheduler
	should be used as a leaf of CBQ or P3, which put interactive traffic
	into a higher priority band.

	We still need true WFQ for top level CSZ, but using WFQ
	for the best effort traffic is absolutely pointless:
	SFQ is superior for this purpose.

	IMPLEMENTATION:
	This implementation limits the maximal queue length to 128 packets,
	the maximal mtu to 2^15-1, the number of flows to 128, and the
	number of hash buckets to 1024.
	The only goal of these restrictions was that all data
	fit into one 4K page on 32bit arches.

	It is easy to increase these values, but not in flight.  */
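/* Usage sketch (editor's illustration, not from the original source): with
 * the iproute2 "tc" tool, SFQ is typically attached as a leaf qdisc, e.g.
 *
 *	tc qdisc add dev eth0 root sfq perturb 10
 *
 * where "perturb 10" asks for the hash to be re-seeded every 10 seconds
 * (see perturb_period below) and the device name is purely illustrative.
 */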

#define SFQ_DEPTH		128 /* max number of packets per flow */
#define SFQ_SLOTS		128 /* max number of flows */
#define SFQ_EMPTY_SLOT		255
#define SFQ_HASH_DIVISOR	1024

/* This type should contain at least SFQ_DEPTH + SFQ_SLOTS values */
typedef unsigned char sfq_index;

/*
 * We don't use pointers to save space.
 * Small indexes [0 ... SFQ_SLOTS - 1] are 'pointers' to slots[] array
 * while following values [SFQ_SLOTS ... SFQ_SLOTS + SFQ_DEPTH - 1]
 * are 'pointers' to dep[] array
 */
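/*
 * For illustration: with SFQ_SLOTS == 128, an sfq_index of 5 refers to
 * slots[5], while an sfq_index of 130 refers to dep[130 - SFQ_SLOTS],
 * i.e. dep[2] (see sfq_dep_head() below).
 */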
struct sfq_head
{
	sfq_index	next;
	sfq_index	prev;
};

struct sfq_slot {
	struct sk_buff	*skblist_next;
	struct sk_buff	*skblist_prev;
	sfq_index	qlen; /* number of skbs in skblist */
	sfq_index	next; /* next slot in sfq chain */
	struct sfq_head dep; /* anchor in dep[] chains */
	unsigned short	hash; /* hash value (index in ht[]) */
	short		allot; /* credit for this slot */
};

struct sfq_sched_data
{
/* Parameters */
	int		perturb_period;
	unsigned	quantum;	/* Allotment per round: MUST BE >= MTU */
	int		limit;

/* Variables */
	struct tcf_proto *filter_list;
	struct timer_list perturb_timer;
	u32		perturbation;
	sfq_index	cur_depth;	/* depth of longest slot */

	struct sfq_slot *tail;		/* current slot in round */
	sfq_index	ht[SFQ_HASH_DIVISOR];	/* Hash table */
	struct sfq_slot	slots[SFQ_SLOTS];
	struct sfq_head	dep[SFQ_DEPTH];	/* Linked list of slots, indexed by depth */
};
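/*
 * Note on dep[]: dep[d] heads the doubly linked list of slots currently
 * holding d packets (sfq_link() inserts a slot at dep[qlen]), and
 * cur_depth tracks the largest such d, so sfq_drop() can find the
 * longest flow in O(1) via q->dep[q->cur_depth].next.
 */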

/*
 * sfq_head are either in a sfq_slot or in dep[] array
 */
static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
{
	if (val < SFQ_SLOTS)
		return &q->slots[val].dep;
	return &q->dep[val - SFQ_SLOTS];
}

static __inline__ unsigned sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
{
	return jhash_2words(h, h1, q->perturbation) & (SFQ_HASH_DIVISOR - 1);
}

static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
{
	u32 h, h2;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
	{
		const struct iphdr *iph;
		int poff;

		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			goto err;
		iph = ip_hdr(skb);
		h = (__force u32)iph->daddr;
		h2 = (__force u32)iph->saddr ^ iph->protocol;
		if (iph->frag_off & htons(IP_MF|IP_OFFSET))
			break;
		poff = proto_ports_offset(iph->protocol);
		if (poff >= 0 &&
		    pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
			iph = ip_hdr(skb);
			h2 ^= *(u32*)((void *)iph + iph->ihl * 4 + poff);
		}
		break;
	}
	case htons(ETH_P_IPV6):
	{
		struct ipv6hdr *iph;
		int poff;

		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			goto err;
		iph = ipv6_hdr(skb);
		h = (__force u32)iph->daddr.s6_addr32[3];
		h2 = (__force u32)iph->saddr.s6_addr32[3] ^ iph->nexthdr;
		poff = proto_ports_offset(iph->nexthdr);
		if (poff >= 0 &&
		    pskb_network_may_pull(skb, sizeof(*iph) + 4 + poff)) {
			iph = ipv6_hdr(skb);
			h2 ^= *(u32*)((void *)iph + sizeof(*iph) + poff);
		}
		break;
	}
	default:
err:
		h = (unsigned long)skb_dst(skb) ^ (__force u32)skb->protocol;
		h2 = (unsigned long)skb->sk;
	}

	return sfq_fold_hash(q, h, h2);

192 193 194 195 196 197 198 199 200 201 202 203 204 205 206
static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
				 int *qerr)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= SFQ_HASH_DIVISOR)
		return TC_H_MIN(skb->priority);

	if (!q->filter_list)
		return sfq_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, q->filter_list, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
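			/* fall through */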
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= SFQ_HASH_DIVISOR)
			return TC_H_MIN(res.classid);
	}
	return 0;
}

/*
 * x : slot number [0 .. SFQ_SLOTS - 1]
 */
static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int qlen = q->slots[x].qlen;

	p = qlen + SFQ_SLOTS;
	n = q->dep[qlen].next;

	q->slots[x].dep.next = n;
	q->slots[x].dep.prev = p;

	q->dep[qlen].next = x;		/* sfq_dep_head(q, p)->next = x */
	sfq_dep_head(q, n)->prev = x;
}

#define sfq_unlink(q, x, n, p)			\
	n = q->slots[x].dep.next;		\
	p = q->slots[x].dep.prev;		\
	sfq_dep_head(q, p)->next = n;		\
	sfq_dep_head(q, n)->prev = p


static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = q->slots[x].qlen--;
	if (n == p && q->cur_depth == d)
		q->cur_depth--;
	sfq_link(q, x);
}

static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = ++q->slots[x].qlen;
	if (q->cur_depth < d)
		q->cur_depth = d;
	sfq_link(q, x);
}

/* helper functions: might be changed when/if skb uses a standard list_head */

/* remove one skb from tail of slot queue */
static inline struct sk_buff *slot_dequeue_tail(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_prev;

	slot->skblist_prev = skb->prev;
	skb->next = skb->prev = NULL;
	return skb;
}

/* remove one skb from head of slot queue */
static inline struct sk_buff *slot_dequeue_head(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_next;

	slot->skblist_next = skb->next;
	skb->next = skb->prev = NULL;
	return skb;
}

static inline void slot_queue_init(struct sfq_slot *slot)
{
	slot->skblist_prev = slot->skblist_next = (struct sk_buff *)slot;
}

/* add skb to slot queue (tail add) */
static inline void slot_queue_add(struct sfq_slot *slot, struct sk_buff *skb)
{
	skb->prev = slot->skblist_prev;
	skb->next = (struct sk_buff *)slot;
	slot->skblist_prev->next = skb;
	slot->skblist_prev = skb;
}

#define	slot_queue_walk(slot, skb)		\
	for (skb = slot->skblist_next;		\
	     skb != (struct sk_buff *)slot;	\
	     skb = skb->next)

static unsigned int sfq_drop(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index x, d = q->cur_depth;
	struct sk_buff *skb;
	unsigned int len;
	struct sfq_slot *slot;

	/* Queue is full! Find the longest slot and drop tail packet from it */
	if (d > 1) {
		x = q->dep[d].next;
		slot = &q->slots[x];
drop:
		skb = slot_dequeue_tail(slot);
		len = qdisc_pkt_len(skb);
		sfq_dec(q, x);
		kfree_skb(skb);
		sch->q.qlen--;
		sch->qstats.drops++;
		sch->qstats.backlog -= len;
		return len;
	}

	if (d == 1) {
		/* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
		x = q->tail->next;
		slot = &q->slots[x];
		q->tail->next = slot->next;
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		goto drop;
	}

	return 0;
}

static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int hash;
	sfq_index x;
	struct sfq_slot *slot;
	int uninitialized_var(ret);

	hash = sfq_classify(skb, sch, &ret);
	if (hash == 0) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
	hash--;

	x = q->ht[hash];
	slot = &q->slots[x];
	if (x == SFQ_EMPTY_SLOT) {
		x = q->dep[0].next; /* get a free slot */
		q->ht[hash] = x;
		slot = &q->slots[x];
		slot->hash = hash;
		slot_queue_init(slot);
	}

	/* If selected queue has length q->limit, do simple tail drop,
	 * i.e. drop _this_ packet.
	 */
	if (slot->qlen >= q->limit)
		return qdisc_drop(skb, sch);

	sch->qstats.backlog += qdisc_pkt_len(skb);
	slot_queue_add(slot, skb);
	sfq_inc(q, x);
	if (slot->qlen == 1) {		/* The flow is new */
		if (q->tail == NULL) {	/* It is the first flow */
			slot->next = x;
		} else {
			slot->next = q->tail->next;
			q->tail->next = x;
		}
		q->tail = slot;
		slot->allot = q->quantum;
	}
	if (++sch->q.qlen <= q->limit) {
		sch->bstats.bytes += qdisc_pkt_len(skb);
		sch->bstats.packets++;
		return NET_XMIT_SUCCESS;
	}

	sfq_drop(sch);
	return NET_XMIT_CN;
}

static struct sk_buff *
sfq_peek(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	/* No active slots */
	if (q->tail == NULL)
		return NULL;

	return q->slots[q->tail->next].skblist_next;
}

static struct sk_buff *
sfq_dequeue(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	sfq_index a, next_a;
	struct sfq_slot *slot;

	/* No active slots */
	if (q->tail == NULL)
		return NULL;

	a = q->tail->next;
	slot = &q->slots[a];
	skb = slot_dequeue_head(slot);
	sfq_dec(q, a);
	sch->q.qlen--;
	sch->qstats.backlog -= qdisc_pkt_len(skb);

	/* Is the slot empty? */
	if (slot->qlen == 0) {
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		next_a = slot->next;
		if (a == next_a) {
			q->tail = NULL; /* no more active slots */
			return skb;
		}
		q->tail->next = next_a;
	} else if ((slot->allot -= qdisc_pkt_len(skb)) <= 0) {
		q->tail = slot;
		slot->allot += q->quantum;
	}
	return skb;
}

static void
sfq_reset(struct Qdisc *sch)
{
	struct sk_buff *skb;

	while ((skb = sfq_dequeue(sch)) != NULL)
		kfree_skb(skb);
}

static void sfq_perturbation(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	struct sfq_sched_data *q = qdisc_priv(sch);

	q->perturbation = net_random();

	if (q->perturb_period)
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
}

static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tc_sfq_qopt *ctl = nla_data(opt);
	unsigned int qlen;

	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
		return -EINVAL;

	sch_tree_lock(sch);
	q->quantum = ctl->quantum ? : psched_mtu(qdisc_dev(sch));
	q->perturb_period = ctl->perturb_period * HZ;
	if (ctl->limit)
		q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);

	qlen = sch->q.qlen;
	while (sch->q.qlen > q->limit)
		sfq_drop(sch);
	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);

	del_timer(&q->perturb_timer);
	if (q->perturb_period) {
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
		q->perturbation = net_random();
	}
	sch_tree_unlock(sch);
	return 0;
}

static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	int i;

	q->perturb_timer.function = sfq_perturbation;
	q->perturb_timer.data = (unsigned long)sch;
	init_timer_deferrable(&q->perturb_timer);

	for (i = 0; i < SFQ_HASH_DIVISOR; i++)
		q->ht[i] = SFQ_EMPTY_SLOT;

	for (i = 0; i < SFQ_DEPTH; i++) {
		q->dep[i].next = i + SFQ_SLOTS;
		q->dep[i].prev = i + SFQ_SLOTS;
	}

	q->limit = SFQ_DEPTH - 1;
	q->cur_depth = 0;
	q->tail = NULL;
	if (opt == NULL) {
		q->quantum = psched_mtu(qdisc_dev(sch));
		q->perturb_period = 0;
		q->perturbation = net_random();
	} else {
		int err = sfq_change(sch, opt);
		if (err)
			return err;
	}

	for (i = 0; i < SFQ_SLOTS; i++)
		sfq_link(q, i);
	return 0;
}

static void sfq_destroy(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	q->perturb_period = 0;
	del_timer_sync(&q->perturb_timer);
}

static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_sfq_qopt opt;

	opt.quantum = q->quantum;
	opt.perturb_period = q->perturb_period / HZ;

	opt.limit = q->limit;
	opt.divisor = SFQ_HASH_DIVISOR;
	opt.flows = q->limit;

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	return 0;
}

static void sfq_put(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static int sfq_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				struct gnet_dump *d)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	const struct sfq_slot *slot = &q->slots[q->ht[cl - 1]];
	struct gnet_stats_queue qs = { .qlen = slot->qlen };
	struct tc_sfq_xstats xstats = { .allot = slot->allot };
	struct sk_buff *skb;

	slot_queue_walk(slot, skb)
		qs.backlog += qdisc_pkt_len(skb);

	if (gnet_stats_copy_queue(d, &qs) < 0)
		return -1;
	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < SFQ_HASH_DIVISOR; i++) {
		if (q->ht[i] == SFQ_EMPTY_SLOT ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops sfq_class_ops = {
	.leaf		=	sfq_leaf,
	.get		=	sfq_get,
	.put		=	sfq_put,
	.tcf_chain	=	sfq_find_tcf,
	.bind_tcf	=	sfq_bind,
	.unbind_tcf	=	sfq_put,
	.dump		=	sfq_dump_class,
	.dump_stats	=	sfq_dump_class_stats,
	.walk		=	sfq_walk,
};

static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
	.cl_ops		=	&sfq_class_ops,
	.id		=	"sfq",
	.priv_size	=	sizeof(struct sfq_sched_data),
	.enqueue	=	sfq_enqueue,
	.dequeue	=	sfq_dequeue,
	.peek		=	sfq_peek,
	.drop		=	sfq_drop,
	.init		=	sfq_init,
	.reset		=	sfq_reset,
	.destroy	=	sfq_destroy,
	.change		=	NULL,
	.dump		=	sfq_dump,
	.owner		=	THIS_MODULE,
};

static int __init sfq_module_init(void)
{
	return register_qdisc(&sfq_qdisc_ops);
}
static void __exit sfq_module_exit(void)
{
	unregister_qdisc(&sfq_qdisc_ops);
}
module_init(sfq_module_init)
module_exit(sfq_module_exit)
MODULE_LICENSE("GPL");