sch_sfq.c 16.2 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21
/*
 * net/sched/sch_sfq.c	Stochastic Fairness Queueing discipline.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>
22
#include <linux/jhash.h>
23
#include <linux/slab.h>
24 25
#include <net/ip.h>
#include <net/netlink.h>
L
Linus Torvalds 已提交
26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44
#include <net/pkt_sched.h>


/*	Stochastic Fairness Queuing algorithm.
	=======================================

	Source:
	Paul E. McKenney "Stochastic Fairness Queuing",
	IEEE INFOCOMM'90 Proceedings, San Francisco, 1990.

	Paul E. McKenney "Stochastic Fairness Queuing",
	"Interworking: Research and Experience", v.2, 1991, p.113-131.


	See also:
	M. Shreedhar and George Varghese "Efficient Fair
	Queuing using Deficit Round Robin", Proc. SIGCOMM 95.


	This is not the thing that is usually called (W)FQ nowadays.
	It does not use any timestamp mechanism, but instead
	processes queues in round-robin order.

	ADVANTAGE:

	- It is very cheap. Both CPU and memory requirements are minimal.

	DRAWBACKS:

	- "Stochastic" -> It is not 100% fair.
	When hash collisions occur, several flows are considered as one.

	- "Round-robin" -> It introduces larger delays than virtual clock
	based schemes, and should not be used for isolating interactive
	traffic	from non-interactive. It means, that this scheduler
	should be used as leaf of CBQ or P3, which put interactive traffic
	to higher priority band.

	We still need true WFQ for top level CSZ, but using WFQ
	for the best effort traffic is absolutely pointless:
	SFQ is superior for this purpose.

	IMPLEMENTATION:
	This implementation limits maximal queue length to 128;
	max mtu to 2^18-1; max 128 flows, number of hash buckets to 1024.
	The only goal of this restrictions was that all data
	fit into one 4K page on 32bit arches.

	It is easy to increase these values, but not in flight.  */

76 77 78
#define SFQ_DEPTH		128 /* max number of packets per flow */
#define SFQ_SLOTS		128 /* max number of flows */
#define SFQ_EMPTY_SLOT		255 /* sentinel: hash bucket has no slot */
#define SFQ_HASH_DIVISOR	1024 /* number of hash buckets */
/* We use 16 bits to store allot, and want to handle packets up to 64K
 * Scale allot by 8 (1<<3) so that no overflow occurs.
 */
#define SFQ_ALLOT_SHIFT		3
#define SFQ_ALLOT_SIZE(X)	DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT)
L
Linus Torvalds 已提交
85

86
/* This type should contain at least SFQ_DEPTH + SFQ_SLOTS values */
typedef unsigned char sfq_index;

/*
 * We dont use pointers to save space.
 * Small indexes [0 ... SFQ_SLOTS - 1] are 'pointers' to slots[] array
 * while following values [SFQ_SLOTS ... SFQ_SLOTS + SFQ_DEPTH - 1]
 * are 'pointers' to dep[] array
 */
L
Linus Torvalds 已提交
95 96 97 98 99 100
/* Doubly-linked list anchor; indices (sfq_index), not pointers. */
struct sfq_head
{
	sfq_index	next;
	sfq_index	prev;
};

/* Per-flow state: an intrusive skb list plus scheduling bookkeeping. */
struct sfq_slot {
	struct sk_buff	*skblist_next;	/* head of this slot's skb list */
	struct sk_buff	*skblist_prev;	/* tail of this slot's skb list */
	sfq_index	qlen; /* number of skbs in skblist */
	sfq_index	next; /* next slot in sfq chain */
	struct sfq_head dep; /* anchor in dep[] chains */
	unsigned short	hash; /* hash value (index in ht[]) */
	short		allot; /* credit for this slot */
};

L
Linus Torvalds 已提交
111 112 113 114 115 116 117 118
/* Private data of one SFQ qdisc instance (sized to fit a 4K page on 32bit). */
struct sfq_sched_data
{
/* Parameters */
	int		perturb_period;	/* jiffies between hash re-seeds; 0 = never */
	unsigned	quantum;	/* Allotment per round: MUST BE >= MTU */
	int		limit;		/* max packets queued overall */

/* Variables */
	struct tcf_proto *filter_list;	/* optional classifier chain */
	struct timer_list perturb_timer;	/* fires sfq_perturbation() */
	u32		perturbation;	/* jhash seed, randomized periodically */
	sfq_index	cur_depth;	/* depth of longest slot */
	unsigned short  scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
	struct sfq_slot *tail;		/* current slot in round */
	sfq_index	ht[SFQ_HASH_DIVISOR];	/* Hash table */
	struct sfq_slot	slots[SFQ_SLOTS];
	struct sfq_head	dep[SFQ_DEPTH];	/* Linked list of slots, indexed by depth */
};

130 131 132 133 134 135 136 137 138 139
/*
 * sfq_head are either in a sfq_slot or in dep[] array
 *
 * Resolve an sfq_index to its anchor: indices below SFQ_SLOTS name a
 * slot's embedded dep anchor, the rest name entries of dep[] itself.
 */
static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
{
	return (val < SFQ_SLOTS) ? &q->slots[val].dep
				 : &q->dep[val - SFQ_SLOTS];
}

L
Linus Torvalds 已提交
140 141
/* Fold two 32-bit keys plus the perturbation seed into a bucket index. */
static __inline__ unsigned sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
{
	/* SFQ_HASH_DIVISOR is a power of two, so mask == modulo */
	return jhash_2words(h, h1, q->perturbation) & (SFQ_HASH_DIVISOR - 1);
}

/*
 * Flow classifier: hash addresses/protocol (and ports when available)
 * of an IPv4/IPv6 packet into a bucket index.  Unknown protocols fall
 * back to hashing the dst entry and socket pointers.
 */
static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
{
	u32 h, h2;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
	{
		const struct iphdr *iph;
		int poff;

		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			goto err;
		iph = ip_hdr(skb);
		h = (__force u32)iph->daddr;
		h2 = (__force u32)iph->saddr ^ iph->protocol;
		/* non-first fragments carry no L4 header: addresses only */
		if (iph->frag_off & htons(IP_MF|IP_OFFSET))
			break;
		poff = proto_ports_offset(iph->protocol);
		if (poff >= 0 &&
		    pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
			/* pull may have reallocated the header; reload iph */
			iph = ip_hdr(skb);
			h2 ^= *(u32*)((void *)iph + iph->ihl * 4 + poff);
		}
		break;
	}
	case htons(ETH_P_IPV6):
	{
		struct ipv6hdr *iph;
		int poff;

		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			goto err;
		iph = ipv6_hdr(skb);
		/* low word of each address is enough entropy for hashing */
		h = (__force u32)iph->daddr.s6_addr32[3];
		h2 = (__force u32)iph->saddr.s6_addr32[3] ^ iph->nexthdr;
		poff = proto_ports_offset(iph->nexthdr);
		if (poff >= 0 &&
		    pskb_network_may_pull(skb, sizeof(*iph) + 4 + poff)) {
			/* pull may have reallocated the header; reload iph */
			iph = ipv6_hdr(skb);
			h2 ^= *(u32*)((void *)iph + sizeof(*iph) + poff);
		}
		break;
	}
	default:
err:
		/* fallback for truncated or non-IP packets */
		h = (unsigned long)skb_dst(skb) ^ (__force u32)skb->protocol;
		h2 = (unsigned long)skb->sk;
	}

	return sfq_fold_hash(q, h, h2);
}

197 198 199 200 201 202 203 204 205 206 207 208 209 210 211
/*
 * Map a packet to a class number in [1 .. SFQ_HASH_DIVISOR], or 0 to
 * drop.  Priority from skb->priority wins, then attached tc filters,
 * then the built-in flow hash.  *qerr is set when 0 is returned.
 */
static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
				 int *qerr)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	int result;

	/* explicit classid on the skb addressed to this qdisc wins */
	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= SFQ_HASH_DIVISOR)
		return TC_H_MIN(skb->priority);

	if (!q->filter_list)
		return sfq_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, q->filter_list, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through: stolen/queued packets return 0 too */
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= SFQ_HASH_DIVISOR)
			return TC_H_MIN(res.classid);
	}
	return 0;
}

230 231 232
/*
 * x : slot number [0 .. SFQ_SLOTS - 1]
 *
 * Insert slot x at the head of the dep[] chain for its current queue
 * length, so slots are always findable by depth.
 */
static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int qlen = q->slots[x].qlen;

	p = qlen + SFQ_SLOTS;	/* index of dep[qlen] in the unified space */
	n = q->dep[qlen].next;	/* current first slot at this depth */

	q->slots[x].dep.next = n;
	q->slots[x].dep.prev = p;

	q->dep[qlen].next = x;		/* sfq_dep_head(q, p)->next = x */
	sfq_dep_head(q, n)->prev = x;
}

248 249 250 251 252 253 254
/* Detach slot x from its dep chain; n/p receive its old neighbours. */
#define sfq_unlink(q, x, n, p)			\
	n = q->slots[x].dep.next;		\
	p = q->slots[x].dep.prev;		\
	sfq_dep_head(q, p)->next = n;		\
	sfq_dep_head(q, n)->prev = p


L
Linus Torvalds 已提交
255 256 257
/* One packet left slot x: move it to the dep chain one depth lower. */
static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	/* d is the depth BEFORE the decrement (post-decrement) */
	d = q->slots[x].qlen--;
	/* n == p means x was the only slot at depth d */
	if (n == p && q->cur_depth == d)
		q->cur_depth--;
	sfq_link(q, x);
}

/* One packet entered slot x: move it to the dep chain one depth higher. */
static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = ++q->slots[x].qlen;	/* depth AFTER the increment */
	if (q->cur_depth < d)
		q->cur_depth = d;
	sfq_link(q, x);
}

281 282 283 284 285 286 287 288
/* helper functions : might be changed when/if skb use a standard list_head */

/* remove one skb from tail of slot queue */
static inline struct sk_buff *slot_dequeue_tail(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_prev;

	slot->skblist_prev = skb->prev;
E
Eric Dumazet 已提交
289
	skb->prev->next = (struct sk_buff *)slot;
290 291 292 293 294 295 296 297 298 299
	skb->next = skb->prev = NULL;
	return skb;
}

/* remove one skb from head of slot queue */
static inline struct sk_buff *slot_dequeue_head(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_next;

	slot->skblist_next = skb->next;
E
Eric Dumazet 已提交
300
	skb->next->prev = (struct sk_buff *)slot;
301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323
	skb->next = skb->prev = NULL;
	return skb;
}

/* make the slot's skb list empty: both ends point back at the anchor */
static inline void slot_queue_init(struct sfq_slot *slot)
{
	struct sk_buff *anchor = (struct sk_buff *)slot;

	slot->skblist_next = anchor;
	slot->skblist_prev = anchor;
}

/* add skb to slot queue (tail add) */
static inline void slot_queue_add(struct sfq_slot *slot, struct sk_buff *skb)
{
	struct sk_buff *oldtail = slot->skblist_prev;

	skb->prev = oldtail;
	skb->next = (struct sk_buff *)slot;
	oldtail->next = skb;
	slot->skblist_prev = skb;
}

/* iterate over every skb in a slot; the slot pointer is the sentinel */
#define	slot_queue_walk(slot, skb)		\
	for (skb = slot->skblist_next;		\
	     skb != (struct sk_buff *)slot;	\
	     skb = skb->next)

L
Linus Torvalds 已提交
324 325 326
/*
 * Drop one packet to relieve pressure: tail-drop from the deepest slot,
 * or, when every active slot holds exactly one packet, drop the head
 * packet of the next slot in the round and retire that slot.
 * Returns the dropped packet's length, or 0 if nothing was queued.
 */
static unsigned int sfq_drop(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index x, d = q->cur_depth;
	struct sk_buff *skb;
	unsigned int len;
	struct sfq_slot *slot;

	/* Queue is full! Find the longest slot and drop tail packet from it */
	if (d > 1) {
		x = q->dep[d].next;	/* first slot at maximal depth */
		slot = &q->slots[x];
drop:
		skb = slot_dequeue_tail(slot);
		len = qdisc_pkt_len(skb);
		sfq_dec(q, x);
		kfree_skb(skb);
		sch->q.qlen--;
		sch->qstats.drops++;
		sch->qstats.backlog -= len;
		return len;
	}

	if (d == 1) {
		/* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
		x = q->tail->next;
		slot = &q->slots[x];
		/* unlink the slot from the round and free its hash bucket */
		q->tail->next = slot->next;
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		goto drop;
	}

	return 0;
}

/*
 * Enqueue entry point: classify skb into a slot, allocate a free slot
 * for a new flow, tail-add the packet, and link new flows into the
 * round-robin ring.  Over-limit causes a drop (possibly of this packet).
 */
static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int hash;
	sfq_index x;
	struct sfq_slot *slot;
	int uninitialized_var(ret);

	hash = sfq_classify(skb, sch, &ret);
	if (hash == 0) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
	hash--;	/* classes are 1-based, buckets 0-based */

	x = q->ht[hash];
	slot = &q->slots[x];
	if (x == SFQ_EMPTY_SLOT) {
		x = q->dep[0].next; /* get a free slot */
		q->ht[hash] = x;
		slot = &q->slots[x];
		slot->hash = hash;
	}

	/* If selected queue has length q->limit, do simple tail drop,
	 * i.e. drop _this_ packet.
	 */
	if (slot->qlen >= q->limit)
		return qdisc_drop(skb, sch);

	sch->qstats.backlog += qdisc_pkt_len(skb);
	slot_queue_add(slot, skb);
	sfq_inc(q, x);
	if (slot->qlen == 1) {		/* The flow is new */
		if (q->tail == NULL) {	/* It is the first flow */
			slot->next = x;	/* ring of one */
		} else {
			slot->next = q->tail->next;
			q->tail->next = x;
		}
		q->tail = slot;
		slot->allot = q->scaled_quantum;
	}
	if (++sch->q.qlen <= q->limit) {
		qdisc_bstats_update(sch, skb);
		return NET_XMIT_SUCCESS;
	}

	/* over global limit: shed one packet (may be from another flow) */
	sfq_drop(sch);
	return NET_XMIT_CN;
}

414 415 416 417
static struct sk_buff *
sfq_peek(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
L
Linus Torvalds 已提交
418

419
	/* No active slots */
420
	if (q->tail == NULL)
421
		return NULL;
L
Linus Torvalds 已提交
422

423
	return q->slots[q->tail->next].skblist_next;
424
}
L
Linus Torvalds 已提交
425 426

/*
 * Dequeue entry point: deficit round-robin over active slots.  A slot
 * with exhausted allot is refilled and skipped; an emptied slot is
 * unlinked from the ring and its hash bucket freed.
 */
static struct sk_buff *
sfq_dequeue(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	sfq_index a, next_a;
	struct sfq_slot *slot;

	/* No active slots */
	if (q->tail == NULL)
		return NULL;

next_slot:
	a = q->tail->next;
	slot = &q->slots[a];
	if (slot->allot <= 0) {
		/* out of credit: advance the round and refill this slot */
		q->tail = slot;
		slot->allot += q->scaled_quantum;
		goto next_slot;
	}
	skb = slot_dequeue_head(slot);
	sfq_dec(q, a);
	sch->q.qlen--;
	sch->qstats.backlog -= qdisc_pkt_len(skb);

	/* Is the slot empty? */
	if (slot->qlen == 0) {
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		next_a = slot->next;
		if (a == next_a) {
			q->tail = NULL; /* no more active slots */
			return skb;
		}
		q->tail->next = next_a;	/* unlink slot from the ring */
	} else {
		/* charge the dequeued bytes against the slot's allot */
		slot->allot -= SFQ_ALLOT_SIZE(qdisc_pkt_len(skb));
	}
	return skb;
}

static void
467
sfq_reset(struct Qdisc *sch)
L
Linus Torvalds 已提交
468 469 470 471 472 473 474 475 476
{
	struct sk_buff *skb;

	while ((skb = sfq_dequeue(sch)) != NULL)
		kfree_skb(skb);
}

static void sfq_perturbation(unsigned long arg)
{
477
	struct Qdisc *sch = (struct Qdisc *)arg;
L
Linus Torvalds 已提交
478 479
	struct sfq_sched_data *q = qdisc_priv(sch);

480
	q->perturbation = net_random();
L
Linus Torvalds 已提交
481

482 483
	if (q->perturb_period)
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
L
Linus Torvalds 已提交
484 485
}

486
/*
 * Apply new parameters from a netlink tc_sfq_qopt.  Runs under the
 * qdisc tree lock; excess packets are dropped to honour a lowered
 * limit, and the perturbation timer is restarted as configured.
 * Returns 0 or -EINVAL on a short attribute.
 */
static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tc_sfq_qopt *ctl = nla_data(opt);
	unsigned int qlen;

	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
		return -EINVAL;

	sch_tree_lock(sch);
	/* quantum of 0 means "use the device MTU" */
	q->quantum = ctl->quantum ? : psched_mtu(qdisc_dev(sch));
	q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
	q->perturb_period = ctl->perturb_period * HZ;
	if (ctl->limit)
		q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);

	/* shrink to the (possibly lower) new limit */
	qlen = sch->q.qlen;
	while (sch->q.qlen > q->limit)
		sfq_drop(sch);
	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);

	del_timer(&q->perturb_timer);
	if (q->perturb_period) {
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
		q->perturbation = net_random();
	}
	sch_tree_unlock(sch);
	return 0;
}

516
/*
 * Initialize a fresh SFQ instance: set up the perturbation timer,
 * empty hash table, free-slot chains and defaults, then apply any
 * netlink options via sfq_change().  Returns 0 or a negative errno.
 */
static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	int i;

	q->perturb_timer.function = sfq_perturbation;
	q->perturb_timer.data = (unsigned long)sch;
	init_timer_deferrable(&q->perturb_timer);

	for (i = 0; i < SFQ_HASH_DIVISOR; i++)
		q->ht[i] = SFQ_EMPTY_SLOT;

	/* each dep[i] chain starts empty: head points at itself */
	for (i = 0; i < SFQ_DEPTH; i++) {
		q->dep[i].next = i + SFQ_SLOTS;
		q->dep[i].prev = i + SFQ_SLOTS;
	}

	q->limit = SFQ_DEPTH - 1;
	q->cur_depth = 0;
	q->tail = NULL;
	if (opt == NULL) {
		q->quantum = psched_mtu(qdisc_dev(sch));
		q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
		q->perturb_period = 0;
		q->perturbation = net_random();
	} else {
		int err = sfq_change(sch, opt);
		if (err)
			return err;
	}

	/* all slots start empty, i.e. linked into the depth-0 chain */
	for (i = 0; i < SFQ_SLOTS; i++) {
		slot_queue_init(&q->slots[i]);
		sfq_link(q, i);
	}
	return 0;
}

/* Tear down: free the filter chain, then stop the perturbation timer
 * (perturb_period is zeroed first so the callback cannot re-arm it). */
static void sfq_destroy(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	q->perturb_period = 0;
	del_timer_sync(&q->perturb_timer);
}

/*
 * Dump the current configuration as a TCA_OPTIONS tc_sfq_qopt.
 * Returns the skb length, or -1 (with the skb trimmed back) when the
 * attribute does not fit.
 */
static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_sfq_qopt opt;

	opt.quantum = q->quantum;
	opt.perturb_period = q->perturb_period / HZ;

	opt.limit = q->limit;
	opt.divisor = SFQ_HASH_DIVISOR;
	/* flows is reported as the packet limit in this implementation */
	opt.flows = q->limit;

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);

	return skb->len;

nla_put_failure:	/* NLA_PUT jumps here when the skb lacks room */
	nlmsg_trim(skb, b);
	return -1;
}

585 586 587 588 589
/* SFQ classes are internal flows, not qdiscs: there is never a leaf. */
static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

590 591 592 593 594
/* Classes are not reference counted; lookup always "fails" with 0. */
static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
{
	return 0;
}

595 596 597 598 599 600
/* Filters may bind to any class; no per-class state to hand back. */
static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	return 0;
}

601 602 603 604
/* Nothing to release: classes carry no reference counts. */
static void sfq_put(struct Qdisc *q, unsigned long cl)
{
}

605 606 607 608 609 610 611 612 613
/* Filters attach at the qdisc level only; per-class chains don't exist. */
static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	return cl ? NULL : &q->filter_list;
}

614 615 616 617 618 619 620 621 622 623 624
/* A class's handle minor is simply its 1-based bucket number. */
static int sfq_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

/*
 * Dump queue statistics for class cl (1-based bucket number): qlen,
 * byte backlog, and remaining allot of the bucket's slot, if any.
 */
static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				struct gnet_dump *d)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index idx = q->ht[cl - 1];
	struct gnet_stats_queue qs = { 0 };
	struct tc_sfq_xstats xstats = { 0 };
	struct sk_buff *skb;

	if (idx != SFQ_EMPTY_SLOT) {
		const struct sfq_slot *slot = &q->slots[idx];

		/* NOTE(review): allot is a signed short and can be negative
		 * here; left-shifting a negative value is formally UB in C —
		 * harmless on the targeted compilers, but worth confirming. */
		xstats.allot = slot->allot << SFQ_ALLOT_SHIFT;
		qs.qlen = slot->qlen;
		slot_queue_walk(slot, skb)
			qs.backlog += qdisc_pkt_len(skb);
	}
	if (gnet_stats_copy_queue(d, &qs) < 0)
		return -1;
	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

643 644
static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
645 646 647 648 649 650 651
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < SFQ_HASH_DIVISOR; i++) {
652
		if (q->ht[i] == SFQ_EMPTY_SLOT ||
653 654 655 656 657 658 659 660 661 662
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
663 664 665
}

/* Class operations: mostly stubs, since SFQ "classes" are hash buckets. */
static const struct Qdisc_class_ops sfq_class_ops = {
	.leaf		=	sfq_leaf,
	.get		=	sfq_get,
	.put		=	sfq_put,
	.tcf_chain	=	sfq_find_tcf,
	.bind_tcf	=	sfq_bind,
	.unbind_tcf	=	sfq_put,	/* unbind is a no-op, same as put */
	.dump		=	sfq_dump_class,
	.dump_stats	=	sfq_dump_class_stats,
	.walk		=	sfq_walk,
};

677
/* Qdisc registration table for "sfq". */
static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
	.cl_ops		=	&sfq_class_ops,
	.id		=	"sfq",
	.priv_size	=	sizeof(struct sfq_sched_data),
	.enqueue	=	sfq_enqueue,
	.dequeue	=	sfq_dequeue,
	.peek		=	sfq_peek,
	.drop		=	sfq_drop,
	.init		=	sfq_init,
	.reset		=	sfq_reset,
	.destroy	=	sfq_destroy,
	.change		=	NULL,	/* runtime changes go through init path */
	.dump		=	sfq_dump,
	.owner		=	THIS_MODULE,
};

/* Module entry point: make the "sfq" qdisc available to tc. */
static int __init sfq_module_init(void)
{
	return register_qdisc(&sfq_qdisc_ops);
}
697
/* Module exit point: remove the "sfq" qdisc registration. */
static void __exit sfq_module_exit(void)
{
	unregister_qdisc(&sfq_qdisc_ops);
}
module_init(sfq_module_init)
module_exit(sfq_module_exit)
MODULE_LICENSE("GPL");