/*
 * net/sched/sch_sfq.c	Stochastic Fairness Queueing discipline.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/flow_keys.h>


/*	Stochastic Fairness Queuing algorithm.
	=======================================

	Source:
	Paul E. McKenney "Stochastic Fairness Queuing",
	IEEE INFOCOMM'90 Proceedings, San Francisco, 1990.

	Paul E. McKenney "Stochastic Fairness Queuing",
	"Interworking: Research and Experience", v.2, 1991, p.113-131.


	See also:
	M. Shreedhar and George Varghese "Efficient Fair
	Queuing using Deficit Round Robin", Proc. SIGCOMM 95.


	This is not the thing that is usually called (W)FQ nowadays.
	It does not use any timestamp mechanism, but instead
	processes queues in round-robin order.

	ADVANTAGE:

	- It is very cheap. Both CPU and memory requirements are minimal.

	DRAWBACKS:

	- "Stochastic" -> It is not 100% fair.
	When hash collisions occur, several flows are considered as one.

	- "Round-robin" -> It introduces larger delays than virtual clock
	based schemes, and should not be used for isolating interactive
	traffic from non-interactive. This means that this scheduler
	should be used as a leaf of CBQ or P3, which put interactive traffic
	into a higher priority band.

	We still need true WFQ for the top level CSZ, but using WFQ
	for the best effort traffic is absolutely pointless:
	SFQ is superior for this purpose.

	IMPLEMENTATION:
	This implementation limits the maximal queue length to 128 packets,
	the max MTU to 2^18-1, the number of flows to 128, and the number
	of hash buckets to 1024.
	The only goal of these restrictions was that all data
	fit into one 4K page on 32bit arches.

	It is easy to increase these values, but not in flight.  */

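/* Example (illustrative only, not part of the original file): SFQ is
 * typically attached with standard iproute2 syntax, e.g. enabling a 10
 * second perturbation period on a placeholder device "eth0":
 *
 *	tc qdisc add dev eth0 root sfq perturb 10
 *
 * quantum and limit may likewise be overridden; see sfq_change() below.
 */
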
#define SFQ_DEPTH		128 /* max number of packets per flow */
#define SFQ_SLOTS		128 /* max number of flows */
#define SFQ_EMPTY_SLOT		255
#define SFQ_DEFAULT_HASH_DIVISOR 1024

/* We use 16 bits to store allot, and want to handle packets up to 64K
 * Scale allot by 8 (1<<3) so that no overflow occurs.
 */
#define SFQ_ALLOT_SHIFT		3
#define SFQ_ALLOT_SIZE(X)	DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT)
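
/* Illustrative arithmetic (not part of the original comment): with
 * SFQ_ALLOT_SHIFT == 3, a typical 1514 byte Ethernet frame costs
 * SFQ_ALLOT_SIZE(1514) = DIV_ROUND_UP(1514, 8) = 190 scaled units, and a
 * worst-case 64KB packet costs 8192 units, both well inside the range of
 * the 16-bit allot field.
 */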

/* This type should contain at least SFQ_DEPTH + SFQ_SLOTS values */
typedef unsigned char sfq_index;

/*
 * To save space we don't use pointers.
 * Small indexes [0 ... SFQ_SLOTS - 1] are 'pointers' to slots[] array
 * while following values [SFQ_SLOTS ... SFQ_SLOTS + SFQ_DEPTH - 1]
 * are 'pointers' to dep[] array
 */
struct sfq_head {
	sfq_index	next;
	sfq_index	prev;
};

struct sfq_slot {
	struct sk_buff	*skblist_next;
	struct sk_buff	*skblist_prev;
	sfq_index	qlen; /* number of skbs in skblist */
	sfq_index	next; /* next slot in sfq chain */
	struct sfq_head dep; /* anchor in dep[] chains */
	unsigned short	hash; /* hash value (index in ht[]) */
	short		allot; /* credit for this slot */
};

struct sfq_sched_data {
/* Parameters */
	int		perturb_period;
	unsigned int	quantum;	/* Allotment per round: MUST BE >= MTU */
	int		limit;
	unsigned int	divisor;	/* number of slots in hash table */
/* Variables */
	struct tcf_proto *filter_list;
	struct timer_list perturb_timer;
	u32		perturbation;
	sfq_index	cur_depth;	/* depth of longest slot */
	unsigned short  scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
	struct sfq_slot *tail;		/* current slot in round */
	sfq_index	*ht;		/* Hash table (divisor slots) */
	struct sfq_slot	slots[SFQ_SLOTS];
	struct sfq_head	dep[SFQ_DEPTH];	/* Linked list of slots, indexed by depth */
};

/*
 * sfq_head are either in a sfq_slot or in dep[] array
 */
static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
{
	if (val < SFQ_SLOTS)
		return &q->slots[val].dep;
	return &q->dep[val - SFQ_SLOTS];
}
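
/* Worked example (illustrative, not part of the original source): with
 * SFQ_SLOTS == 128, sfq_dep_head(q, 5) resolves to &q->slots[5].dep,
 * while sfq_dep_head(q, 130) resolves to &q->dep[2], the head of the
 * list of slots currently holding two packets.
 */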

/*
 * In order to be able to quickly rehash our queue when the timer changes
 * q->perturbation, we store the flow_keys in skb->cb[]
 */
struct sfq_skb_cb {
       struct flow_keys        keys;
};

static inline struct sfq_skb_cb *sfq_skb_cb(const struct sk_buff *skb)
{
       BUILD_BUG_ON(sizeof(skb->cb) <
               sizeof(struct qdisc_skb_cb) + sizeof(struct sfq_skb_cb));
       return (struct sfq_skb_cb *)qdisc_skb_cb(skb)->data;
}

static unsigned int sfq_hash(const struct sfq_sched_data *q,
			     const struct sk_buff *skb)
{
	const struct flow_keys *keys = &sfq_skb_cb(skb)->keys;
	unsigned int hash;

	hash = jhash_3words((__force u32)keys->dst,
			    (__force u32)keys->src ^ keys->ip_proto,
			    (__force u32)keys->ports, q->perturbation);
	return hash & (q->divisor - 1);
}

static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
				 int *qerr)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->divisor)
		return TC_H_MIN(skb->priority);

	if (!q->filter_list) {
		skb_flow_dissect(skb, &sfq_skb_cb(skb)->keys);
		return sfq_hash(q, skb) + 1;
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, q->filter_list, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->divisor)
			return TC_H_MIN(res.classid);
	}
	return 0;
}

/*
 * x : slot number [0 .. SFQ_SLOTS - 1]
 */
static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int qlen = q->slots[x].qlen;

	p = qlen + SFQ_SLOTS;
	n = q->dep[qlen].next;

	q->slots[x].dep.next = n;
	q->slots[x].dep.prev = p;

	q->dep[qlen].next = x;		/* sfq_dep_head(q, p)->next = x */
	sfq_dep_head(q, n)->prev = x;
}

#define sfq_unlink(q, x, n, p)			\
	n = q->slots[x].dep.next;		\
	p = q->slots[x].dep.prev;		\
	sfq_dep_head(q, p)->next = n;		\
	sfq_dep_head(q, n)->prev = p


static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = q->slots[x].qlen--;
	if (n == p && q->cur_depth == d)
		q->cur_depth--;
	sfq_link(q, x);
}

static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = ++q->slots[x].qlen;
	if (q->cur_depth < d)
		q->cur_depth = d;
	sfq_link(q, x);
}

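/* Illustrative note (not part of the original source): dep[d] heads the
 * list of active slots whose queue currently holds d packets, so
 * q->dep[q->cur_depth].next is always one of the longest slots (used by
 * sfq_drop()) and q->dep[0].next is always a free slot (used by
 * sfq_enqueue()). For example, after three packets land in slot 5 and one
 * packet in slot 9, slot 5 sits on dep[3], slot 9 on dep[1], and
 * cur_depth == 3.
 */
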
/* helper functions: might be changed when/if skbs use a standard list_head */

/* remove one skb from tail of slot queue */
static inline struct sk_buff *slot_dequeue_tail(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_prev;

	slot->skblist_prev = skb->prev;
	skb->prev->next = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}

/* remove one skb from head of slot queue */
static inline struct sk_buff *slot_dequeue_head(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_next;

	slot->skblist_next = skb->next;
	skb->next->prev = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}

static inline void slot_queue_init(struct sfq_slot *slot)
{
	slot->skblist_prev = slot->skblist_next = (struct sk_buff *)slot;
}

/* add skb to slot queue (tail add) */
static inline void slot_queue_add(struct sfq_slot *slot, struct sk_buff *skb)
{
	skb->prev = slot->skblist_prev;
	skb->next = (struct sk_buff *)slot;
	slot->skblist_prev->next = skb;
	slot->skblist_prev = skb;
}

#define	slot_queue_walk(slot, skb)		\
	for (skb = slot->skblist_next;		\
	     skb != (struct sk_buff *)slot;	\
	     skb = skb->next)
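
/* Editorial note (not part of the original source): the slot itself serves
 * as the list head in the helpers above. Casting the slot to
 * struct sk_buff * appears to be safe only because skblist_next and
 * skblist_prev are the first two fields of struct sfq_slot, mirroring the
 * leading next/prev pointers of struct sk_buff, and only those two fields
 * of the fake "skb" are ever touched.
 */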

static unsigned int sfq_drop(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index x, d = q->cur_depth;
	struct sk_buff *skb;
	unsigned int len;
	struct sfq_slot *slot;

	/* Queue is full! Find the longest slot and drop tail packet from it */
	if (d > 1) {
		x = q->dep[d].next;
		slot = &q->slots[x];
drop:
		skb = slot_dequeue_tail(slot);
		len = qdisc_pkt_len(skb);
		sfq_dec(q, x);
		kfree_skb(skb);
		sch->q.qlen--;
		sch->qstats.drops++;
		sch->qstats.backlog -= len;
		return len;
	}

	if (d == 1) {
		/* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
		x = q->tail->next;
		slot = &q->slots[x];
		q->tail->next = slot->next;
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		goto drop;
	}

	return 0;
}

static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int hash;
	sfq_index x, qlen;
	struct sfq_slot *slot;
	int uninitialized_var(ret);

	hash = sfq_classify(skb, sch, &ret);
	if (hash == 0) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
	hash--;

	x = q->ht[hash];
	slot = &q->slots[x];
	if (x == SFQ_EMPTY_SLOT) {
		x = q->dep[0].next; /* get a free slot */
		q->ht[hash] = x;
		slot = &q->slots[x];
		slot->hash = hash;
	}

	/* If selected queue has length q->limit, do simple tail drop,
	 * i.e. drop _this_ packet.
	 */
	if (slot->qlen >= q->limit)
		return qdisc_drop(skb, sch);

	sch->qstats.backlog += qdisc_pkt_len(skb);
	slot_queue_add(slot, skb);
	sfq_inc(q, x);
	if (slot->qlen == 1) {		/* The flow is new */
		if (q->tail == NULL) {	/* It is the first flow */
			slot->next = x;
			q->tail = slot;
		} else {
			slot->next = q->tail->next;
			q->tail->next = x;
		}
		slot->allot = q->scaled_quantum;
	}
	if (++sch->q.qlen <= q->limit)
		return NET_XMIT_SUCCESS;

	qlen = slot->qlen;
	sfq_drop(sch);
	/* Return Congestion Notification only if we dropped a packet
	 * from this flow.
	 */
	if (qlen != slot->qlen)
		return NET_XMIT_CN;

	/* As we dropped a packet, better let upper stack know this */
	qdisc_tree_decrease_qlen(sch, 1);
	return NET_XMIT_SUCCESS;
}

static struct sk_buff *
sfq_dequeue(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	sfq_index a, next_a;
	struct sfq_slot *slot;

	/* No active slots */
	if (q->tail == NULL)
		return NULL;

next_slot:
	a = q->tail->next;
	slot = &q->slots[a];
	if (slot->allot <= 0) {
		q->tail = slot;
		slot->allot += q->scaled_quantum;
		goto next_slot;
	}
	skb = slot_dequeue_head(slot);
	sfq_dec(q, a);
	qdisc_bstats_update(sch, skb);
	sch->q.qlen--;
	sch->qstats.backlog -= qdisc_pkt_len(skb);

	/* Is the slot empty? */
	if (slot->qlen == 0) {
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		next_a = slot->next;
		if (a == next_a) {
			q->tail = NULL; /* no more active slots */
			return skb;
		}
		q->tail->next = next_a;
	} else {
		slot->allot -= SFQ_ALLOT_SIZE(qdisc_pkt_len(skb));
	}
	return skb;
}
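
/* Worked example (illustrative, not part of the original source): with the
 * default quantum of one MTU, say 1514 bytes, scaled_quantum is 190. A new
 * flow enters the round with allot = 190; dequeuing a 1514 byte packet from
 * a still-backlogged slot charges SFQ_ALLOT_SIZE(1514) = 190, leaving 0, so
 * on the slot's next turn it is re-credited with another 190 and skipped in
 * favour of the next slot in the ring.
 */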

static void
sfq_reset(struct Qdisc *sch)
{
	struct sk_buff *skb;

	while ((skb = sfq_dequeue(sch)) != NULL)
		kfree_skb(skb);
}

/*
 * When q->perturbation is changed, we rehash all queued skbs
 * to avoid OOO (Out Of Order) effects.
 * We don't use sfq_dequeue()/sfq_enqueue() because we don't want to change
 * counters.
 */
static void sfq_rehash(struct sfq_sched_data *q)
{
	struct sk_buff *skb;
	int i;
	struct sfq_slot *slot;
	struct sk_buff_head list;

	__skb_queue_head_init(&list);

	for (i = 0; i < SFQ_SLOTS; i++) {
		slot = &q->slots[i];
		if (!slot->qlen)
			continue;
		while (slot->qlen) {
			skb = slot_dequeue_head(slot);
			sfq_dec(q, i);
			__skb_queue_tail(&list, skb);
		}
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
	}
	q->tail = NULL;

	while ((skb = __skb_dequeue(&list)) != NULL) {
		unsigned int hash = sfq_hash(q, skb);
		sfq_index x = q->ht[hash];

		slot = &q->slots[x];
		if (x == SFQ_EMPTY_SLOT) {
			x = q->dep[0].next; /* get a free slot */
			q->ht[hash] = x;
			slot = &q->slots[x];
			slot->hash = hash;
		}
		slot_queue_add(slot, skb);
		sfq_inc(q, x);
		if (slot->qlen == 1) {		/* The flow is new */
			if (q->tail == NULL) {	/* It is the first flow */
				slot->next = x;
			} else {
				slot->next = q->tail->next;
				q->tail->next = x;
			}
			q->tail = slot;
			slot->allot = q->scaled_quantum;
		}
	}
}

static void sfq_perturbation(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	struct sfq_sched_data *q = qdisc_priv(sch);
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	q->perturbation = net_random();
	if (!q->filter_list && q->tail)
		sfq_rehash(q);
	spin_unlock(root_lock);

	if (q->perturb_period)
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
}

static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tc_sfq_qopt *ctl = nla_data(opt);
	unsigned int qlen;

	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
		return -EINVAL;

	if (ctl->divisor &&
	    (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
		return -EINVAL;

	sch_tree_lock(sch);
	q->quantum = ctl->quantum ? : psched_mtu(qdisc_dev(sch));
	q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
	q->perturb_period = ctl->perturb_period * HZ;
	if (ctl->limit)
		q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);
	if (ctl->divisor)
		q->divisor = ctl->divisor;
	qlen = sch->q.qlen;
	while (sch->q.qlen > q->limit)
		sfq_drop(sch);
	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);

	del_timer(&q->perturb_timer);
	if (q->perturb_period) {
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
		q->perturbation = net_random();
	}
	sch_tree_unlock(sch);
	return 0;
}

static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	size_t sz;
	int i;

	q->perturb_timer.function = sfq_perturbation;
	q->perturb_timer.data = (unsigned long)sch;
	init_timer_deferrable(&q->perturb_timer);

	for (i = 0; i < SFQ_DEPTH; i++) {
		q->dep[i].next = i + SFQ_SLOTS;
		q->dep[i].prev = i + SFQ_SLOTS;
	}

	q->limit = SFQ_DEPTH - 1;
	q->cur_depth = 0;
	q->tail = NULL;
	q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
	if (opt == NULL) {
		q->quantum = psched_mtu(qdisc_dev(sch));
		q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
		q->perturb_period = 0;
		q->perturbation = net_random();
	} else {
		int err = sfq_change(sch, opt);
		if (err)
			return err;
	}

	sz = sizeof(q->ht[0]) * q->divisor;
	q->ht = kmalloc(sz, GFP_KERNEL);
	if (!q->ht && sz > PAGE_SIZE)
		q->ht = vmalloc(sz);
	if (!q->ht)
		return -ENOMEM;
	for (i = 0; i < q->divisor; i++)
		q->ht[i] = SFQ_EMPTY_SLOT;
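
/* Sizing note (editorial, not part of the original source): each hash
 * bucket is a single sfq_index byte, so the table is 1KB for the default
 * divisor of 1024 but can reach 64KB for the maximum divisor of 65536,
 * hence the vmalloc() fallback above when a large kmalloc() fails.
 */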

	for (i = 0; i < SFQ_SLOTS; i++) {
		slot_queue_init(&q->slots[i]);
		sfq_link(q, i);
	}
	if (q->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static void sfq_destroy(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	q->perturb_period = 0;
	del_timer_sync(&q->perturb_timer);
	if (is_vmalloc_addr(q->ht))
		vfree(q->ht);
	else
		kfree(q->ht);
}

static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_sfq_qopt opt;

	opt.quantum = q->quantum;
	opt.perturb_period = q->perturb_period / HZ;

	opt.limit = q->limit;
	opt.divisor = q->divisor;
	opt.flows = q->limit;

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	/* we cannot bypass queue discipline anymore */
	sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static void sfq_put(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static int sfq_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				struct gnet_dump *d)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index idx = q->ht[cl - 1];
	struct gnet_stats_queue qs = { 0 };
	struct tc_sfq_xstats xstats = { 0 };
	struct sk_buff *skb;

	if (idx != SFQ_EMPTY_SLOT) {
		const struct sfq_slot *slot = &q->slots[idx];

		xstats.allot = slot->allot << SFQ_ALLOT_SHIFT;
		qs.qlen = slot->qlen;
		slot_queue_walk(slot, skb)
			qs.backlog += qdisc_pkt_len(skb);
	}
	if (gnet_stats_copy_queue(d, &qs) < 0)
		return -1;
	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->divisor; i++) {
		if (q->ht[i] == SFQ_EMPTY_SLOT ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops sfq_class_ops = {
	.leaf		=	sfq_leaf,
	.get		=	sfq_get,
	.put		=	sfq_put,
	.tcf_chain	=	sfq_find_tcf,
	.bind_tcf	=	sfq_bind,
	.unbind_tcf	=	sfq_put,
	.dump		=	sfq_dump_class,
	.dump_stats	=	sfq_dump_class_stats,
	.walk		=	sfq_walk,
};

static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
	.cl_ops		=	&sfq_class_ops,
	.id		=	"sfq",
	.priv_size	=	sizeof(struct sfq_sched_data),
	.enqueue	=	sfq_enqueue,
	.dequeue	=	sfq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	sfq_drop,
	.init		=	sfq_init,
	.reset		=	sfq_reset,
	.destroy	=	sfq_destroy,
	.change		=	NULL,
	.dump		=	sfq_dump,
	.owner		=	THIS_MODULE,
};

static int __init sfq_module_init(void)
{
	return register_qdisc(&sfq_qdisc_ops);
}
static void __exit sfq_module_exit(void)
{
	unregister_qdisc(&sfq_qdisc_ops);
}
module_init(sfq_module_init)
module_exit(sfq_module_exit)
MODULE_LICENSE("GPL");