sch_sfq.c 19.6 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
/*
 * net/sched/sch_sfq.c	Stochastic Fairness Queueing discipline.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
21
#include <linux/jhash.h>
22
#include <linux/slab.h>
23
#include <linux/vmalloc.h>
24
#include <net/netlink.h>
L
Linus Torvalds 已提交
25
#include <net/pkt_sched.h>
E
Eric Dumazet 已提交
26
#include <net/flow_keys.h>
L
Linus Torvalds 已提交
27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44


/*	Stochastic Fairness Queuing algorithm.
	=======================================

	Source:
	Paul E. McKenney "Stochastic Fairness Queuing",
	IEEE INFOCOMM'90 Proceedings, San Francisco, 1990.

	Paul E. McKenney "Stochastic Fairness Queuing",
	"Interworking: Research and Experience", v.2, 1991, p.113-131.


	See also:
	M. Shreedhar and George Varghese "Efficient Fair
	Queuing using Deficit Round Robin", Proc. SIGCOMM 95.


45
	This is not the thing that is usually called (W)FQ nowadays.
L
Linus Torvalds 已提交
46 47 48 49 50 51 52 53 54
	It does not use any timestamp mechanism, but instead
	processes queues in round-robin order.

	ADVANTAGE:

	- It is very cheap. Both CPU and memory requirements are minimal.

	DRAWBACKS:

55
	- "Stochastic" -> It is not 100% fair.
L
Linus Torvalds 已提交
56 57 58 59 60 61 62 63 64 65 66 67 68
	When hash collisions occur, several flows are considered as one.

	- "Round-robin" -> It introduces larger delays than virtual clock
	based schemes, and should not be used for isolating interactive
	traffic	from non-interactive. It means, that this scheduler
	should be used as leaf of CBQ or P3, which put interactive traffic
	to higher priority band.

	We still need true WFQ for top level CSZ, but using WFQ
	for the best effort traffic is absolutely pointless:
	SFQ is superior for this purpose.

	IMPLEMENTATION:
E
Eric Dumazet 已提交
69 70 71 72 73
	This implementation limits :
	- maximal queue length per flow to 127 packets.
	- max mtu to 2^18-1;
	- max 65408 flows,
	- number of hash buckets to 65536.
L
Linus Torvalds 已提交
74 75 76

	It is easy to increase these values, but not in flight.  */

E
Eric Dumazet 已提交
77 78 79 80
/* Compile-time limits of this SFQ implementation (see header comment). */
#define SFQ_MAX_DEPTH		127 /* max number of packets per flow */
#define SFQ_DEFAULT_FLOWS	128
#define SFQ_MAX_FLOWS		(0x10000 - SFQ_MAX_DEPTH - 1) /* max number of flows */
#define SFQ_EMPTY_SLOT		0xffff
#define SFQ_DEFAULT_HASH_DIVISOR 1024

/* We use 16 bits to store allot, and want to handle packets up to 64K
 * Scale allot by 8 (1<<3) so that no overflow occurs.
 */
#define SFQ_ALLOT_SHIFT		3
#define SFQ_ALLOT_SIZE(X)	DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT)

/* This type should contain at least SFQ_MAX_DEPTH + 1 + SFQ_MAX_FLOWS values */
typedef u16 sfq_index;
L
Linus Torvalds 已提交
91

92 93
/*
 * We dont use pointers to save space.
E
Eric Dumazet 已提交
94 95
 * Small indexes [0 ... SFQ_MAX_FLOWS - 1] are 'pointers' to slots[] array
 * while following values [SFQ_MAX_FLOWS ... SFQ_MAX_FLOWS + SFQ_MAX_DEPTH]
96 97
 * are 'pointers' to dep[] array
 */
E
Eric Dumazet 已提交
98
/* Doubly-linked list anchor; next/prev are sfq_index "pointers"
 * (see the encoding comment above sfq_dep_head()).
 */
struct sfq_head {
	sfq_index	next;
	sfq_index	prev;
};

103 104 105 106
/* Per-flow state: an intrusive circular skb list plus bookkeeping.
 * skblist_next/skblist_prev make the slot itself act as the list head,
 * so an empty list points back at the slot (cast to struct sk_buff *).
 */
struct sfq_slot {
	struct sk_buff	*skblist_next;
	struct sk_buff	*skblist_prev;
	sfq_index	qlen; /* number of skbs in skblist */
	sfq_index	next; /* next slot in sfq RR chain */
	struct sfq_head dep; /* anchor in dep[] chains */
	unsigned short	hash; /* hash value (index in ht[]) */
	short		allot; /* credit for this slot */
};

E
Eric Dumazet 已提交
113
/* Qdisc private state. Fields touched on every packet come first. */
struct sfq_sched_data {
/* frequently used fields */
	int		limit;		/* limit of total number of packets in this qdisc */
	unsigned int	divisor;	/* number of slots in hash table */
	unsigned int	maxflows;	/* number of flows in flows array */
	int		headdrop;	/* non-zero: drop from head instead of tail when full */
	int		maxdepth;	/* limit of packets per flow */

	u32		perturbation;	/* hash perturbation seed, refreshed by timer */
	struct tcf_proto *filter_list;	/* optional classifier chain */
	sfq_index	cur_depth;	/* depth of longest slot */
	unsigned short  scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
	struct sfq_slot *tail;		/* current slot in round */
	sfq_index	*ht;		/* Hash table ('divisor' slots) */
	struct sfq_slot	*slots;		/* Flows table ('maxflows' entries) */

	struct sfq_head	dep[SFQ_MAX_DEPTH + 1];
					/* Linked lists of slots, indexed by depth
					 * dep[0] : list of unused flows
					 * dep[1] : list of flows with 1 packet
					 * dep[X] : list of flows with X packets
					 */

	int		perturb_period;	/* jiffies between rehashes; 0 disables timer */
	unsigned int	quantum;	/* Allotment per round: MUST BE >= MTU */
	struct timer_list perturb_timer;
};

141 142 143 144 145
/*
 * sfq_head are either in a sfq_slot or in dep[] array
 */
static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
{
	/* indexes below SFQ_MAX_FLOWS address a flow slot ... */
	if (val < SFQ_MAX_FLOWS)
		return &q->slots[val].dep;
	/* ... larger values are offsets into the per-depth dep[] heads */
	return &q->dep[val - SFQ_MAX_FLOWS];
}

151 152 153 154 155 156 157 158 159 160 161 162 163 164 165
/*
 * In order to be able to quickly rehash our queue when timer changes
 * q->perturbation, we store flow_keys in skb->cb[]
 */
struct sfq_skb_cb {
       struct flow_keys        keys;	/* dissected flow tuple, cached per skb */
};

/* Access the sfq-private area of skb->cb, laid out after qdisc_skb_cb. */
static inline struct sfq_skb_cb *sfq_skb_cb(const struct sk_buff *skb)
{
       /* compile-time proof that our cb usage fits in skb->cb[] */
       BUILD_BUG_ON(sizeof(skb->cb) <
               sizeof(struct qdisc_skb_cb) + sizeof(struct sfq_skb_cb));
       return (struct sfq_skb_cb *)qdisc_skb_cb(skb)->data;
}

E
Eric Dumazet 已提交
166 167
/*
 * Hash the cached flow keys (plus the current perturbation seed) into
 * a bucket index in [0, divisor). divisor is a power of two, so the
 * mask below is exact.
 */
static unsigned int sfq_hash(const struct sfq_sched_data *q,
			     const struct sk_buff *skb)
{
	const struct flow_keys *keys = &sfq_skb_cb(skb)->keys;
	unsigned int hash;

	hash = jhash_3words((__force u32)keys->dst,
			    (__force u32)keys->src ^ keys->ip_proto,
			    (__force u32)keys->ports, q->perturbation);
	return hash & (q->divisor - 1);
}

178 179 180 181 182 183 184 185 186
/*
 * Map an skb to a flow id in [1, divisor], or 0 to signal "drop".
 * Priority order: explicit skb->priority class, then flow hash when no
 * filters are attached, then the classifier chain. On a 0 return, *qerr
 * tells the caller how to account the drop.
 */
static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
				 int *qerr)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->divisor)
		return TC_H_MIN(skb->priority);

	if (!q->filter_list) {
		/* dissect once here; sfq_rehash() reuses the cached keys */
		skb_flow_dissect(skb, &sfq_skb_cb(skb)->keys);
		return sfq_hash(q, skb) + 1;
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, q->filter_list, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through: stolen/queued also return 0 */
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->divisor)
			return TC_H_MIN(res.classid);
	}
	return 0;
}

213
/*
E
Eric Dumazet 已提交
214
 * x : slot number [0 .. SFQ_MAX_FLOWS - 1]
215
 */
L
Linus Torvalds 已提交
216 217 218
/*
 * Insert slot x at the head of the dep[] list matching its current
 * queue length. x : slot number [0 .. SFQ_MAX_FLOWS - 1].
 */
static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	struct sfq_slot *slot = &q->slots[x];
	int qlen = slot->qlen;

	/* p is the encoded 'pointer' to the dep[qlen] head itself */
	p = qlen + SFQ_MAX_FLOWS;
	n = q->dep[qlen].next;

	slot->dep.next = n;
	slot->dep.prev = p;

	q->dep[qlen].next = x;		/* sfq_dep_head(q, p)->next = x */
	sfq_dep_head(q, n)->prev = x;
}

232 233 234 235 236 237 238
/* Remove slot x from its dep[] list; leaves its neighbours in (n, p).
 * Macro (not inline fn) so n and p are returned to the caller.
 */
#define sfq_unlink(q, x, n, p)			\
	n = q->slots[x].dep.next;		\
	p = q->slots[x].dep.prev;		\
	sfq_dep_head(q, p)->next = n;		\
	sfq_dep_head(q, n)->prev = p


L
Linus Torvalds 已提交
239 240 241
/* Account one packet removed from slot x: move it to the dep[] list
 * one depth lower, and shrink cur_depth if x was the last longest slot.
 */
static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = q->slots[x].qlen--;
	/* n == p means x was the only slot at depth d */
	if (n == p && q->cur_depth == d)
		q->cur_depth--;
	sfq_link(q, x);
}

/* Account one packet added to slot x: move it to the dep[] list one
 * depth higher, and grow cur_depth if x is now the longest slot.
 */
static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = ++q->slots[x].qlen;
	if (q->cur_depth < d)
		q->cur_depth = d;
	sfq_link(q, x);
}

265 266 267 268 269 270 271 272
/* helper functions : might be changed when/if skb use a standard list_head */

/* remove one skb from tail of slot queue */
/* helper functions : might be changed when/if skb use a standard list_head */

/* remove one skb from tail of slot queue; caller must ensure non-empty */
static inline struct sk_buff *slot_dequeue_tail(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_prev;

	slot->skblist_prev = skb->prev;
	/* the slot itself acts as list head, hence the cast */
	skb->prev->next = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}

/* remove one skb from head of slot queue */
/* remove one skb from head of slot queue; caller must ensure non-empty */
static inline struct sk_buff *slot_dequeue_head(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_next;

	slot->skblist_next = skb->next;
	/* the slot itself acts as list head, hence the cast */
	skb->next->prev = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}

/* Zero a slot and make its skb list empty (self-referencing head). */
static inline void slot_queue_init(struct sfq_slot *slot)
{
	memset(slot, 0, sizeof(*slot));
	slot->skblist_prev = slot->skblist_next = (struct sk_buff *)slot;
}

/* add skb to slot queue (tail add) */
/* add skb to slot queue (tail add) */
static inline void slot_queue_add(struct sfq_slot *slot, struct sk_buff *skb)
{
	skb->prev = slot->skblist_prev;
	skb->next = (struct sk_buff *)slot;
	slot->skblist_prev->next = skb;
	slot->skblist_prev = skb;
}

/* Iterate skb over every packet queued in slot (read-only walk). */
#define	slot_queue_walk(slot, skb)		\
	for (skb = slot->skblist_next;		\
	     skb != (struct sk_buff *)slot;	\
	     skb = skb->next)

L
Linus Torvalds 已提交
309 310 311
/*
 * Drop one packet to relieve pressure: from the deepest flow if one
 * exists (head or tail depending on q->headdrop), otherwise from the
 * next flow in the round-robin ring (all flows then have length 1,
 * so that flow is also fully removed from ring and hash).
 * Returns the dropped packet's length, or 0 if nothing was queued.
 */
static unsigned int sfq_drop(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index x, d = q->cur_depth;
	struct sk_buff *skb;
	unsigned int len;
	struct sfq_slot *slot;

	/* Queue is full! Find the longest slot and drop tail packet from it */
	if (d > 1) {
		x = q->dep[d].next;
		slot = &q->slots[x];
drop:
		skb = q->headdrop ? slot_dequeue_head(slot) : slot_dequeue_tail(slot);
		len = qdisc_pkt_len(skb);
		sfq_dec(q, x);
		kfree_skb(skb);
		sch->q.qlen--;
		sch->qstats.drops++;
		sch->qstats.backlog -= len;
		return len;
	}

	if (d == 1) {
		/* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
		x = q->tail->next;
		slot = &q->slots[x];
		/* unlink the victim flow from the RR ring and hash table */
		q->tail->next = slot->next;
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		goto drop;
	}

	return 0;
}

/*
 * Enqueue one packet: classify it to a flow, (re)activate the flow's
 * slot if needed, append the skb, and if the qdisc limit is exceeded
 * drop one packet via sfq_drop(). Returns NET_XMIT_SUCCESS, NET_XMIT_CN
 * (a packet of this very flow was dropped), or a drop code.
 */
static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int hash;
	sfq_index x, qlen;
	struct sfq_slot *slot;
	int uninitialized_var(ret);

	hash = sfq_classify(skb, sch, &ret);
	if (hash == 0) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
	hash--;	/* classify returns 1-based flow ids */

	x = q->ht[hash];
	slot = &q->slots[x];
	if (x == SFQ_EMPTY_SLOT) {
		x = q->dep[0].next; /* get a free slot */
		if (x >= SFQ_MAX_FLOWS)
			return qdisc_drop(skb, sch);
		q->ht[hash] = x;
		slot = &q->slots[x];
		slot->hash = hash;
	}

	if (slot->qlen >= q->maxdepth) {
		struct sk_buff *head;

		if (!q->headdrop)
			return qdisc_drop(skb, sch);

		/* headdrop mode: make room by dropping the oldest packet */
		head = slot_dequeue_head(slot);
		sch->qstats.backlog -= qdisc_pkt_len(head);
		qdisc_drop(head, sch);

		sch->qstats.backlog += qdisc_pkt_len(skb);
		slot_queue_add(slot, skb);
		return NET_XMIT_CN;
	}

	sch->qstats.backlog += qdisc_pkt_len(skb);
	slot_queue_add(slot, skb);
	sfq_inc(q, x);
	if (slot->qlen == 1) {		/* The flow is new */
		if (q->tail == NULL) {	/* It is the first flow */
			slot->next = x;
			q->tail = slot;
		} else {
			slot->next = q->tail->next;
			q->tail->next = x;
		}
		slot->allot = q->scaled_quantum;
	}
	if (++sch->q.qlen <= q->limit)
		return NET_XMIT_SUCCESS;

	qlen = slot->qlen;
	sfq_drop(sch);
	/* Return Congestion Notification only if we dropped a packet
	 * from this flow.
	 */
	if (qlen != slot->qlen)
		return NET_XMIT_CN;

	/* As we dropped a packet, better let upper stack know this */
	qdisc_tree_decrease_qlen(sch, 1);
	return NET_XMIT_SUCCESS;
}

/*
 * Dequeue one packet, advancing the deficit round-robin: flows whose
 * allot is exhausted are skipped (and recredited) until one with
 * positive allot is found. An emptied flow is unlinked from the RR
 * ring and its hash bucket.
 */
static struct sk_buff *
sfq_dequeue(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	sfq_index a, next_a;
	struct sfq_slot *slot;

	/* No active slots */
	if (q->tail == NULL)
		return NULL;

next_slot:
	a = q->tail->next;
	slot = &q->slots[a];
	if (slot->allot <= 0) {
		/* credit exhausted: refill and move to the next flow */
		q->tail = slot;
		slot->allot += q->scaled_quantum;
		goto next_slot;
	}
	skb = slot_dequeue_head(slot);
	sfq_dec(q, a);
	qdisc_bstats_update(sch, skb);
	sch->q.qlen--;
	sch->qstats.backlog -= qdisc_pkt_len(skb);

	/* Is the slot empty? */
	if (slot->qlen == 0) {
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		next_a = slot->next;
		if (a == next_a) {
			q->tail = NULL; /* no more active slots */
			return skb;
		}
		q->tail->next = next_a;
	} else {
		/* charge the packet against this flow's allotment */
		slot->allot -= SFQ_ALLOT_SIZE(qdisc_pkt_len(skb));
	}
	return skb;
}

static void
459
sfq_reset(struct Qdisc *sch)
L
Linus Torvalds 已提交
460 461 462 463 464 465 466
{
	struct sk_buff *skb;

	while ((skb = sfq_dequeue(sch)) != NULL)
		kfree_skb(skb);
}

467 468 469 470 471 472
/*
 * When q->perturbation is changed, we rehash all queued skbs
 * to avoid OOO (Out Of Order) effects.
 * We dont use sfq_dequeue()/sfq_enqueue() because we dont want to change
 * counters.
 */
E
Eric Dumazet 已提交
473
/*
 * Re-bucket every queued skb under the new q->perturbation seed:
 * pull all packets off their flows onto a temporary list, then
 * re-insert them in order so flows are rebuilt against the fresh
 * hash. Packets that no longer fit (no free slot / flow too deep)
 * are dropped and accounted.
 */
static void sfq_rehash(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	int i;
	struct sfq_slot *slot;
	struct sk_buff_head list;
	int dropped = 0;

	__skb_queue_head_init(&list);

	for (i = 0; i < q->maxflows; i++) {
		slot = &q->slots[i];
		if (!slot->qlen)
			continue;
		while (slot->qlen) {
			skb = slot_dequeue_head(slot);
			sfq_dec(q, i);
			__skb_queue_tail(&list, skb);
		}
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
	}
	q->tail = NULL;

	while ((skb = __skb_dequeue(&list)) != NULL) {
		unsigned int hash = sfq_hash(q, skb);
		sfq_index x = q->ht[hash];

		slot = &q->slots[x];
		if (x == SFQ_EMPTY_SLOT) {
			x = q->dep[0].next; /* get a free slot */
			if (x >= SFQ_MAX_FLOWS) {
drop:				sch->qstats.backlog -= qdisc_pkt_len(skb);
				kfree_skb(skb);
				dropped++;
				continue;
			}
			q->ht[hash] = x;
			slot = &q->slots[x];
			slot->hash = hash;
		}
		if (slot->qlen >= q->maxdepth)
			goto drop;
		slot_queue_add(slot, skb);
		sfq_inc(q, x);
		if (slot->qlen == 1) {		/* The flow is new */
			if (q->tail == NULL) {	/* It is the first flow */
				slot->next = x;
			} else {
				slot->next = q->tail->next;
				q->tail->next = x;
			}
			q->tail = slot;
			slot->allot = q->scaled_quantum;
		}
	}
	/* report drops to the qdisc hierarchy */
	sch->q.qlen -= dropped;
	qdisc_tree_decrease_qlen(sch, dropped);
}

L
Linus Torvalds 已提交
533 534
/*
 * Periodic timer callback: pick a new perturbation seed and, when the
 * hash path is in use (no filters) and packets are queued, rehash them
 * so flows don't stay aliased forever. Re-arms itself while
 * perturb_period is non-zero.
 */
static void sfq_perturbation(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	struct sfq_sched_data *q = qdisc_priv(sch);
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	q->perturbation = net_random();
	if (!q->filter_list && q->tail)
		sfq_rehash(sch);
	spin_unlock(root_lock);

	if (q->perturb_period)
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
}

549
/*
 * Apply netlink configuration (tc_sfq_qopt, optionally the longer
 * tc_sfq_qopt_v1 with depth/headdrop). Validates divisor, clamps the
 * flow/depth/limit values against each other, trims excess packets,
 * and restarts the perturbation timer. Returns 0 or -EINVAL.
 */
static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tc_sfq_qopt *ctl = nla_data(opt);
	struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
	unsigned int qlen;

	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
		return -EINVAL;
	/* a longer attribute carries the v1 extension fields */
	if (opt->nla_len >= nla_attr_size(sizeof(*ctl_v1)))
		ctl_v1 = nla_data(opt);
	if (ctl->divisor &&
	    (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
		return -EINVAL;

	sch_tree_lock(sch);
	if (ctl->quantum) {
		q->quantum = ctl->quantum;
		q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
	}
	q->perturb_period = ctl->perturb_period * HZ;
	if (ctl->flows)
		q->maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS);
	if (ctl->divisor) {
		q->divisor = ctl->divisor;
		q->maxflows = min_t(u32, q->maxflows, q->divisor);
	}
	if (ctl_v1) {
		if (ctl_v1->depth)
			q->maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH);
		q->headdrop = ctl_v1->headdrop;
	}
	if (ctl->limit) {
		q->limit = min_t(u32, ctl->limit, q->maxdepth * q->maxflows);
		q->maxflows = min_t(u32, q->maxflows, q->limit);
	}

	/* shed packets that no longer fit under the (possibly smaller) limit */
	qlen = sch->q.qlen;
	while (sch->q.qlen > q->limit)
		sfq_drop(sch);
	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);

	del_timer(&q->perturb_timer);
	if (q->perturb_period) {
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
		q->perturbation = net_random();
	}
	sch_tree_unlock(sch);
	return 0;
}

600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626
/* Allocate sz bytes, trying kmalloc first (quietly, __GFP_NOWARN) and
 * falling back to vmalloc for large/fragmented requests. May return NULL.
 */
static void *sfq_alloc(size_t sz)
{
	void *ptr;

	ptr = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN);
	if (ptr)
		return ptr;
	return vmalloc(sz);
}

/* Release memory obtained from sfq_alloc(), dispatching to vfree or
 * kfree depending on how it was actually allocated. NULL is a no-op.
 */
static void sfq_free(void *addr)
{
	if (!addr)
		return;

	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}

/* Tear down the qdisc: release filters, stop the perturbation timer
 * (perturb_period = 0 prevents it re-arming), and free the tables.
 */
static void sfq_destroy(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	q->perturb_period = 0;
	del_timer_sync(&q->perturb_timer);
	sfq_free(q->ht);
	sfq_free(q->slots);
}

630
/*
 * Initialize the qdisc: set defaults, apply the optional netlink
 * config (which may change divisor/maxflows before the tables are
 * sized), then allocate and link the hash and flow tables.
 * Returns 0, -ENOMEM, or the sfq_change() error.
 */
static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	int i;

	q->perturb_timer.function = sfq_perturbation;
	q->perturb_timer.data = (unsigned long)sch;
	init_timer_deferrable(&q->perturb_timer);

	/* every dep[] head starts as an empty self-referencing list */
	for (i = 0; i < SFQ_MAX_DEPTH + 1; i++) {
		q->dep[i].next = i + SFQ_MAX_FLOWS;
		q->dep[i].prev = i + SFQ_MAX_FLOWS;
	}

	q->limit = SFQ_MAX_DEPTH;
	q->maxdepth = SFQ_MAX_DEPTH;
	q->cur_depth = 0;
	q->tail = NULL;
	q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
	q->maxflows = SFQ_DEFAULT_FLOWS;
	q->quantum = psched_mtu(qdisc_dev(sch));
	q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
	q->perturb_period = 0;
	q->perturbation = net_random();

	if (opt) {
		int err = sfq_change(sch, opt);
		if (err)
			return err;
	}

	q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor);
	q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows);
	if (!q->ht || !q->slots) {
		/* sfq_destroy()/sfq_free() cope with a NULL table */
		sfq_destroy(sch);
		return -ENOMEM;
	}
	for (i = 0; i < q->divisor; i++)
		q->ht[i] = SFQ_EMPTY_SLOT;

	for (i = 0; i < q->maxflows; i++) {
		slot_queue_init(&q->slots[i]);
		sfq_link(q, i);	/* qlen 0, so this populates dep[0] (free list) */
	}
	if (q->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

/*
 * Dump the current configuration to netlink as a tc_sfq_qopt_v1
 * (its v0 prefix keeps old userspace working). Returns the skb
 * length, or -1 after trimming on attribute-put failure.
 */
static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_sfq_qopt_v1 opt;

	memset(&opt, 0, sizeof(opt));
	opt.v0.quantum	= q->quantum;
	opt.v0.perturb_period = q->perturb_period / HZ;
	opt.v0.limit	= q->limit;
	opt.v0.divisor	= q->divisor;
	opt.v0.flows	= q->maxflows;
	opt.depth	= q->maxdepth;
	opt.headdrop	= q->headdrop;

	/* NLA_PUT jumps to nla_put_failure on lack of skb space */
	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

705 706 707 708 709
/* SFQ classes are internal flows, not attachment points: no leaf qdisc. */
static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

710 711 712 713 714
/* Classes are not refcounted objects; nothing to look up or pin. */
static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
{
	return 0;
}

715 716 717
/* Called when a filter is bound to a class of this qdisc. */
static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	/* we cannot bypass queue discipline anymore */
	sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

723 724 725 726
/* Counterpart of sfq_get()/sfq_bind(); nothing was pinned, so no-op. */
static void sfq_put(struct Qdisc *q, unsigned long cl)
{
}

727 728 729 730 731 732 733 734 735
/* Expose the filter chain anchor; filters attach at qdisc level only
 * (cl != 0 means a per-class request, which we don't support).
 */
static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

736 737 738 739 740 741 742 743 744 745 746
/* Dump a class: its handle is simply the qdisc major + bucket minor. */
static int sfq_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

/* Report queue stats (qlen, backlog) and remaining allot for the flow
 * currently hashed to bucket cl-1; zeros if the bucket is empty.
 */
static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				struct gnet_dump *d)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index idx = q->ht[cl - 1];	/* cl is 1-based, ht[] is 0-based */
	struct gnet_stats_queue qs = { 0 };
	struct tc_sfq_xstats xstats = { 0 };
	struct sk_buff *skb;

	if (idx != SFQ_EMPTY_SLOT) {
		const struct sfq_slot *slot = &q->slots[idx];

		/* undo the internal scaling so userspace sees bytes */
		xstats.allot = slot->allot << SFQ_ALLOT_SHIFT;
		qs.qlen = slot->qlen;
		slot_queue_walk(slot, skb)
			qs.backlog += qdisc_pkt_len(skb);
	}
	if (gnet_stats_copy_queue(d, &qs) < 0)
		return -1;
	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

765 766
/* Walk all active classes (non-empty hash buckets), honouring the
 * walker's skip/count/stop protocol. Class ids are bucket index + 1.
 */
static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->divisor; i++) {
		if (q->ht[i] == SFQ_EMPTY_SLOT ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

/* Class operations: mostly stubs, since SFQ "classes" are just the
 * internal flow buckets exposed for filters and stats.
 */
static const struct Qdisc_class_ops sfq_class_ops = {
	.leaf		=	sfq_leaf,
	.get		=	sfq_get,
	.put		=	sfq_put,
	.tcf_chain	=	sfq_find_tcf,
	.bind_tcf	=	sfq_bind,
	.unbind_tcf	=	sfq_put,
	.dump		=	sfq_dump_class,
	.dump_stats	=	sfq_dump_class_stats,
	.walk		=	sfq_walk,
};

799
/* Qdisc operations table registered with the scheduler core. */
static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
	.cl_ops		=	&sfq_class_ops,
	.id		=	"sfq",
	.priv_size	=	sizeof(struct sfq_sched_data),
	.enqueue	=	sfq_enqueue,
	.dequeue	=	sfq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	sfq_drop,
	.init		=	sfq_init,
	.reset		=	sfq_reset,
	.destroy	=	sfq_destroy,
	.change		=	NULL,	/* reconfiguration goes through init path */
	.dump		=	sfq_dump,
	.owner		=	THIS_MODULE,
};

/* Module entry point: register the "sfq" qdisc with the tc core. */
static int __init sfq_module_init(void)
{
	return register_qdisc(&sfq_qdisc_ops);
}
819
/* Module exit point: unregister the "sfq" qdisc. */
static void __exit sfq_module_exit(void)
{
	unregister_qdisc(&sfq_qdisc_ops);
}
module_init(sfq_module_init)
module_exit(sfq_module_exit)
MODULE_LICENSE("GPL");