/*
 * net/sched/sch_sfq.c	Stochastic Fairness Queueing discipline.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>


/*	Stochastic Fairness Queuing algorithm.
	=======================================

	Source:
	Paul E. McKenney "Stochastic Fairness Queuing",
	IEEE INFOCOMM'90 Proceedings, San Francisco, 1990.

	Paul E. McKenney "Stochastic Fairness Queuing",
	"Interworking: Research and Experience", v.2, 1991, p.113-131.


	See also:
	M. Shreedhar and George Varghese "Efficient Fair
	Queuing using Deficit Round Robin", Proc. SIGCOMM 95.


	This is not the thing that is usually called (W)FQ nowadays.
	It does not use any timestamp mechanism, but instead
	processes queues in round-robin order.

	ADVANTAGE:

	- It is very cheap. Both CPU and memory requirements are minimal.

	DRAWBACKS:

	- "Stochastic" -> It is not 100% fair.
	When hash collisions occur, several flows are considered as one.

	- "Round-robin" -> It introduces larger delays than virtual clock
	based schemes, and should not be used for isolating interactive
	traffic from non-interactive. This means that this scheduler
	should be used as a leaf of CBQ or P3, which put interactive
	traffic into a higher priority band.

	We still need true WFQ for the top level CSZ, but using WFQ
	for the best effort traffic is absolutely pointless:
	SFQ is superior for this purpose.

	IMPLEMENTATION:
	This implementation limits the maximal queue length to 128 packets,
	the max mtu to 2^18-1, the number of flows to 128 and the number of
	hash buckets to 1024.

	The only goal of these restrictions was that all data
	fit into one 4K page on 32bit arches.

	It is easy to increase these values, but not in flight.  */
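
/* Example (added for illustration; not part of the original source): assuming
 * the iproute2 'tc' utility, SFQ is typically attached and tuned with
 * something like
 *
 *	tc qdisc add dev eth0 root sfq perturb 10
 *
 * which installs this qdisc as the root of eth0 and re-seeds the hash
 * (q->perturbation) every 10 seconds via the perturb timer below.
 */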

#define SFQ_DEPTH		128 /* max number of packets per flow */
#define SFQ_SLOTS		128 /* max number of flows */
#define SFQ_EMPTY_SLOT		255
#define SFQ_DEFAULT_HASH_DIVISOR 1024

/* We use 16 bits to store allot, and want to handle packets up to 64K
 * Scale allot by 8 (1<<3) so that no overflow occurs.
 */
#define SFQ_ALLOT_SHIFT		3
#define SFQ_ALLOT_SIZE(X)	DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT)
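
/* For illustration (added; not part of the original source): with a quantum
 * of 1514 bytes (Ethernet MTU plus header, roughly what psched_mtu() yields),
 * SFQ_ALLOT_SIZE(1514) = DIV_ROUND_UP(1514, 8) = 190 scaled units, and even a
 * full 64KB packet costs SFQ_ALLOT_SIZE(65535) = 8192 units, which still fits
 * comfortably in the signed 16 bit 'allot' field of struct sfq_slot.
 */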

/* This type should contain at least SFQ_DEPTH + SFQ_SLOTS values */
typedef unsigned char sfq_index;

/*
 * We don't use pointers to save space.
 * Small indexes [0 ... SFQ_SLOTS - 1] are 'pointers' to slots[] array
 * while following values [SFQ_SLOTS ... SFQ_SLOTS + SFQ_DEPTH - 1]
 * are 'pointers' to dep[] array
 */
struct sfq_head {
	sfq_index	next;
	sfq_index	prev;
};

struct sfq_slot {
	struct sk_buff	*skblist_next;
	struct sk_buff	*skblist_prev;
	sfq_index	qlen; /* number of skbs in skblist */
	sfq_index	next; /* next slot in sfq chain */
	struct sfq_head dep; /* anchor in dep[] chains */
	unsigned short	hash; /* hash value (index in ht[]) */
	short		allot; /* credit for this slot */
};

struct sfq_sched_data {
/* Parameters */
	int		perturb_period;
	unsigned int	quantum;	/* Allotment per round: MUST BE >= MTU */
	int		limit;
	unsigned int	divisor;	/* number of slots in hash table */
/* Variables */
	struct tcf_proto *filter_list;
	struct timer_list perturb_timer;
	u32		perturbation;
	sfq_index	cur_depth;	/* depth of longest slot */
	unsigned short  scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
	struct sfq_slot *tail;		/* current slot in round */
	sfq_index	*ht;		/* Hash table (divisor slots) */
	struct sfq_slot	slots[SFQ_SLOTS];
	struct sfq_head	dep[SFQ_DEPTH];	/* Linked list of slots, indexed by depth */
};

/*
 * sfq_head are either in a sfq_slot or in dep[] array
 */
static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
{
	if (val < SFQ_SLOTS)
		return &q->slots[val].dep;
	return &q->dep[val - SFQ_SLOTS];
}
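
/* Example (added for illustration; not part of the original source):
 * with SFQ_SLOTS = 128, sfq_dep_head(q, 5) returns &q->slots[5].dep,
 * the anchor of flow slot 5, while sfq_dep_head(q, 130) returns
 * &q->dep[130 - 128] = &q->dep[2], the head of the list of slots that
 * currently hold exactly two packets.
 */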

static unsigned int sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
{
	return jhash_2words(h, h1, q->perturbation) & (q->divisor - 1);
}

static unsigned int sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
{
	u32 h, h2;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
	{
		const struct iphdr *iph;
		int poff;

		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			goto err;
		iph = ip_hdr(skb);
		h = (__force u32)iph->daddr;
		h2 = (__force u32)iph->saddr ^ iph->protocol;
		if (ip_is_fragment(iph))
			break;
		poff = proto_ports_offset(iph->protocol);
		if (poff >= 0 &&
		    pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
			iph = ip_hdr(skb);
			h2 ^= *(u32 *)((void *)iph + iph->ihl * 4 + poff);
		}
		break;
	}
	case htons(ETH_P_IPV6):
	{
		const struct ipv6hdr *iph;
		int poff;

		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			goto err;
		iph = ipv6_hdr(skb);
		h = (__force u32)iph->daddr.s6_addr32[3];
		h2 = (__force u32)iph->saddr.s6_addr32[3] ^ iph->nexthdr;
		poff = proto_ports_offset(iph->nexthdr);
		if (poff >= 0 &&
		    pskb_network_may_pull(skb, sizeof(*iph) + 4 + poff)) {
			iph = ipv6_hdr(skb);
			h2 ^= *(u32 *)((void *)iph + sizeof(*iph) + poff);
		}
		break;
	}
	default:
err:
		h = (unsigned long)skb_dst(skb) ^ (__force u32)skb->protocol;
		h2 = (unsigned long)skb->sk;
	}

	return sfq_fold_hash(q, h, h2);
}

static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
				 int *qerr)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->divisor)
		return TC_H_MIN(skb->priority);

	if (!q->filter_list)
		return sfq_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, q->filter_list, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->divisor)
			return TC_H_MIN(res.classid);
	}
	return 0;
}

/*
 * x : slot number [0 .. SFQ_SLOTS - 1]
 */
static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int qlen = q->slots[x].qlen;

	p = qlen + SFQ_SLOTS;
	n = q->dep[qlen].next;

	q->slots[x].dep.next = n;
	q->slots[x].dep.prev = p;

	q->dep[qlen].next = x;		/* sfq_dep_head(q, p)->next = x */
	sfq_dep_head(q, n)->prev = x;
}

#define sfq_unlink(q, x, n, p)			\
	n = q->slots[x].dep.next;		\
	p = q->slots[x].dep.prev;		\
	sfq_dep_head(q, p)->next = n;		\
	sfq_dep_head(q, n)->prev = p


static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = q->slots[x].qlen--;
	if (n == p && q->cur_depth == d)
		q->cur_depth--;
	sfq_link(q, x);
}

static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = ++q->slots[x].qlen;
	if (q->cur_depth < d)
		q->cur_depth = d;
	sfq_link(q, x);
}

/* helper functions : might be changed when/if skb use a standard list_head */

/* remove one skb from tail of slot queue */
static inline struct sk_buff *slot_dequeue_tail(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_prev;

	slot->skblist_prev = skb->prev;
	skb->prev->next = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}

/* remove one skb from head of slot queue */
static inline struct sk_buff *slot_dequeue_head(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_next;

	slot->skblist_next = skb->next;
	skb->next->prev = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}

static inline void slot_queue_init(struct sfq_slot *slot)
{
	slot->skblist_prev = slot->skblist_next = (struct sk_buff *)slot;
}

/* add skb to slot queue (tail add) */
static inline void slot_queue_add(struct sfq_slot *slot, struct sk_buff *skb)
{
	skb->prev = slot->skblist_prev;
	skb->next = (struct sk_buff *)slot;
	slot->skblist_prev->next = skb;
	slot->skblist_prev = skb;
}

#define	slot_queue_walk(slot, skb)		\
	for (skb = slot->skblist_next;		\
	     skb != (struct sk_buff *)slot;	\
	     skb = skb->next)
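
/* Note (added for clarity; not part of the original source): the casts to
 * (struct sk_buff *) above assume that skblist_next/skblist_prev are the
 * first two members of struct sfq_slot and mirror the next/prev pointers
 * at the start of struct sk_buff, so a slot can act as the dummy head of
 * its own circular skb list without a separate sk_buff_head.
 */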

static unsigned int sfq_drop(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index x, d = q->cur_depth;
	struct sk_buff *skb;
	unsigned int len;
	struct sfq_slot *slot;

	/* Queue is full! Find the longest slot and drop tail packet from it */
	if (d > 1) {
		x = q->dep[d].next;
		slot = &q->slots[x];
drop:
		skb = slot_dequeue_tail(slot);
		len = qdisc_pkt_len(skb);
		sfq_dec(q, x);
		kfree_skb(skb);
		sch->q.qlen--;
		sch->qstats.drops++;
		sch->qstats.backlog -= len;
		return len;
	}

	if (d == 1) {
		/* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
		x = q->tail->next;
		slot = &q->slots[x];
		q->tail->next = slot->next;
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		goto drop;
	}

	return 0;
}

static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int hash;
	sfq_index x, qlen;
	struct sfq_slot *slot;
	int uninitialized_var(ret);

	hash = sfq_classify(skb, sch, &ret);
	if (hash == 0) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
	hash--;

	x = q->ht[hash];
	slot = &q->slots[x];
	if (x == SFQ_EMPTY_SLOT) {
		x = q->dep[0].next; /* get a free slot */
		q->ht[hash] = x;
		slot = &q->slots[x];
		slot->hash = hash;
	}

	/* If selected queue has length q->limit, do simple tail drop,
	 * i.e. drop _this_ packet.
	 */
	if (slot->qlen >= q->limit)
		return qdisc_drop(skb, sch);

	sch->qstats.backlog += qdisc_pkt_len(skb);
	slot_queue_add(slot, skb);
	sfq_inc(q, x);
	if (slot->qlen == 1) {		/* The flow is new */
		if (q->tail == NULL) {	/* It is the first flow */
			slot->next = x;
		} else {
			slot->next = q->tail->next;
			q->tail->next = x;
		}
		q->tail = slot;
		slot->allot = q->scaled_quantum;
	}
	if (++sch->q.qlen <= q->limit)
		return NET_XMIT_SUCCESS;

	qlen = slot->qlen;
	sfq_drop(sch);
	/* Return Congestion Notification only if we dropped a packet
	 * from this flow.
	 */
	return (qlen != slot->qlen) ? NET_XMIT_CN : NET_XMIT_SUCCESS;
}

static struct sk_buff *
sfq_dequeue(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	sfq_index a, next_a;
	struct sfq_slot *slot;

	/* No active slots */
	if (q->tail == NULL)
		return NULL;

next_slot:
	a = q->tail->next;
	slot = &q->slots[a];
	if (slot->allot <= 0) {
		q->tail = slot;
		slot->allot += q->scaled_quantum;
		goto next_slot;
	}
	skb = slot_dequeue_head(slot);
	sfq_dec(q, a);
	qdisc_bstats_update(sch, skb);
	sch->q.qlen--;
	sch->qstats.backlog -= qdisc_pkt_len(skb);

	/* Is the slot empty? */
	if (slot->qlen == 0) {
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		next_a = slot->next;
		if (a == next_a) {
			q->tail = NULL; /* no more active slots */
			return skb;
		}
		q->tail->next = next_a;
	} else {
		slot->allot -= SFQ_ALLOT_SIZE(qdisc_pkt_len(skb));
	}
	return skb;
}
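
/* Worked example (added for illustration; not part of the original source):
 * with quantum = 1514 the scaled quantum is DIV_ROUND_UP(1514, 8) = 190.
 * A slot that dequeues one full-sized 1514 byte packet pays 190 units and
 * drops to an allot of zero; on its next turn the (slot->allot <= 0) branch
 * above tops it up by another scaled quantum and moves the round-robin
 * pointer on, so the other active flows are served before it sends again.
 */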

static void
sfq_reset(struct Qdisc *sch)
{
	struct sk_buff *skb;

	while ((skb = sfq_dequeue(sch)) != NULL)
		kfree_skb(skb);
}

static void sfq_perturbation(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	struct sfq_sched_data *q = qdisc_priv(sch);

	q->perturbation = net_random();

	if (q->perturb_period)
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
}

static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tc_sfq_qopt *ctl = nla_data(opt);
	unsigned int qlen;

	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
		return -EINVAL;

	if (ctl->divisor &&
	    (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
		return -EINVAL;

	sch_tree_lock(sch);
	q->quantum = ctl->quantum ? : psched_mtu(qdisc_dev(sch));
	q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
	q->perturb_period = ctl->perturb_period * HZ;
	if (ctl->limit)
		q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);
	if (ctl->divisor)
		q->divisor = ctl->divisor;
	qlen = sch->q.qlen;
	while (sch->q.qlen > q->limit)
		sfq_drop(sch);
	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);

	del_timer(&q->perturb_timer);
	if (q->perturb_period) {
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
		q->perturbation = net_random();
	}
	sch_tree_unlock(sch);
	return 0;
}

static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	size_t sz;
	int i;

	q->perturb_timer.function = sfq_perturbation;
	q->perturb_timer.data = (unsigned long)sch;
	init_timer_deferrable(&q->perturb_timer);

	for (i = 0; i < SFQ_DEPTH; i++) {
		q->dep[i].next = i + SFQ_SLOTS;
		q->dep[i].prev = i + SFQ_SLOTS;
	}

	q->limit = SFQ_DEPTH - 1;
	q->cur_depth = 0;
	q->tail = NULL;
	q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
	if (opt == NULL) {
		q->quantum = psched_mtu(qdisc_dev(sch));
		q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
		q->perturb_period = 0;
		q->perturbation = net_random();
	} else {
		int err = sfq_change(sch, opt);
		if (err)
			return err;
	}

	sz = sizeof(q->ht[0]) * q->divisor;
	q->ht = kmalloc(sz, GFP_KERNEL);
	if (!q->ht && sz > PAGE_SIZE)
		q->ht = vmalloc(sz);
	if (!q->ht)
		return -ENOMEM;
	for (i = 0; i < q->divisor; i++)
		q->ht[i] = SFQ_EMPTY_SLOT;

	for (i = 0; i < SFQ_SLOTS; i++) {
		slot_queue_init(&q->slots[i]);
		sfq_link(q, i);
	}
	if (q->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static void sfq_destroy(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	q->perturb_period = 0;
	del_timer_sync(&q->perturb_timer);
	if (is_vmalloc_addr(q->ht))
		vfree(q->ht);
	else
		kfree(q->ht);
}

static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_sfq_qopt opt;

	opt.quantum = q->quantum;
	opt.perturb_period = q->perturb_period / HZ;

	opt.limit = q->limit;
	opt.divisor = q->divisor;
	opt.flows = q->limit;

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	/* we cannot bypass queue discipline anymore */
	sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static void sfq_put(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static int sfq_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				struct gnet_dump *d)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index idx = q->ht[cl - 1];
	struct gnet_stats_queue qs = { 0 };
	struct tc_sfq_xstats xstats = { 0 };
	struct sk_buff *skb;

	if (idx != SFQ_EMPTY_SLOT) {
		const struct sfq_slot *slot = &q->slots[idx];

		xstats.allot = slot->allot << SFQ_ALLOT_SHIFT;
		qs.qlen = slot->qlen;
		slot_queue_walk(slot, skb)
			qs.backlog += qdisc_pkt_len(skb);
	}
	if (gnet_stats_copy_queue(d, &qs) < 0)
		return -1;
	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->divisor; i++) {
		if (q->ht[i] == SFQ_EMPTY_SLOT ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops sfq_class_ops = {
	.leaf		=	sfq_leaf,
	.get		=	sfq_get,
	.put		=	sfq_put,
	.tcf_chain	=	sfq_find_tcf,
	.bind_tcf	=	sfq_bind,
	.unbind_tcf	=	sfq_put,
	.dump		=	sfq_dump_class,
	.dump_stats	=	sfq_dump_class_stats,
	.walk		=	sfq_walk,
};

static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
	.cl_ops		=	&sfq_class_ops,
	.id		=	"sfq",
	.priv_size	=	sizeof(struct sfq_sched_data),
	.enqueue	=	sfq_enqueue,
	.dequeue	=	sfq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	sfq_drop,
	.init		=	sfq_init,
	.reset		=	sfq_reset,
	.destroy	=	sfq_destroy,
	.change		=	NULL,
	.dump		=	sfq_dump,
	.owner		=	THIS_MODULE,
};

static int __init sfq_module_init(void)
{
	return register_qdisc(&sfq_qdisc_ops);
}
static void __exit sfq_module_exit(void)
{
	unregister_qdisc(&sfq_qdisc_ops);
}
module_init(sfq_module_init)
module_exit(sfq_module_exit)
MODULE_LICENSE("GPL");