/*
 * net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
 *
 *  Copyright (C) 2013-2015 Eric Dumazet <edumazet@google.com>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *  Meant to be mostly used for locally generated traffic :
 *  Fast classification depends on skb->sk being set before reaching us.
 *  If not (router workload), we use rxhash as a fallback, with a 32 bit wide hash.
 *  All packets belonging to a socket are considered as a 'flow'.
 *
 *  Flows are dynamically allocated and stored in a hash table of RB trees
 *  They are also part of one Round Robin 'queues' (new or old flows)
 *
 *  Burst avoidance (aka pacing) capability :
 *
 *  Transport (eg TCP) can set in sk->sk_pacing_rate a rate, enqueue a
 *  bunch of packets, and this packet scheduler adds delay between
 *  packets to respect rate limitation.
 *
 *  enqueue() :
 *   - lookup one RB tree (out of 1024 or more) to find the flow.
 *     If the flow does not exist yet, create it and add it to the tree.
 *     Add skb to the per flow list of skb (fifo).
 *   - Use a special fifo for high prio packets
 *
 *  dequeue() : serves flows in Round Robin
 *  Note : When a flow becomes empty, we do not immediately remove it from
 *  rb trees, for performance reasons (it's expected to send additional packets,
 *  or the SLAB cache will reuse the socket for another flow)
 */
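
/* Example setup from user space, as a rough sketch (assuming a reasonably
 * recent iproute2 ; exact option names and default values may differ) :
 *
 *	tc qdisc replace dev eth0 root fq
 *	tc qdisc replace dev eth0 root fq maxrate 90mbit quantum 3028 \
 *		initial_quantum 15140 buckets 1024
 *
 * Applications can also request their own ceiling with
 * setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE, ...), which caps
 * sk->sk_pacing_rate, the value fq_dequeue() uses for pacing.
 */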

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/hash.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/tcp.h>

/*
 * Per flow structure, dynamically allocated
 */
struct fq_flow {
	struct sk_buff	*head;		/* list of skbs for this flow : first skb */
	union {
		struct sk_buff *tail;	/* last skb in the list */
		unsigned long  age;	/* jiffies when flow was emptied, for gc */
	};
	struct rb_node	fq_node;	/* anchor in fq_root[] trees */
	struct sock	*sk;
	int		qlen;		/* number of packets in flow queue */
	int		credit;
	u32		socket_hash;	/* sk_hash */
	struct fq_flow *next;		/* next pointer in RR lists, or &detached */

	struct rb_node  rate_node;	/* anchor in q->delayed tree */
	u64		time_next_packet;
};

struct fq_flow_head {
	struct fq_flow *first;
	struct fq_flow *last;
};

struct fq_sched_data {
	struct fq_flow_head new_flows;

	struct fq_flow_head old_flows;

	struct rb_root	delayed;	/* for rate limited flows */
	u64		time_next_delayed_flow;
	unsigned long	unthrottle_latency_ns;

	struct fq_flow	internal;	/* for non classified or high prio packets */
	u32		quantum;
	u32		initial_quantum;
	u32		flow_refill_delay;
	u32		flow_max_rate;	/* optional max rate per flow */
	u32		flow_plimit;	/* max packets per flow */
	u32		orphan_mask;	/* mask for orphaned skb */
	u32		low_rate_threshold;
	struct rb_root	*fq_root;
	u8		rate_enable;
	u8		fq_trees_log;

	u32		flows;
	u32		inactive_flows;
	u32		throttled_flows;

	u64		stat_gc_flows;
	u64		stat_internal_packets;
	u64		stat_tcp_retrans;
	u64		stat_throttled;
	u64		stat_flows_plimit;
	u64		stat_pkts_too_long;
	u64		stat_allocation_errors;
	struct qdisc_watchdog watchdog;
};

/* special values to mark a detached or a throttled flow (not on old/new lists) */
static struct fq_flow detached, throttled;

static void fq_flow_set_detached(struct fq_flow *f)
{
	f->next = &detached;
	f->age = jiffies;
}

static bool fq_flow_is_detached(const struct fq_flow *f)
{
	return f->next == &detached;
}

static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
	struct rb_node **p = &q->delayed.rb_node, *parent = NULL;

	while (*p) {
		struct fq_flow *aux;

		parent = *p;
		aux = rb_entry(parent, struct fq_flow, rate_node);
		if (f->time_next_packet >= aux->time_next_packet)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&f->rate_node, parent, p);
	rb_insert_color(&f->rate_node, &q->delayed);
	q->throttled_flows++;
	q->stat_throttled++;

	f->next = &throttled;
	if (q->time_next_delayed_flow > f->time_next_packet)
		q->time_next_delayed_flow = f->time_next_packet;
}


static struct kmem_cache *fq_flow_cachep __read_mostly;

static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
{
	if (head->first)
		head->last->next = flow;
	else
		head->first = flow;
	head->last = flow;
	flow->next = NULL;
}

/* limit number of collected flows per round */
#define FQ_GC_MAX 8
#define FQ_GC_AGE (3*HZ)

static bool fq_gc_candidate(const struct fq_flow *f)
{
	return fq_flow_is_detached(f) &&
	       time_after(jiffies, f->age + FQ_GC_AGE);
}

static void fq_gc(struct fq_sched_data *q,
		  struct rb_root *root,
		  struct sock *sk)
{
	struct fq_flow *f, *tofree[FQ_GC_MAX];
	struct rb_node **p, *parent;
	int fcnt = 0;

	p = &root->rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;

		f = rb_entry(parent, struct fq_flow, fq_node);
		if (f->sk == sk)
			break;

		if (fq_gc_candidate(f)) {
			tofree[fcnt++] = f;
			if (fcnt == FQ_GC_MAX)
				break;
		}

		if (f->sk > sk)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	q->flows -= fcnt;
	q->inactive_flows -= fcnt;
	q->stat_gc_flows += fcnt;
	while (fcnt) {
		struct fq_flow *f = tofree[--fcnt];

		rb_erase(&f->fq_node, root);
		kmem_cache_free(fq_flow_cachep, f);
	}
}

static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
{
	struct rb_node **p, *parent;
	struct sock *sk = skb->sk;
	struct rb_root *root;
	struct fq_flow *f;

	/* warning: no starvation prevention... */
	if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))
		return &q->internal;

	/* SYNACK messages are attached to a TCP_NEW_SYN_RECV request socket
	 * or a listener (SYNCOOKIE mode)
	 * 1) request sockets are not full blown,
	 *    they do not contain sk_pacing_rate
	 * 2) They are not part of a 'flow' yet
	 * 3) We do not want to rate limit them (eg SYNFLOOD attack),
	 *    especially if the listener set SO_MAX_PACING_RATE
	 * 4) We pretend they are orphaned
	 */
	if (!sk || sk_listener(sk)) {
		unsigned long hash = skb_get_hash(skb) & q->orphan_mask;

		/* By forcing the low order bit to 1, we make sure not to
		 * collide with a local flow (socket pointers are word aligned)
		 */
		sk = (struct sock *)((hash << 1) | 1UL);
		skb_orphan(skb);
	}

	root = &q->fq_root[hash_ptr(sk, q->fq_trees_log)];

	if (q->flows >= (2U << q->fq_trees_log) &&
	    q->inactive_flows > q->flows/2)
		fq_gc(q, root, sk);

	p = &root->rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;

		f = rb_entry(parent, struct fq_flow, fq_node);
		if (f->sk == sk) {
			/* socket might have been reallocated, so check
			 * if its sk_hash is the same.
			 * If not, we need to refill credit with
			 * initial quantum
			 */
			if (unlikely(skb->sk &&
				     f->socket_hash != sk->sk_hash)) {
				f->credit = q->initial_quantum;
				f->socket_hash = sk->sk_hash;
				f->time_next_packet = 0ULL;
			}
			return f;
		}
		if (f->sk > sk)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	f = kmem_cache_zalloc(fq_flow_cachep, GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!f)) {
		q->stat_allocation_errors++;
		return &q->internal;
	}
	fq_flow_set_detached(f);
	f->sk = sk;
	if (skb->sk)
		f->socket_hash = sk->sk_hash;
	f->credit = q->initial_quantum;

	rb_link_node(&f->fq_node, parent, p);
	rb_insert_color(&f->fq_node, root);

	q->flows++;
	q->inactive_flows++;
	return f;
}


/* remove one skb from head of flow queue */
static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
{
	struct sk_buff *skb = flow->head;

	if (skb) {
		flow->head = skb->next;
		skb->next = NULL;
		flow->qlen--;
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	}
	return skb;
}

/* We might add detection of retransmits in the future.
 * For the time being, just return false.
 */
static bool skb_is_retransmit(struct sk_buff *skb)
{
	return false;
}

/* add skb to flow queue
 * flow queue is a linked list, kind of FIFO, except for TCP retransmits
 * We special case tcp retransmits to be transmitted before other packets.
 *  We rely on the fact that TCP retransmits are unlikely, so we do not waste
 * a separate queue or a pointer.
 * head->  [retrans pkt 1]
 *         [retrans pkt 2]
 *         [ normal pkt 1]
 *         [ normal pkt 2]
 *         [ normal pkt 3]
 * tail->  [ normal pkt 4]
 */
static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
{
	struct sk_buff *prev, *head = flow->head;

	skb->next = NULL;
	if (!head) {
		flow->head = skb;
		flow->tail = skb;
		return;
	}
	if (likely(!skb_is_retransmit(skb))) {
		flow->tail->next = skb;
		flow->tail = skb;
		return;
	}

	/* This skb is a tcp retransmit,
	 * find the last retrans packet in the queue
	 */
	prev = NULL;
	while (skb_is_retransmit(head)) {
		prev = head;
		head = head->next;
		if (!head)
			break;
	}
	if (!prev) { /* no rtx packet in queue, become the new head */
		skb->next = flow->head;
		flow->head = skb;
	} else {
		if (prev == flow->tail)
			flow->tail = skb;
		else
			skb->next = prev->next;
		prev->next = skb;
	}
}

static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		      struct sk_buff **to_free)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct fq_flow *f;

	if (unlikely(sch->q.qlen >= sch->limit))
		return qdisc_drop(skb, sch, to_free);

	f = fq_classify(skb, q);
	if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
		q->stat_flows_plimit++;
		return qdisc_drop(skb, sch, to_free);
	}

	f->qlen++;
	if (skb_is_retransmit(skb))
		q->stat_tcp_retrans++;
	qdisc_qstats_backlog_inc(sch, skb);
	if (fq_flow_is_detached(f)) {
		fq_flow_add_tail(&q->new_flows, f);
		if (time_after(jiffies, f->age + q->flow_refill_delay))
			f->credit = max_t(u32, f->credit, q->quantum);
		q->inactive_flows--;
	}

	/* Note: this overwrites f->age */
	flow_queue_add(f, skb);

	if (unlikely(f == &q->internal)) {
		q->stat_internal_packets++;
	}
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}

static void fq_check_throttled(struct fq_sched_data *q, u64 now)
{
	unsigned long sample;
	struct rb_node *p;

	if (q->time_next_delayed_flow > now)
		return;

	/* Update unthrottle latency EWMA.
	 * This is cheap and can help diagnosing timer/latency problems.
	 */
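	/* As a quick sketch of the update below : each new sample gets a
	 * 1/8 weight, i.e. avg = avg - avg/8 + sample/8, roughly
	 * avg = 7/8 * avg + 1/8 * sample.
	 */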
	sample = (unsigned long)(now - q->time_next_delayed_flow);
	q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3;
	q->unthrottle_latency_ns += sample >> 3;

	q->time_next_delayed_flow = ~0ULL;
	while ((p = rb_first(&q->delayed)) != NULL) {
		struct fq_flow *f = rb_entry(p, struct fq_flow, rate_node);

		if (f->time_next_packet > now) {
			q->time_next_delayed_flow = f->time_next_packet;
			break;
		}
		rb_erase(p, &q->delayed);
		q->throttled_flows--;
		fq_flow_add_tail(&q->old_flows, f);
	}
}

static struct sk_buff *fq_dequeue(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	u64 now = ktime_get_ns();
	struct fq_flow_head *head;
	struct sk_buff *skb;
	struct fq_flow *f;
	u32 rate, plen;

	skb = fq_dequeue_head(sch, &q->internal);
	if (skb)
		goto out;
	fq_check_throttled(q, now);
begin:
	head = &q->new_flows;
	if (!head->first) {
		head = &q->old_flows;
		if (!head->first) {
			if (q->time_next_delayed_flow != ~0ULL)
				qdisc_watchdog_schedule_ns(&q->watchdog,
							   q->time_next_delayed_flow);
			return NULL;
		}
	}
	f = head->first;

	if (f->credit <= 0) {
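		/* Deficit Round Robin : this flow used up its credit ;
		 * grant one more quantum and move it to the tail of the
		 * old flows list before picking the next flow.
		 */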
		f->credit += q->quantum;
		head->first = f->next;
		fq_flow_add_tail(&q->old_flows, f);
		goto begin;
	}

	skb = f->head;
	if (unlikely(skb && now < f->time_next_packet &&
		     !skb_is_tcp_pure_ack(skb))) {
		head->first = f->next;
		fq_flow_set_throttled(q, f);
		goto begin;
	}

	skb = fq_dequeue_head(sch, f);
	if (!skb) {
		head->first = f->next;
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && q->old_flows.first) {
			fq_flow_add_tail(&q->old_flows, f);
		} else {
			fq_flow_set_detached(f);
			q->inactive_flows++;
		}
		goto begin;
	}
	prefetch(&skb->end);
	f->credit -= qdisc_pkt_len(skb);

	if (!q->rate_enable)
		goto out;

	/* Do not pace locally generated ack packets */
	if (skb_is_tcp_pure_ack(skb))
		goto out;

	rate = q->flow_max_rate;
	if (skb->sk)
		rate = min(skb->sk->sk_pacing_rate, rate);

	if (rate <= q->low_rate_threshold) {
		f->credit = 0;
		plen = qdisc_pkt_len(skb);
	} else {
		plen = max(qdisc_pkt_len(skb), q->quantum);
		if (f->credit > 0)
			goto out;
	}
	if (rate != ~0U) {
		u64 len = (u64)plen * NSEC_PER_SEC;

		if (likely(rate))
			do_div(len, rate);
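		/* Illustrative numbers : plen = 3028 (the default quantum on
		 * a 1500 byte MTU device) and rate = 12500000 bytes/s
		 * (100 Mbit/s) give len = 242240 ns, so packets of this flow
		 * are spaced roughly 242 us apart.
		 */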
		/* Since socket rate can change later,
		 * clamp the delay to 1 second.
		 * Really, providers of too big packets should be fixed !
		 */
		if (unlikely(len > NSEC_PER_SEC)) {
			len = NSEC_PER_SEC;
			q->stat_pkts_too_long++;
		}
		/* Account for schedule/timers drifts.
		 * f->time_next_packet was set when prior packet was sent,
		 * and current time (@now) can be too late by tens of us.
		 */
		if (f->time_next_packet)
			len -= min(len/2, now - f->time_next_packet);
		f->time_next_packet = now + len;
	}
out:
	qdisc_bstats_update(sch, skb);
	return skb;
}

static void fq_flow_purge(struct fq_flow *flow)
{
	rtnl_kfree_skbs(flow->head, flow->tail);
	flow->head = NULL;
	flow->qlen = 0;
}

static void fq_reset(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct rb_root *root;
	struct rb_node *p;
	struct fq_flow *f;
	unsigned int idx;

	sch->q.qlen = 0;
	sch->qstats.backlog = 0;

	fq_flow_purge(&q->internal);

	if (!q->fq_root)
		return;

	for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
		root = &q->fq_root[idx];
		while ((p = rb_first(root)) != NULL) {
			f = rb_entry(p, struct fq_flow, fq_node);
			rb_erase(p, root);

			fq_flow_purge(f);

			kmem_cache_free(fq_flow_cachep, f);
		}
	}
	q->new_flows.first	= NULL;
	q->old_flows.first	= NULL;
	q->delayed		= RB_ROOT;
	q->flows		= 0;
	q->inactive_flows	= 0;
	q->throttled_flows	= 0;
}

static void fq_rehash(struct fq_sched_data *q,
		      struct rb_root *old_array, u32 old_log,
		      struct rb_root *new_array, u32 new_log)
{
	struct rb_node *op, **np, *parent;
	struct rb_root *oroot, *nroot;
	struct fq_flow *of, *nf;
	int fcnt = 0;
	u32 idx;

	for (idx = 0; idx < (1U << old_log); idx++) {
		oroot = &old_array[idx];
		while ((op = rb_first(oroot)) != NULL) {
			rb_erase(op, oroot);
			of = rb_entry(op, struct fq_flow, fq_node);
			if (fq_gc_candidate(of)) {
				fcnt++;
				kmem_cache_free(fq_flow_cachep, of);
				continue;
			}
			nroot = &new_array[hash_ptr(of->sk, new_log)];

			np = &nroot->rb_node;
			parent = NULL;
			while (*np) {
				parent = *np;

				nf = rb_entry(parent, struct fq_flow, fq_node);
				BUG_ON(nf->sk == of->sk);

				if (nf->sk > of->sk)
					np = &parent->rb_right;
				else
					np = &parent->rb_left;
			}

			rb_link_node(&of->fq_node, parent, np);
			rb_insert_color(&of->fq_node, nroot);
		}
	}
	q->flows -= fcnt;
	q->inactive_flows -= fcnt;
	q->stat_gc_flows += fcnt;
}

static void *fq_alloc_node(size_t sz, int node)
{
	void *ptr;

	ptr = kmalloc_node(sz, GFP_KERNEL | __GFP_REPEAT | __GFP_NOWARN, node);
	if (!ptr)
		ptr = vmalloc_node(sz, node);
	return ptr;
}

static void fq_free(void *addr)
{
	kvfree(addr);
}

static int fq_resize(struct Qdisc *sch, u32 log)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct rb_root *array;
	void *old_fq_root;
	u32 idx;

	if (q->fq_root && log == q->fq_trees_log)
		return 0;

	/* If XPS was setup, we can allocate memory on right NUMA node */
	array = fq_alloc_node(sizeof(struct rb_root) << log,
			      netdev_queue_numa_node_read(sch->dev_queue));
	if (!array)
		return -ENOMEM;

	for (idx = 0; idx < (1U << log); idx++)
		array[idx] = RB_ROOT;

	sch_tree_lock(sch);

	old_fq_root = q->fq_root;
	if (old_fq_root)
		fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);

	q->fq_root = array;
	q->fq_trees_log = log;

	sch_tree_unlock(sch);

	fq_free(old_fq_root);

	return 0;
}

static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
	[TCA_FQ_PLIMIT]			= { .type = NLA_U32 },
	[TCA_FQ_FLOW_PLIMIT]		= { .type = NLA_U32 },
	[TCA_FQ_QUANTUM]		= { .type = NLA_U32 },
	[TCA_FQ_INITIAL_QUANTUM]	= { .type = NLA_U32 },
	[TCA_FQ_RATE_ENABLE]		= { .type = NLA_U32 },
	[TCA_FQ_FLOW_DEFAULT_RATE]	= { .type = NLA_U32 },
	[TCA_FQ_FLOW_MAX_RATE]		= { .type = NLA_U32 },
	[TCA_FQ_BUCKETS_LOG]		= { .type = NLA_U32 },
	[TCA_FQ_FLOW_REFILL_DELAY]	= { .type = NLA_U32 },
	[TCA_FQ_ORPHAN_MASK]		= { .type = NLA_U32 },
	[TCA_FQ_LOW_RATE_THRESHOLD]	= { .type = NLA_U32 },
};

static int fq_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_MAX + 1];
	int err, drop_count = 0;
	unsigned drop_len = 0;
	u32 fq_log;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FQ_MAX, opt, fq_policy);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	fq_log = q->fq_trees_log;

	if (tb[TCA_FQ_BUCKETS_LOG]) {
		u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);

		if (nval >= 1 && nval <= ilog2(256*1024))
			fq_log = nval;
		else
			err = -EINVAL;
	}
	if (tb[TCA_FQ_PLIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);

	if (tb[TCA_FQ_FLOW_PLIMIT])
		q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);

	if (tb[TCA_FQ_QUANTUM]) {
		u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);

		if (quantum > 0)
			q->quantum = quantum;
		else
			err = -EINVAL;
	}

	if (tb[TCA_FQ_INITIAL_QUANTUM])
		q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);

	if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
		pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
				    nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));

	if (tb[TCA_FQ_FLOW_MAX_RATE])
		q->flow_max_rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);

	if (tb[TCA_FQ_LOW_RATE_THRESHOLD])
		q->low_rate_threshold =
			nla_get_u32(tb[TCA_FQ_LOW_RATE_THRESHOLD]);

	if (tb[TCA_FQ_RATE_ENABLE]) {
		u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);

		if (enable <= 1)
			q->rate_enable = enable;
		else
			err = -EINVAL;
	}

	if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
		u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]);

		q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
	}

	if (tb[TCA_FQ_ORPHAN_MASK])
		q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]);

	if (!err) {
		sch_tree_unlock(sch);
		err = fq_resize(sch, fq_log);
		sch_tree_lock(sch);
	}
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = fq_dequeue(sch);

		if (!skb)
			break;
		drop_len += qdisc_pkt_len(skb);
		rtnl_kfree_skbs(skb, skb);
		drop_count++;
	}
	qdisc_tree_reduce_backlog(sch, drop_count, drop_len);

	sch_tree_unlock(sch);
	return err;
}

static void fq_destroy(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);

	fq_reset(sch);
	fq_free(q->fq_root);
	qdisc_watchdog_cancel(&q->watchdog);
}

static int fq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	int err;

	sch->limit		= 10000;
	q->flow_plimit		= 100;
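	/* Default credits : two full-size frames per RR round, and ten for
	 * a brand new flow, so its first packets are typically not delayed
	 * (see the f->credit > 0 shortcut in fq_dequeue()).
	 */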
	q->quantum		= 2 * psched_mtu(qdisc_dev(sch));
	q->initial_quantum	= 10 * psched_mtu(qdisc_dev(sch));
	q->flow_refill_delay	= msecs_to_jiffies(40);
	q->flow_max_rate	= ~0U;
	q->time_next_delayed_flow = ~0ULL;
	q->rate_enable		= 1;
	q->new_flows.first	= NULL;
	q->old_flows.first	= NULL;
	q->delayed		= RB_ROOT;
	q->fq_root		= NULL;
	q->fq_trees_log		= ilog2(1024);
	q->orphan_mask		= 1024 - 1;
	q->low_rate_threshold	= 550000 / 8;
	qdisc_watchdog_init(&q->watchdog, sch);

	if (opt)
		err = fq_change(sch, opt);
	else
		err = fq_resize(sch, q->fq_trees_log);

	return err;
}

static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	/* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */

	if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
	    nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
	    nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
			jiffies_to_usecs(q->flow_refill_delay)) ||
	    nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
	    nla_put_u32(skb, TCA_FQ_LOW_RATE_THRESHOLD,
			q->low_rate_threshold) ||
	    nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct tc_fq_qd_stats st;

	sch_tree_lock(sch);

	st.gc_flows		  = q->stat_gc_flows;
	st.highprio_packets	  = q->stat_internal_packets;
	st.tcp_retrans		  = q->stat_tcp_retrans;
	st.throttled		  = q->stat_throttled;
	st.flows_plimit		  = q->stat_flows_plimit;
	st.pkts_too_long	  = q->stat_pkts_too_long;
	st.allocation_errors	  = q->stat_allocation_errors;
	st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_ns();
	st.flows		  = q->flows;
	st.inactive_flows	  = q->inactive_flows;
	st.throttled_flows	  = q->throttled_flows;
	st.unthrottle_latency_ns  = min_t(unsigned long,
					  q->unthrottle_latency_ns, ~0U);
	sch_tree_unlock(sch);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
	.id		=	"fq",
	.priv_size	=	sizeof(struct fq_sched_data),

	.enqueue	=	fq_enqueue,
	.dequeue	=	fq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	fq_init,
	.reset		=	fq_reset,
	.destroy	=	fq_destroy,
	.change		=	fq_change,
	.dump		=	fq_dump,
	.dump_stats	=	fq_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init fq_module_init(void)
{
	int ret;

	fq_flow_cachep = kmem_cache_create("fq_flow_cache",
					   sizeof(struct fq_flow),
					   0, 0, NULL);
	if (!fq_flow_cachep)
		return -ENOMEM;

	ret = register_qdisc(&fq_qdisc_ops);
	if (ret)
		kmem_cache_destroy(fq_flow_cachep);
	return ret;
}

static void __exit fq_module_exit(void)
{
	unregister_qdisc(&fq_qdisc_ops);
	kmem_cache_destroy(fq_flow_cachep);
}

module_init(fq_module_init)
module_exit(fq_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");