/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *              Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *              - Ingress support
 */

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/dst.h>

/* Qdisc to use by default */
const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
EXPORT_SYMBOL(default_qdisc_ops);

/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
 * qdisc_lock(qdisc) spinlock.
 *
 * The idea is the following:
 * - enqueue, dequeue are serialized via qdisc root lock
 * - ingress filtering is also serialized via qdisc root lock
 * - updates to tree and tree walking are only done under the rtnl mutex.
 */

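/* Requeue a packet the driver failed to send: stash it in q->gso_skb so it
 * is retried first on the next dequeue, keep it accounted in qlen/backlog,
 * and reschedule the qdisc.
 */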
static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
	q->gso_skb = skb;
	q->qstats.requeues++;
	qdisc_qstats_backlog_inc(q, skb);
	q->q.qlen++;	/* it's still part of the queue */
	__netif_schedule(q);

	return 0;
}

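/* Dequeue additional packets onto skb->next, up to the byte budget BQL
 * reports for this tx queue, so the driver can be handed a chain and make
 * use of xmit_more.
 */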
static void try_bulk_dequeue_skb(struct Qdisc *q,
				 struct sk_buff *skb,
				 const struct netdev_queue *txq,
				 int *packets)
{
	int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;

	while (bytelimit > 0) {
		struct sk_buff *nskb = q->dequeue(q);

		if (!nskb)
			break;

		bytelimit -= nskb->len; /* covers GSO len */
		skb->next = nskb;
		skb = nskb;
		(*packets)++; /* GSO counts as one pkt */
	}
	skb->next = NULL;
}

/* This variant of try_bulk_dequeue_skb() makes sure
 * all skbs in the chain are for the same txq
 */
static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
				      struct sk_buff *skb,
				      int *packets)
{
	int mapping = skb_get_queue_mapping(skb);
	struct sk_buff *nskb;
	int cnt = 0;

	do {
		nskb = q->dequeue(q);
		if (!nskb)
			break;
		if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
			q->skb_bad_txq = nskb;
			qdisc_qstats_backlog_inc(q, nskb);
			q->q.qlen++;
			break;
		}
		skb->next = nskb;
		skb = nskb;
	} while (++cnt < 8);
	(*packets) += cnt;
	skb->next = NULL;
}

/* Note that dequeue_skb can possibly return a SKB list (via skb->next).
 * A requeued skb (via q->gso_skb) can also be a SKB list.
 */
static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
				   int *packets)
{
	struct sk_buff *skb = q->gso_skb;
	const struct netdev_queue *txq = q->dev_queue;

	*packets = 1;
	if (unlikely(skb)) {
		/* skb in gso_skb was already validated */
		*validate = false;
		/* check the reason for requeuing without tx lock first */
		txq = skb_get_tx_queue(txq->dev, skb);
		if (!netif_xmit_frozen_or_stopped(txq)) {
			q->gso_skb = NULL;
			qdisc_qstats_backlog_dec(q, skb);
			q->q.qlen--;
		} else
			skb = NULL;
		return skb;
	}
	*validate = true;
	skb = q->skb_bad_txq;
	if (unlikely(skb)) {
		/* check the reason for requeuing without tx lock first */
		txq = skb_get_tx_queue(txq->dev, skb);
		if (!netif_xmit_frozen_or_stopped(txq)) {
			q->skb_bad_txq = NULL;
			qdisc_qstats_backlog_dec(q, skb);
			q->q.qlen--;
			goto bulk;
		}
		return NULL;
	}
	if (!(q->flags & TCQ_F_ONETXQUEUE) ||
	    !netif_xmit_frozen_or_stopped(txq))
		skb = q->dequeue(q);
	if (skb) {
bulk:
		if (qdisc_may_bulk(q))
			try_bulk_dequeue_skb(q, skb, txq, packets);
		else
			try_bulk_dequeue_skb_slow(q, skb, packets);
	}
	return skb;
}

/*
 * Transmit possibly several skbs, and handle the return status as
 * required. Holding the running seqcount guarantees that
 * only one CPU can execute this function.
 *
 * Returns to the caller:
 *				0  - queue is empty or throttled.
 *				>0 - queue is not empty.
 */
int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		    struct net_device *dev, struct netdev_queue *txq,
		    spinlock_t *root_lock, bool validate)
{
	int ret = NETDEV_TX_BUSY;

	/* And release qdisc */
	spin_unlock(root_lock);

	/* Note that we validate skb (GSO, checksum, ...) outside of locks */
	if (validate)
		skb = validate_xmit_skb_list(skb, dev);

	if (likely(skb)) {
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (!netif_xmit_frozen_or_stopped(txq))
			skb = dev_hard_start_xmit(skb, dev, txq, &ret);

		HARD_TX_UNLOCK(dev, txq);
	} else {
		spin_lock(root_lock);
		return qdisc_qlen(q);
	}
	spin_lock(root_lock);

	if (dev_xmit_complete(ret)) {
		/* Driver sent out skb successfully or skb was consumed */
		ret = qdisc_qlen(q);
	} else {
		/* Driver returned NETDEV_TX_BUSY - requeue skb */
		if (unlikely(ret != NETDEV_TX_BUSY))
			net_warn_ratelimited("BUG %s code %d qlen %d\n",
					     dev->name, ret, q->q.qlen);

		ret = dev_requeue_skb(skb, q);
	}

	if (ret && netif_xmit_frozen_or_stopped(txq))
		ret = 0;

	return ret;
}

/*
 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 *
 * running seqcount guarantees only one CPU can process
 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 * this queue.
 *
 *  netif_tx_lock serializes accesses to device driver.
 *
 *  qdisc_lock(q) and netif_tx_lock are mutually exclusive,
 *  if one is grabbed, another must be free.
 *
 * Note that this procedure can be called by a watchdog timer.
 *
 * Returns to the caller:
 *				0  - queue is empty or throttled.
 *				>0 - queue is not empty.
 *
 */
static inline int qdisc_restart(struct Qdisc *q, int *packets)
{
	struct netdev_queue *txq;
	struct net_device *dev;
	spinlock_t *root_lock;
	struct sk_buff *skb;
	bool validate;

	/* Dequeue packet */
	skb = dequeue_skb(q, &validate, packets);
	if (unlikely(!skb))
		return 0;

	root_lock = qdisc_lock(q);
	dev = qdisc_dev(q);
	txq = skb_get_tx_queue(dev, skb);

	return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
}

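/* Run the qdisc: keep dequeuing and transmitting until the queue empties,
 * the dev_tx_weight quota is spent, or another task needs the CPU; in the
 * latter cases, processing is deferred via __netif_schedule().
 */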
void __qdisc_run(struct Qdisc *q)
{
	int quota = dev_tx_weight;
	int packets;

	while (qdisc_restart(q, &packets)) {
		/*
		 * Ordered by possible occurrence: Postpone processing if
		 * 1. we've exceeded packet quota
		 * 2. another process needs the CPU.
		 */
		quota -= packets;
		if (quota <= 0 || need_resched()) {
			__netif_schedule(q);
			break;
		}
	}

	qdisc_run_end(q);
}

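/* Return the most recent trans_start (last transmit time, in jiffies)
 * across all tx queues of the device, resolving a VLAN device to its
 * underlying real device first.
 */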
unsigned long dev_trans_start(struct net_device *dev)
{
	unsigned long val, res;
	unsigned int i;

	if (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);
	res = netdev_get_tx_queue(dev, 0)->trans_start;
	for (i = 1; i < dev->num_tx_queues; i++) {
		val = netdev_get_tx_queue(dev, i)->trans_start;
		if (val && time_after(val, res))
			res = val;
	}

	return res;
}
EXPORT_SYMBOL(dev_trans_start);

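/* Watchdog timer callback: if some stopped tx queue has seen no transmit
 * within dev->watchdog_timeo, report a hung queue through the driver's
 * ndo_tx_timeout() and re-arm the timer.
 */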
static void dev_watchdog(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;

	netif_tx_lock(dev);
	if (!qdisc_tx_is_noop(dev)) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			int some_queue_timedout = 0;
			unsigned int i;
			unsigned long trans_start;

			for (i = 0; i < dev->num_tx_queues; i++) {
				struct netdev_queue *txq;

				txq = netdev_get_tx_queue(dev, i);
				trans_start = txq->trans_start;
				if (netif_xmit_stopped(txq) &&
				    time_after(jiffies, (trans_start +
							 dev->watchdog_timeo))) {
					some_queue_timedout = 1;
					txq->trans_timeout++;
					break;
				}
			}

			if (some_queue_timedout) {
				WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
				       dev->name, netdev_drivername(dev), i);
				dev->netdev_ops->ndo_tx_timeout(dev);
			}
			if (!mod_timer(&dev->watchdog_timer,
				       round_jiffies(jiffies +
						     dev->watchdog_timeo)))
				dev_hold(dev);
		}
	}
	netif_tx_unlock(dev);

	dev_put(dev);
}

void __netdev_watchdog_up(struct net_device *dev)
{
	if (dev->netdev_ops->ndo_tx_timeout) {
		if (dev->watchdog_timeo <= 0)
			dev->watchdog_timeo = 5*HZ;
		if (!mod_timer(&dev->watchdog_timer,
			       round_jiffies(jiffies + dev->watchdog_timeo)))
			dev_hold(dev);
	}
}

static void dev_watchdog_up(struct net_device *dev)
{
	__netdev_watchdog_up(dev);
}

static void dev_watchdog_down(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	if (del_timer(&dev->watchdog_timer))
		dev_put(dev);
	netif_tx_unlock_bh(dev);
}

/**
 *	netif_carrier_on - set carrier
 *	@dev: network device
 *
 * Device has detected carrier.
 */
void netif_carrier_on(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		atomic_inc(&dev->carrier_changes);
		linkwatch_fire_event(dev);
		if (netif_running(dev))
			__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_on);

/**
 *	netif_carrier_off - clear carrier
 *	@dev: network device
 *
 * Device has detected loss of carrier.
 */
void netif_carrier_off(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		atomic_inc(&dev->carrier_changes);
		linkwatch_fire_event(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_off);

/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances. It is difficult to invent anything faster or
   cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
			struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
	return NULL;
}

struct Qdisc_ops noop_qdisc_ops __read_mostly = {
	.id		=	"noop",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.peek		=	noop_dequeue,
	.owner		=	THIS_MODULE,
};

static struct netdev_queue noop_netdev_queue = {
	.qdisc		=	&noop_qdisc,
	.qdisc_sleeping	=	&noop_qdisc,
};

struct Qdisc noop_qdisc = {
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noop_qdisc_ops,
	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
	.dev_queue	=	&noop_netdev_queue,
	.running	=	SEQCNT_ZERO(noop_qdisc.running),
	.busylock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
};
EXPORT_SYMBOL(noop_qdisc);

static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt)
{
	/* register_qdisc() assigns a default of noop_enqueue if unset,
	 * but __dev_queue_xmit() treats noqueue only as such
	 * if this is NULL - so clear it here. */
	qdisc->enqueue = NULL;
	return 0;
}

struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
	.id		=	"noqueue",
	.priv_size	=	0,
	.init		=	noqueue_init,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.peek		=	noop_dequeue,
	.owner		=	THIS_MODULE,
};

static const u8 prio2band[TC_PRIO_MAX + 1] = {
	1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};

/* 3-band FIFO queue: old style, but should be a bit faster than
   generic prio+fifo combination.
 */

#define PFIFO_FAST_BANDS 3

/*
 * Private data for a pfifo_fast scheduler containing:
 * 	- queues for the three bands
 * 	- bitmap indicating which of the bands contain skbs
 */
struct pfifo_fast_priv {
	u32 bitmap;
	struct qdisc_skb_head q[PFIFO_FAST_BANDS];
};

/*
 * Convert a bitmap to the first band number where an skb is queued, where:
 * 	bitmap=0 means there are no skbs on any band.
 * 	bitmap=1 means there is an skb on band 0.
 *	bitmap=7 means there are skbs on all 3 bands, etc.
 */
static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};

static inline struct qdisc_skb_head *band2list(struct pfifo_fast_priv *priv,
					       int band)
{
	return priv->q + band;
}

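/* Map skb->priority to one of the three bands via prio2band and append the
 * packet to that band's list; drop once the device's tx_queue_len is
 * exceeded.
 */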
static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
			      struct sk_buff **to_free)
{
	if (qdisc->q.qlen < qdisc_dev(qdisc)->tx_queue_len) {
		int band = prio2band[skb->priority & TC_PRIO_MAX];
		struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
		struct qdisc_skb_head *list = band2list(priv, band);

		priv->bitmap |= (1 << band);
		qdisc->q.qlen++;
		return __qdisc_enqueue_tail(skb, qdisc, list);
	}

	return qdisc_drop(skb, qdisc, to_free);
}

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	int band = bitmap2band[priv->bitmap];

	if (likely(band >= 0)) {
		struct qdisc_skb_head *qh = band2list(priv, band);
		struct sk_buff *skb = __qdisc_dequeue_head(qh);

		if (likely(skb != NULL)) {
			qdisc_qstats_backlog_dec(qdisc, skb);
			qdisc_bstats_update(qdisc, skb);
		}

		qdisc->q.qlen--;
		if (qh->qlen == 0)
			priv->bitmap &= ~(1 << band);

		return skb;
	}

	return NULL;
}

static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	int band = bitmap2band[priv->bitmap];

	if (band >= 0) {
		struct qdisc_skb_head *qh = band2list(priv, band);

		return qh->head;
	}

	return NULL;
}

static void pfifo_fast_reset(struct Qdisc *qdisc)
{
	int prio;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		__qdisc_reset_queue(band2list(priv, prio));

	priv->bitmap = 0;
	qdisc->qstats.backlog = 0;
	qdisc->q.qlen = 0;
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
{
	int prio;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		qdisc_skb_head_init(band2list(priv, prio));

	/* Can by-pass the queue discipline */
	qdisc->flags |= TCQ_F_CAN_BYPASS;
	return 0;
}

struct Qdisc_ops pfifo_fast_ops __read_mostly = {
	.id		=	"pfifo_fast",
	.priv_size	=	sizeof(struct pfifo_fast_priv),
	.enqueue	=	pfifo_fast_enqueue,
	.dequeue	=	pfifo_fast_dequeue,
	.peek		=	pfifo_fast_peek,
	.init		=	pfifo_fast_init,
	.reset		=	pfifo_fast_reset,
	.dump		=	pfifo_fast_dump,
	.owner		=	THIS_MODULE,
};
EXPORT_SYMBOL(pfifo_fast_ops);

static struct lock_class_key qdisc_tx_busylock;
static struct lock_class_key qdisc_running_key;

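/* Allocate a Qdisc plus its ops-private area on the NUMA node of the tx
 * queue; the returned pointer is QDISC_ALIGNTO-aligned (see the
 * realignment fallback below), with locks and the running seqcount
 * initialized.
 */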
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops)
{
	void *p;
	struct Qdisc *sch;
	unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size;
	int err = -ENOBUFS;
	struct net_device *dev = dev_queue->dev;

	p = kzalloc_node(size, GFP_KERNEL,
			 netdev_queue_numa_node_read(dev_queue));

	if (!p)
		goto errout;
	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
	/* if we got non-aligned memory, ask for more and do the alignment ourselves */
	if (sch != p) {
		kfree(p);
		p = kzalloc_node(size + QDISC_ALIGNTO - 1, GFP_KERNEL,
				 netdev_queue_numa_node_read(dev_queue));
		if (!p)
			goto errout;
		sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
		sch->padded = (char *) sch - (char *) p;
	}
	qdisc_skb_head_init(&sch->q);
	spin_lock_init(&sch->q.lock);

	spin_lock_init(&sch->busylock);
	lockdep_set_class(&sch->busylock,
			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);

	seqcount_init(&sch->running);
	lockdep_set_class(&sch->running,
			  dev->qdisc_running_key ?: &qdisc_running_key);

	sch->ops = ops;
	sch->enqueue = ops->enqueue;
	sch->dequeue = ops->dequeue;
	sch->dev_queue = dev_queue;
	dev_hold(dev);
	refcount_set(&sch->refcnt, 1);

	return sch;
errout:
	return ERR_PTR(err);
}

struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops,
				unsigned int parentid)
{
	struct Qdisc *sch;

	if (!try_module_get(ops->owner))
		return NULL;

	sch = qdisc_alloc(dev_queue, ops);
	if (IS_ERR(sch)) {
		module_put(ops->owner);
		return NULL;
	}
	sch->parent = parentid;

	if (!ops->init || ops->init(sch, NULL) == 0)
		return sch;

	qdisc_destroy(sch);
	return NULL;
}
EXPORT_SYMBOL(qdisc_create_dflt);

/* Under qdisc_lock(qdisc) and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;

	if (ops->reset)
		ops->reset(qdisc);

	kfree_skb(qdisc->skb_bad_txq);
	qdisc->skb_bad_txq = NULL;

	if (qdisc->gso_skb) {
		kfree_skb_list(qdisc->gso_skb);
		qdisc->gso_skb = NULL;
	}
	qdisc->q.qlen = 0;
}
EXPORT_SYMBOL(qdisc_reset);

static void qdisc_rcu_free(struct rcu_head *head)
{
	struct Qdisc *qdisc = container_of(head, struct Qdisc, rcu_head);

	if (qdisc_is_percpu_stats(qdisc)) {
		free_percpu(qdisc->cpu_bstats);
		free_percpu(qdisc->cpu_qstats);
	}

	kfree((char *) qdisc - qdisc->padded);
}

void qdisc_destroy(struct Qdisc *qdisc)
{
	const struct Qdisc_ops  *ops = qdisc->ops;

	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !refcount_dec_and_test(&qdisc->refcnt))
		return;

#ifdef CONFIG_NET_SCHED
	qdisc_hash_del(qdisc);

	qdisc_put_stab(rtnl_dereference(qdisc->stab));
#endif
	gen_kill_estimator(&qdisc->rate_est);
	if (ops->reset)
		ops->reset(qdisc);
	if (ops->destroy)
		ops->destroy(qdisc);

	module_put(ops->owner);
	dev_put(qdisc_dev(qdisc));

	kfree_skb_list(qdisc->gso_skb);
	kfree_skb(qdisc->skb_bad_txq);
	/*
	 * gen_estimator est_timer() might access qdisc->q.lock,
	 * wait an RCU grace period before freeing qdisc.
	 */
	call_rcu(&qdisc->rcu_head, qdisc_rcu_free);
}
EXPORT_SYMBOL(qdisc_destroy);

/* Attach toplevel qdisc to device queue. */
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc)
{
	struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
	spinlock_t *root_lock;

	root_lock = qdisc_lock(oqdisc);
	spin_lock_bh(root_lock);

	/* Prune old scheduler */
	if (oqdisc && refcount_read(&oqdisc->refcnt) <= 1)
		qdisc_reset(oqdisc);

	/* ... and graft new one */
	if (qdisc == NULL)
		qdisc = &noop_qdisc;
	dev_queue->qdisc_sleeping = qdisc;
	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);

	spin_unlock_bh(root_lock);

	return oqdisc;
}
EXPORT_SYMBOL(dev_graft_qdisc);

static void attach_one_default_qdisc(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_unused)
{
	struct Qdisc *qdisc;
	const struct Qdisc_ops *ops = default_qdisc_ops;

	if (dev->priv_flags & IFF_NO_QUEUE)
		ops = &noqueue_qdisc_ops;

	qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT);
	if (!qdisc) {
		netdev_info(dev, "activation failed\n");
		return;
	}
	if (!netif_is_multiqueue(dev))
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	dev_queue->qdisc_sleeping = qdisc;
}

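/* Single-queue and IFF_NO_QUEUE devices get a default qdisc per tx queue;
 * real multiqueue devices get the mq scheduler, which sets up a child
 * qdisc for every tx queue.
 */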
static void attach_default_qdiscs(struct net_device *dev)
{
	struct netdev_queue *txq;
	struct Qdisc *qdisc;

	txq = netdev_get_tx_queue(dev, 0);

	if (!netif_is_multiqueue(dev) ||
	    dev->priv_flags & IFF_NO_QUEUE) {
		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
		dev->qdisc = txq->qdisc_sleeping;
		refcount_inc(&dev->qdisc->refcnt);
	} else {
		qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT);
		if (qdisc) {
			dev->qdisc = qdisc;
			qdisc->ops->attach(qdisc);
		}
	}
#ifdef CONFIG_NET_SCHED
	if (dev->qdisc != &noop_qdisc)
		qdisc_hash_add(dev->qdisc, false);
#endif
}

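/* Publish the sleeping qdisc as the active one for this queue and, for tx
 * queues, reset trans_start and request the watchdog timer.
 */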
static void transition_one_qdisc(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_need_watchdog)
{
	struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
	int *need_watchdog_p = _need_watchdog;

	if (!(new_qdisc->flags & TCQ_F_BUILTIN))
		clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);

	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
	if (need_watchdog_p) {
		dev_queue->trans_start = 0;
		*need_watchdog_p = 1;
	}
}

void dev_activate(struct net_device *dev)
{
	int need_watchdog;

	/* No queueing discipline is attached to device;
	 * create a default one for devices which need queueing,
	 * and noqueue_qdisc for virtual interfaces.
	 */

	if (dev->qdisc == &noop_qdisc)
		attach_default_qdiscs(dev);

	if (!netif_carrier_ok(dev))
		/* Delay activation until next carrier-on event */
		return;

	need_watchdog = 0;
	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
	if (dev_ingress_queue(dev))
		transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);

	if (need_watchdog) {
		netif_trans_update(dev);
		dev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(dev_activate);

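/* Swap the queue's active qdisc for qdisc_default (normally &noop_qdisc),
 * marking the old one deactivated and resetting it, all under its qdisc
 * lock.
 */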
static void dev_deactivate_queue(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_qdisc_default)
{
	struct Qdisc *qdisc_default = _qdisc_default;
	struct Qdisc *qdisc;

	qdisc = rtnl_dereference(dev_queue->qdisc);
	if (qdisc) {
		spin_lock_bh(qdisc_lock(qdisc));

		if (!(qdisc->flags & TCQ_F_BUILTIN))
			set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);

		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		qdisc_reset(qdisc);

		spin_unlock_bh(qdisc_lock(qdisc));
	}
}

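/* Check whether any tx qdisc of the device is still running or scheduled
 * for softirq processing; dev_deactivate_many() spins until this clears.
 */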
static bool some_qdisc_is_busy(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		spinlock_t *root_lock;
		struct Qdisc *q;
		int val;

		dev_queue = netdev_get_tx_queue(dev, i);
		q = dev_queue->qdisc_sleeping;
		root_lock = qdisc_lock(q);

		spin_lock_bh(root_lock);

		val = (qdisc_is_running(q) ||
		       test_bit(__QDISC_STATE_SCHED, &q->state));

		spin_unlock_bh(root_lock);

		if (val)
			return true;
	}
	return false;
}

/**
 * 	dev_deactivate_many - deactivate transmissions on several devices
 * 	@head: list of devices to deactivate
 *
 *	This function returns only when all outstanding transmissions
 *	have completed, unless all devices are in dismantle phase.
 */
void dev_deactivate_many(struct list_head *head)
{
	struct net_device *dev;
	bool sync_needed = false;

	list_for_each_entry(dev, head, close_list) {
		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
					 &noop_qdisc);
		if (dev_ingress_queue(dev))
			dev_deactivate_queue(dev, dev_ingress_queue(dev),
					     &noop_qdisc);

		dev_watchdog_down(dev);
		sync_needed |= !dev->dismantle;
	}

	/* Wait for outstanding qdisc-less dev_queue_xmit calls.
	 * This is avoided if all devices are in dismantle phase:
	 * the caller will call synchronize_net() for us.
	 */
	if (sync_needed)
		synchronize_net();

	/* Wait for outstanding qdisc_run calls. */
	list_for_each_entry(dev, head, close_list)
		while (some_qdisc_is_busy(dev))
			yield();
}

void dev_deactivate(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	dev_deactivate_many(&single);
	list_del(&single);
}
EXPORT_SYMBOL(dev_deactivate);

static void dev_init_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc)
{
	struct Qdisc *qdisc = _qdisc;

	rcu_assign_pointer(dev_queue->qdisc, qdisc);
	dev_queue->qdisc_sleeping = qdisc;
}

void dev_init_scheduler(struct net_device *dev)
{
	dev->qdisc = &noop_qdisc;
	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
	if (dev_ingress_queue(dev))
		dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);

	setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
}

static void shutdown_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc_default)
{
	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
	struct Qdisc *qdisc_default = _qdisc_default;

	if (qdisc) {
		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		dev_queue->qdisc_sleeping = qdisc_default;

		qdisc_destroy(qdisc);
	}
}

void dev_shutdown(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
	if (dev_ingress_queue(dev))
		shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
	qdisc_destroy(dev->qdisc);
	dev->qdisc = &noop_qdisc;

	WARN_ON(timer_pending(&dev->watchdog_timer));
}

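/* Worked example (illustrative, not from the original source): for a rate
 * of 1 Mbit/s, rate_bytes_ps is 125000, i.e. 8000 ns per byte. The loop
 * below scales factor/mult so that (len * mult) >> shift approximates
 * len * 8000; a 1500 byte packet then costs ~12,000,000 ns, matching
 * NSEC_PER_SEC * 1500 / 125000.
 */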
void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64)
{
	memset(r, 0, sizeof(*r));
	r->overhead = conf->overhead;
	r->rate_bytes_ps = max_t(u64, conf->rate, rate64);
	r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
	r->mult = 1;
	/*
	 * The deal here is to replace a divide by a reciprocal one
	 * in fast path (a reciprocal divide is a multiply and a shift)
	 *
	 * Normal formula would be:
	 *  time_in_ns = (NSEC_PER_SEC * len) / rate_bps
	 *
	 * We compute mult/shift to use instead:
	 *  time_in_ns = (len * mult) >> shift;
	 *
	 * We try to get the highest possible mult value for accuracy,
	 * but have to make sure no overflows will ever happen.
	 */
	if (r->rate_bytes_ps > 0) {
		u64 factor = NSEC_PER_SEC;

		for (;;) {
			r->mult = div64_u64(factor, r->rate_bytes_ps);
			if (r->mult & (1U << 31) || factor & (1ULL << 63))
				break;
			factor <<= 1;
			r->shift++;
		}
	}
}
EXPORT_SYMBOL(psched_ratecfg_precompute);