/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *              Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *              - Ingress support
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
 * dev->queue_lock spinlock.
 *
 * The idea is the following:
 * - enqueue, dequeue are serialized via top level device
 *   spinlock dev->queue_lock.
 * - ingress filtering is serialized via top level device
 *   spinlock dev->ingress_lock.
 * - updates to tree and tree walking are only done under the rtnl mutex.
 */

void qdisc_lock_tree(struct net_device *dev)
{
	spin_lock_bh(&dev->queue_lock);
	spin_lock(&dev->ingress_lock);
}

void qdisc_unlock_tree(struct net_device *dev)
{
	spin_unlock(&dev->ingress_lock);
	spin_unlock_bh(&dev->queue_lock);
}

static inline int qdisc_qlen(struct Qdisc *q)
{
	return q->q.qlen;
}

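/* Put @skb back after a failed transmit.  A GSO segment list is stashed
 * in dev->gso_skb instead of being requeued, so the partially sent list
 * is not reordered; the device is then rescheduled for a later retry.
 */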
static inline int dev_requeue_skb(struct sk_buff *skb, struct net_device *dev,
				  struct Qdisc *q)
{
	if (unlikely(skb->next))
		dev->gso_skb = skb;
	else
		q->ops->requeue(skb, q);

	netif_schedule(dev);
	return 0;
}

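/* Fetch the next packet to transmit: a previously stashed GSO segment
 * list takes priority over the qdisc's own dequeue.
 */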
static inline struct sk_buff *dev_dequeue_skb(struct net_device *dev,
					      struct Qdisc *q)
{
	struct sk_buff *skb;

	if ((skb = dev->gso_skb))
		dev->gso_skb = NULL;
	else
		skb = q->dequeue(q);

	return skb;
}

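/* Called when the driver's tx lock could not be taken.  Either we are
 * recursing on the same CPU (a driver bug: the packet is dropped) or
 * another CPU genuinely holds the lock (the packet is requeued).
 */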
static inline int handle_dev_cpu_collision(struct sk_buff *skb,
					   struct net_device *dev,
					   struct Qdisc *q)
{
	int ret;

	if (unlikely(dev->xmit_lock_owner == smp_processor_id())) {
		/*
		 * Same CPU holding the lock. It may be a transient
		 * configuration error, when hard_start_xmit() recurses. We
		 * detect it by checking xmit owner and drop the packet when
		 * deadloop is detected. Return OK to try the next skb.
		 */
		kfree_skb(skb);
		if (net_ratelimit())
			printk(KERN_WARNING "Dead loop on netdevice %s, "
			       "fix it urgently!\n", dev->name);
		ret = qdisc_qlen(q);
	} else {
		/*
		 * Another cpu is holding lock, requeue & delay xmits for
		 * some time.
		 */
		__get_cpu_var(netdev_rx_stat).cpu_collision++;
		ret = dev_requeue_skb(skb, dev, q);
	}

	return ret;
}

/*
 * NOTE: Called under dev->queue_lock with locally disabled BH.
 *
 * __LINK_STATE_QDISC_RUNNING guarantees that only one CPU can process
 * this device at a time. dev->queue_lock serializes queue accesses for
 * this device AND dev->qdisc pointer itself.
 *
 * netif_tx_lock serializes accesses to the device driver.
 *
 * dev->queue_lock and netif_tx_lock are mutually exclusive:
 * if one is grabbed, the other must be free.
 *
 * Note that this procedure can be called by a watchdog timer.
 *
 * Returns to the caller:
 *				0  - queue is empty or throttled.
 *				>0 - queue is not empty.
 *
 */
static inline int qdisc_restart(struct net_device *dev)
{
	struct Qdisc *q = dev->qdisc;
	struct sk_buff *skb;
	unsigned lockless;
	int ret;

	/* Dequeue packet */
	if (unlikely((skb = dev_dequeue_skb(dev, q)) == NULL))
		return 0;

	/*
	 * When the driver has LLTX set, it does its own locking in
	 * start_xmit. These checks are worth it because even uncongested
	 * locks can be quite expensive. The driver can do a trylock, as
	 * is being done here; in case of lock contention it should return
	 * NETDEV_TX_LOCKED and the packet will be requeued.
	 */
	lockless = (dev->features & NETIF_F_LLTX);

	if (!lockless && !netif_tx_trylock(dev)) {
		/* Another CPU grabbed the driver tx lock */
		return handle_dev_cpu_collision(skb, dev, q);
	}

	/* And release queue */
	spin_unlock(&dev->queue_lock);

	ret = dev_hard_start_xmit(skb, dev);

	if (!lockless)
		netif_tx_unlock(dev);

	spin_lock(&dev->queue_lock);
	q = dev->qdisc;

	switch (ret) {
	case NETDEV_TX_OK:
		/* Driver sent out skb successfully */
		ret = qdisc_qlen(q);
		break;

	case NETDEV_TX_LOCKED:
		/* Driver try lock failed */
		ret = handle_dev_cpu_collision(skb, dev, q);
		break;

	default:
		/* Driver returned NETDEV_TX_BUSY - requeue skb */
		if (unlikely(ret != NETDEV_TX_BUSY && net_ratelimit()))
			printk(KERN_WARNING "BUG %s code %d qlen %d\n",
			       dev->name, ret, q->q.qlen);

		ret = dev_requeue_skb(skb, dev, q);
		break;
	}

	return ret;
}

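/* Transmit packets until qdisc_restart() reports an empty queue or the
 * driver stops the device, then release __LINK_STATE_QDISC_RUNNING so
 * another CPU may service this device.
 */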
void __qdisc_run(struct net_device *dev)
{
	do {
		if (!qdisc_restart(dev))
			break;
	} while (!netif_queue_stopped(dev));

	clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
}

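/* Transmit watchdog timer.  If the queue has been stopped for longer
 * than dev->watchdog_timeo since the last transmission, invoke the
 * driver's tx_timeout() handler; the timer re-arms itself while the
 * device remains present, running and carrier-on.
 */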
static void dev_watchdog(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;

	netif_tx_lock(dev);
	if (dev->qdisc != &noop_qdisc) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			if (netif_queue_stopped(dev) &&
			    time_after(jiffies, dev->trans_start + dev->watchdog_timeo)) {

				printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n",
				       dev->name);
				dev->tx_timeout(dev);
			}
			if (!mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + dev->watchdog_timeo)))
				dev_hold(dev);
		}
	}
	netif_tx_unlock(dev);

	dev_put(dev);
}

static void dev_watchdog_init(struct net_device *dev)
{
	init_timer(&dev->watchdog_timer);
	dev->watchdog_timer.data = (unsigned long)dev;
	dev->watchdog_timer.function = dev_watchdog;
}

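/* Arm the watchdog for drivers that implement tx_timeout(), defaulting
 * the timeout to 5*HZ.  mod_timer() returns 0 when the timer was not
 * already pending, in which case a device reference is taken on its
 * behalf.
 */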
void __netdev_watchdog_up(struct net_device *dev)
{
	if (dev->tx_timeout) {
		if (dev->watchdog_timeo <= 0)
			dev->watchdog_timeo = 5*HZ;
		if (!mod_timer(&dev->watchdog_timer,
			       round_jiffies(jiffies + dev->watchdog_timeo)))
			dev_hold(dev);
	}
}

static void dev_watchdog_up(struct net_device *dev)
{
	__netdev_watchdog_up(dev);
}

static void dev_watchdog_down(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	if (del_timer(&dev->watchdog_timer))
		dev_put(dev);
	netif_tx_unlock_bh(dev);
}

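/* Carrier transitions: fire a linkwatch event on every state change,
 * and re-arm the watchdog when the carrier comes back on a running
 * device.
 */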
void netif_carrier_on(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state))
		linkwatch_fire_event(dev);
	if (netif_running(dev))
		__netdev_watchdog_up(dev);
}

void netif_carrier_off(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state))
		linkwatch_fire_event(dev);
}

/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances. It is difficult to invent anything faster or
   cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc * qdisc)
{
	kfree_skb(skb);
	return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc * qdisc)
{
	return NULL;
}

static int noop_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
{
	if (net_ratelimit())
		printk(KERN_DEBUG "%s deferred output. It is buggy.\n",
		       skb->dev->name);
	kfree_skb(skb);
	return NET_XMIT_CN;
}

struct Qdisc_ops noop_qdisc_ops = {
	.id		=	"noop",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.requeue	=	noop_requeue,
	.owner		=	THIS_MODULE,
};

struct Qdisc noop_qdisc = {
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noop_qdisc_ops,
	.list		=	LIST_HEAD_INIT(noop_qdisc.list),
};

static struct Qdisc_ops noqueue_qdisc_ops = {
	.id		=	"noqueue",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.requeue	=	noop_requeue,
	.owner		=	THIS_MODULE,
};

static struct Qdisc noqueue_qdisc = {
	.enqueue	=	NULL,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noqueue_qdisc_ops,
	.list		=	LIST_HEAD_INIT(noqueue_qdisc.list),
};


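/* Map skb->priority (TC_PRIO_*) onto the three pfifo_fast bands; band 0
 * is dequeued first (interactive traffic), band 2 last (bulk/filler).
 */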
static const u8 prio2band[TC_PRIO_MAX+1] =
	{ 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1 };

/* 3-band FIFO queue: old style, but should be a bit faster than
   generic prio+fifo combination.
 */

#define PFIFO_FAST_BANDS 3

static inline struct sk_buff_head *prio2list(struct sk_buff *skb,
					     struct Qdisc *qdisc)
{
	struct sk_buff_head *list = qdisc_priv(qdisc);
	return list + prio2band[skb->priority & TC_PRIO_MAX];
}

static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
{
	struct sk_buff_head *list = prio2list(skb, qdisc);

	if (skb_queue_len(list) < qdisc->dev->tx_queue_len) {
		qdisc->q.qlen++;
		return __qdisc_enqueue_tail(skb, qdisc, list);
	}

	return qdisc_drop(skb, qdisc);
}

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		if (!skb_queue_empty(list + prio)) {
			qdisc->q.qlen--;
			return __qdisc_dequeue_head(qdisc, list + prio);
		}
	}

	return NULL;
}

static int pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
{
	qdisc->q.qlen++;
	return __qdisc_requeue(skb, qdisc, prio2list(skb, qdisc));
}

static void pfifo_fast_reset(struct Qdisc* qdisc)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		__qdisc_reset_queue(qdisc, list + prio);

	qdisc->qstats.backlog = 0;
	qdisc->q.qlen = 0;
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
	RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

rtattr_failure:
	return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct rtattr *opt)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		skb_queue_head_init(list + prio);

	return 0;
}

static struct Qdisc_ops pfifo_fast_ops = {
	.id		=	"pfifo_fast",
	.priv_size	=	PFIFO_FAST_BANDS * sizeof(struct sk_buff_head),
	.enqueue	=	pfifo_fast_enqueue,
	.dequeue	=	pfifo_fast_dequeue,
	.requeue	=	pfifo_fast_requeue,
	.init		=	pfifo_fast_init,
	.reset		=	pfifo_fast_reset,
	.dump		=	pfifo_fast_dump,
	.owner		=	THIS_MODULE,
};

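/* Allocate a qdisc, padding the allocation so that both the struct and
 * ops->priv_size bytes of private data are QDISC_ALIGNTO-aligned.
 * Returns an ERR_PTR() on failure.
 */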
struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops)
{
	void *p;
	struct Qdisc *sch;
	unsigned int size;
	int err = -ENOBUFS;

	/* ensure that the Qdisc and the private data are 32-byte aligned */
	size = QDISC_ALIGN(sizeof(*sch));
	size += ops->priv_size + (QDISC_ALIGNTO - 1);

	p = kzalloc(size, GFP_KERNEL);
	if (!p)
		goto errout;
	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
	sch->padded = (char *) sch - (char *) p;

	INIT_LIST_HEAD(&sch->list);
	skb_queue_head_init(&sch->q);
	sch->ops = ops;
	sch->enqueue = ops->enqueue;
	sch->dequeue = ops->dequeue;
	sch->dev = dev;
	dev_hold(dev);
	atomic_set(&sch->refcnt, 1);

	return sch;
errout:
	return ERR_PTR(err);
}

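/* Allocate a qdisc and initialise it with default (NULL) options,
 * destroying it again if init() fails.  A minimal usage sketch (see
 * dev_activate() below for the real in-tree caller):
 *
 *	qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops, TC_H_ROOT);
 *	if (qdisc == NULL)
 *		... report the failure ...
 */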
struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops,
				 unsigned int parentid)
{
	struct Qdisc *sch;

	sch = qdisc_alloc(dev, ops);
	if (IS_ERR(sch))
		goto errout;
	sch->stats_lock = &dev->queue_lock;
	sch->parent = parentid;

	if (!ops->init || ops->init(sch, NULL) == 0)
		return sch;

	qdisc_destroy(sch);
errout:
	return NULL;
}

/* Under dev->queue_lock and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
	struct Qdisc_ops *ops = qdisc->ops;

	if (ops->reset)
		ops->reset(qdisc);
}

/* This is the RCU callback that frees a qdisc once there
 * are no further references to it. */

static void __qdisc_destroy(struct rcu_head *head)
{
	struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu);
	kfree((char *) qdisc - qdisc->padded);
}

/* Under dev->queue_lock and BH! */

void qdisc_destroy(struct Qdisc *qdisc)
{
	struct Qdisc_ops  *ops = qdisc->ops;

	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !atomic_dec_and_test(&qdisc->refcnt))
		return;

	list_del(&qdisc->list);
	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
	if (ops->reset)
		ops->reset(qdisc);
	if (ops->destroy)
		ops->destroy(qdisc);

	module_put(ops->owner);
	dev_put(qdisc->dev);
	call_rcu(&qdisc->q_rcu, __qdisc_destroy);
}

void dev_activate(struct net_device *dev)
{
	/* If no queueing discipline is attached to the device, create a
	   default one: pfifo_fast for devices that need queueing, and
	   noqueue_qdisc for virtual interfaces.
	 */

	if (dev->qdisc_sleeping == &noop_qdisc) {
		struct Qdisc *qdisc;
		if (dev->tx_queue_len) {
			qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops,
						  TC_H_ROOT);
			if (qdisc == NULL) {
				printk(KERN_INFO "%s: activation failed\n", dev->name);
				return;
			}
			list_add_tail(&qdisc->list, &dev->qdisc_list);
		} else {
			qdisc =  &noqueue_qdisc;
		}
		dev->qdisc_sleeping = qdisc;
	}

	if (!netif_carrier_ok(dev))
		/* Delay activation until next carrier-on event */
		return;

	spin_lock_bh(&dev->queue_lock);
	rcu_assign_pointer(dev->qdisc, dev->qdisc_sleeping);
	if (dev->qdisc != &noqueue_qdisc) {
		dev->trans_start = jiffies;
		dev_watchdog_up(dev);
	}
	spin_unlock_bh(&dev->queue_lock);
}

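/* Detach the active qdisc and quiesce the transmit path: once this
 * returns, no CPU is inside qdisc_run() for the device and any stashed
 * GSO segment list has been freed.
 */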
void dev_deactivate(struct net_device *dev)
{
	struct Qdisc *qdisc;
	struct sk_buff *skb;

	spin_lock_bh(&dev->queue_lock);
	qdisc = dev->qdisc;
	dev->qdisc = &noop_qdisc;

	qdisc_reset(qdisc);

	skb = dev->gso_skb;
	dev->gso_skb = NULL;
	spin_unlock_bh(&dev->queue_lock);

	kfree_skb(skb);

	dev_watchdog_down(dev);

	/* Wait for outstanding dev_queue_xmit calls. */
	synchronize_rcu();

	/* Wait for outstanding qdisc_run calls. */
	while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
		yield();
}

void dev_init_scheduler(struct net_device *dev)
{
	qdisc_lock_tree(dev);
	dev->qdisc = &noop_qdisc;
	dev->qdisc_sleeping = &noop_qdisc;
	INIT_LIST_HEAD(&dev->qdisc_list);
	qdisc_unlock_tree(dev);

	dev_watchdog_init(dev);
}

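/* Final teardown at unregister time: the sleeping qdisc (and any
 * ingress qdisc) is destroyed and both qdisc pointers are reset to
 * noop_qdisc.
 */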
void dev_shutdown(struct net_device *dev)
{
	struct Qdisc *qdisc;

	qdisc_lock_tree(dev);
	qdisc = dev->qdisc_sleeping;
	dev->qdisc = &noop_qdisc;
	dev->qdisc_sleeping = &noop_qdisc;
	qdisc_destroy(qdisc);
#if defined(CONFIG_NET_SCH_INGRESS) || defined(CONFIG_NET_SCH_INGRESS_MODULE)
	if ((qdisc = dev->qdisc_ingress) != NULL) {
		dev->qdisc_ingress = NULL;
		qdisc_destroy(qdisc);
	}
#endif
	BUG_TRAP(!timer_pending(&dev->watchdog_timer));
	qdisc_unlock_tree(dev);
}

EXPORT_SYMBOL(netif_carrier_on);
EXPORT_SYMBOL(netif_carrier_off);
EXPORT_SYMBOL(noop_qdisc);
EXPORT_SYMBOL(qdisc_create_dflt);
EXPORT_SYMBOL(qdisc_destroy);
EXPORT_SYMBOL(qdisc_reset);
EXPORT_SYMBOL(qdisc_lock_tree);
EXPORT_SYMBOL(qdisc_unlock_tree);