/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *              Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *              - Ingress support
 */

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <net/pkt_sched.h>

/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected by
 * the dev->queue_lock spinlock.
 *
 * The idea is the following:
 * - enqueue, dequeue are serialized via top level device
 *   spinlock dev->queue_lock.
 * - ingress filtering is serialized via top level device
 *   spinlock dev->ingress_lock.
 * - updates to tree and tree walking are only done under the rtnl mutex.
 */

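/* Take both tree locks: dev->queue_lock protects the egress qdisc tree,
 * dev->ingress_lock the ingress one. BHs are disabled for the duration.
 */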
void qdisc_lock_tree(struct net_device *dev)
{
	spin_lock_bh(&dev->queue_lock);
	spin_lock(&dev->ingress_lock);
}

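/* Release the tree locks taken by qdisc_lock_tree(), in reverse order. */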
void qdisc_unlock_tree(struct net_device *dev)
{
	spin_unlock(&dev->ingress_lock);
	spin_unlock_bh(&dev->queue_lock);
}

static inline int qdisc_qlen(struct Qdisc *q)
{
	return q->q.qlen;
}

static inline int dev_requeue_skb(struct sk_buff *skb, struct net_device *dev,
				  struct Qdisc *q)
{
	if (unlikely(skb->next))
		dev->gso_skb = skb;
	else
		q->ops->requeue(skb, q);

	netif_schedule(dev);
	return 0;
}

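/* Fetch the next packet to transmit: a stashed GSO skb takes priority
 * over a fresh dequeue from the qdisc.
 */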
static inline struct sk_buff *dev_dequeue_skb(struct net_device *dev,
					      struct Qdisc *q)
{
	struct sk_buff *skb;

	if ((skb = dev->gso_skb))
		dev->gso_skb = NULL;
	else
		skb = q->dequeue(q);

	return skb;
}

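/* Called when the driver's tx lock could not be taken, either because
 * netif_tx_trylock() failed or the driver returned NETDEV_TX_LOCKED:
 * drop the packet on same-CPU lock recursion, otherwise requeue it.
 */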
static inline int handle_dev_cpu_collision(struct sk_buff *skb,
					   struct net_device *dev,
					   struct Qdisc *q)
{
	int ret;

	if (unlikely(dev->xmit_lock_owner == smp_processor_id())) {
		/*
		 * The same CPU already holds the lock: hard_start_xmit()
		 * has recursed, usually a transient configuration error.
		 * We detect the dead loop by checking the xmit owner and
		 * drop the packet. Return OK so the caller moves on to
		 * the next skb.
		 */
		kfree_skb(skb);
		if (net_ratelimit())
			printk(KERN_WARNING "Dead loop on netdevice %s, "
			       "fix it urgently!\n", dev->name);
		ret = qdisc_qlen(q);
	} else {
		/*
		 * Another CPU holds the lock; requeue the skb and delay
		 * further xmits for a while.
		 */
		__get_cpu_var(netdev_rx_stat).cpu_collision++;
		ret = dev_requeue_skb(skb, dev, q);
	}

	return ret;
}

/*
 * NOTE: Called under dev->queue_lock with locally disabled BH.
 *
 * __LINK_STATE_QDISC_RUNNING guarantees only one CPU can process this
 * device at a time. dev->queue_lock serializes queue accesses for
 * this device AND dev->qdisc pointer itself.
 *
 *  netif_tx_lock serializes accesses to device driver.
 *
 *  dev->queue_lock and netif_tx_lock are mutually exclusive;
 *  if one is grabbed, the other must be free.
 *
 * Note that this procedure can be called by a watchdog timer.
 *
 * Returns to the caller:
 *				0  - queue is empty or throttled.
 *				>0 - queue is not empty.
 *
 */
static inline int qdisc_restart(struct net_device *dev)
{
	struct Qdisc *q = dev->qdisc;
	struct sk_buff *skb;
	unsigned lockless;
	int ret;

	/* Dequeue packet */
	if (unlikely((skb = dev_dequeue_skb(dev, q)) == NULL))
		return 0;

	/*
	 * When the driver has LLTX set, it does its own locking in
	 * start_xmit. These checks are worth it because even uncongested
	 * locks can be quite expensive. The driver can do a trylock, as
	 * is being done here; in case of lock contention it should return
	 * NETDEV_TX_LOCKED and the packet will be requeued.
	 */
	lockless = (dev->features & NETIF_F_LLTX);

	if (!lockless && !netif_tx_trylock(dev)) {
		/* Another CPU grabbed the driver tx lock */
		return handle_dev_cpu_collision(skb, dev, q);
	}

	/* And release queue */
	spin_unlock(&dev->queue_lock);

	ret = dev_hard_start_xmit(skb, dev);

	if (!lockless)
		netif_tx_unlock(dev);

	spin_lock(&dev->queue_lock);
	q = dev->qdisc;

	switch (ret) {
	case NETDEV_TX_OK:
		/* Driver sent out skb successfully */
		ret = qdisc_qlen(q);
		break;

	case NETDEV_TX_LOCKED:
		/* Driver try lock failed */
		ret = handle_dev_cpu_collision(skb, dev, q);
		break;

	default:
		/* Driver returned NETDEV_TX_BUSY - requeue skb */
		if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit()))
			printk(KERN_WARNING "BUG %s code %d qlen %d\n",
			       dev->name, ret, q->q.qlen);

		ret = dev_requeue_skb(skb, dev, q);
		break;
	}

	return ret;
}

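/* Transmit packets until qdisc_restart() reports an empty or throttled
 * queue or the driver stops the tx queue, then clear
 * __LINK_STATE_QDISC_RUNNING so another CPU may run this queue.
 */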
void __qdisc_run(struct net_device *dev)
{
	do {
		if (!qdisc_restart(dev))
			break;
	} while (!netif_queue_stopped(dev));

	clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
}

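/* Transmit watchdog: if the tx queue has been stopped for longer than
 * dev->watchdog_timeo while the device is up with carrier, report the
 * hang through the driver's tx_timeout() and re-arm the timer.
 */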
static void dev_watchdog(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;

	netif_tx_lock(dev);
	if (dev->qdisc != &noop_qdisc) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			if (netif_queue_stopped(dev) &&
			    time_after(jiffies, dev->trans_start + dev->watchdog_timeo)) {

				printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n",
				       dev->name);
				dev->tx_timeout(dev);
			}
			if (!mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + dev->watchdog_timeo)))
				dev_hold(dev);
		}
	}
	netif_tx_unlock(dev);

	dev_put(dev);
}

static void dev_watchdog_init(struct net_device *dev)
{
	init_timer(&dev->watchdog_timer);
	dev->watchdog_timer.data = (unsigned long)dev;
	dev->watchdog_timer.function = dev_watchdog;
}

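/* Arm the transmit watchdog for drivers that implement tx_timeout(),
 * defaulting the timeout to 5 seconds. A device reference is held
 * while the timer is pending.
 */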
void __netdev_watchdog_up(struct net_device *dev)
{
	if (dev->tx_timeout) {
		if (dev->watchdog_timeo <= 0)
			dev->watchdog_timeo = 5*HZ;
		if (!mod_timer(&dev->watchdog_timer,
			       round_jiffies(jiffies + dev->watchdog_timeo)))
			dev_hold(dev);
	}
}

static void dev_watchdog_up(struct net_device *dev)
{
	__netdev_watchdog_up(dev);
}

static void dev_watchdog_down(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	if (del_timer(&dev->watchdog_timer))
		dev_put(dev);
	netif_tx_unlock_bh(dev);
}

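/* Mark the link as having carrier, fire a linkwatch event on the
 * transition, and restart the transmit watchdog if the device is up.
 */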
void netif_carrier_on(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state))
		linkwatch_fire_event(dev);
	if (netif_running(dev))
		__netdev_watchdog_up(dev);
}

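/* Mark the link as having lost carrier and fire a linkwatch event on
 * the transition.
 */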
void netif_carrier_off(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state))
		linkwatch_fire_event(dev);
}

/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances. It is difficult to invent anything faster or
   cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc * qdisc)
{
	kfree_skb(skb);
	return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc * qdisc)
{
	return NULL;
}

static int noop_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
{
	if (net_ratelimit())
		printk(KERN_DEBUG "%s deferred output. It is buggy.\n",
		       skb->dev->name);
	kfree_skb(skb);
	return NET_XMIT_CN;
}

struct Qdisc_ops noop_qdisc_ops = {
	.id		=	"noop",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.requeue	=	noop_requeue,
	.owner		=	THIS_MODULE,
};

struct Qdisc noop_qdisc = {
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noop_qdisc_ops,
	.list		=	LIST_HEAD_INIT(noop_qdisc.list),
};

static struct Qdisc_ops noqueue_qdisc_ops = {
	.id		=	"noqueue",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.requeue	=	noop_requeue,
	.owner		=	THIS_MODULE,
};

static struct Qdisc noqueue_qdisc = {
	.enqueue	=	NULL,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noqueue_qdisc_ops,
	.list		=	LIST_HEAD_INIT(noqueue_qdisc.list),
};


static const u8 prio2band[TC_PRIO_MAX+1] =
	{ 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1 };

/* 3-band FIFO queue: old style, but should be a bit faster than
   the generic prio+fifo combination.
 */

#define PFIFO_FAST_BANDS 3

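/* Map a packet to one of the PFIFO_FAST_BANDS band queues in the
 * qdisc's private data, using the prio2band table above.
 */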
static inline struct sk_buff_head *prio2list(struct sk_buff *skb,
					     struct Qdisc *qdisc)
{
	struct sk_buff_head *list = qdisc_priv(qdisc);
	return list + prio2band[skb->priority & TC_PRIO_MAX];
}

static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
{
	struct sk_buff_head *list = prio2list(skb, qdisc);

	if (skb_queue_len(list) < qdisc->dev->tx_queue_len) {
		qdisc->q.qlen++;
		return __qdisc_enqueue_tail(skb, qdisc, list);
	}

	return qdisc_drop(skb, qdisc);
}

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		if (!skb_queue_empty(list + prio)) {
			qdisc->q.qlen--;
			return __qdisc_dequeue_head(qdisc, list + prio);
		}
	}

	return NULL;
}

static int pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
{
	qdisc->q.qlen++;
	return __qdisc_requeue(skb, qdisc, prio2list(skb, qdisc));
}

static void pfifo_fast_reset(struct Qdisc* qdisc)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		__qdisc_reset_queue(qdisc, list + prio);

	qdisc->qstats.backlog = 0;
	qdisc->q.qlen = 0;
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
	RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

rtattr_failure:
	return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct rtattr *opt)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		skb_queue_head_init(list + prio);

	return 0;
}

static struct Qdisc_ops pfifo_fast_ops = {
	.id		=	"pfifo_fast",
	.priv_size	=	PFIFO_FAST_BANDS * sizeof(struct sk_buff_head),
	.enqueue	=	pfifo_fast_enqueue,
	.dequeue	=	pfifo_fast_dequeue,
	.requeue	=	pfifo_fast_requeue,
	.init		=	pfifo_fast_init,
	.reset		=	pfifo_fast_reset,
	.dump		=	pfifo_fast_dump,
	.owner		=	THIS_MODULE,
};

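/* Allocate a zeroed qdisc with room for ops->priv_size bytes of private
 * data and initialize the common fields; returns an ERR_PTR on failure.
 */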
struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops)
{
	void *p;
	struct Qdisc *sch;
	unsigned int size;
	int err = -ENOBUFS;

	/* ensure that the Qdisc and the private data are 32-byte aligned */
	size = QDISC_ALIGN(sizeof(*sch));
	size += ops->priv_size + (QDISC_ALIGNTO - 1);

	p = kzalloc(size, GFP_KERNEL);
	if (!p)
		goto errout;
	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
	sch->padded = (char *) sch - (char *) p;

	INIT_LIST_HEAD(&sch->list);
	skb_queue_head_init(&sch->q);
	sch->ops = ops;
	sch->enqueue = ops->enqueue;
	sch->dequeue = ops->dequeue;
	sch->dev = dev;
	dev_hold(dev);
	atomic_set(&sch->refcnt, 1);

	return sch;
errout:
	return ERR_PTR(err);	/* err is already negative (-ENOBUFS) */
}

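/* Allocate a qdisc, point its stats lock at dev->queue_lock, record its
 * parent and run ops->init(); returns NULL (not an ERR_PTR) on failure.
 */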
struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops,
				 unsigned int parentid)
{
	struct Qdisc *sch;

	sch = qdisc_alloc(dev, ops);
	if (IS_ERR(sch))
		goto errout;
	sch->stats_lock = &dev->queue_lock;
	sch->parent = parentid;

	if (!ops->init || ops->init(sch, NULL) == 0)
		return sch;

	qdisc_destroy(sch);
errout:
	return NULL;
}

/* Under dev->queue_lock and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
	struct Qdisc_ops *ops = qdisc->ops;

	if (ops->reset)
		ops->reset(qdisc);
}

/* RCU callback that frees a qdisc once there are no further references
 * to it.
 */

static void __qdisc_destroy(struct rcu_head *head)
{
	struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu);
	kfree((char *) qdisc - qdisc->padded);
}

/* Under dev->queue_lock and BH! */

void qdisc_destroy(struct Qdisc *qdisc)
{
	struct Qdisc_ops  *ops = qdisc->ops;

	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !atomic_dec_and_test(&qdisc->refcnt))
		return;

	list_del(&qdisc->list);
	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
	if (ops->reset)
		ops->reset(qdisc);
	if (ops->destroy)
		ops->destroy(qdisc);

	module_put(ops->owner);
	dev_put(qdisc->dev);
	call_rcu(&qdisc->q_rcu, __qdisc_destroy);
}

void dev_activate(struct net_device *dev)
{
	/* No queueing discipline is attached to the device yet;
	   create a default one: pfifo_fast for devices that need
	   queueing, noqueue_qdisc for virtual interfaces.
	 */

	if (dev->qdisc_sleeping == &noop_qdisc) {
		struct Qdisc *qdisc;
		if (dev->tx_queue_len) {
			qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops,
						  TC_H_ROOT);
			if (qdisc == NULL) {
				printk(KERN_INFO "%s: activation failed\n", dev->name);
				return;
			}
			list_add_tail(&qdisc->list, &dev->qdisc_list);
		} else {
			qdisc =  &noqueue_qdisc;
		}
		dev->qdisc_sleeping = qdisc;
	}

	if (!netif_carrier_ok(dev))
		/* Delay activation until next carrier-on event */
		return;

	spin_lock_bh(&dev->queue_lock);
	rcu_assign_pointer(dev->qdisc, dev->qdisc_sleeping);
	if (dev->qdisc != &noqueue_qdisc) {
		dev->trans_start = jiffies;
		dev_watchdog_up(dev);
	}
	spin_unlock_bh(&dev->queue_lock);
}

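/* Swing the device over to noop_qdisc, reset the old qdisc, free any
 * stashed GSO skb, and wait until concurrent dev_queue_xmit and
 * qdisc_run callers have drained before returning.
 */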
void dev_deactivate(struct net_device *dev)
{
	struct Qdisc *qdisc;
	struct sk_buff *skb;

	spin_lock_bh(&dev->queue_lock);
	qdisc = dev->qdisc;
	dev->qdisc = &noop_qdisc;

	qdisc_reset(qdisc);

	skb = dev->gso_skb;
	dev->gso_skb = NULL;
	spin_unlock_bh(&dev->queue_lock);

	kfree_skb(skb);

	dev_watchdog_down(dev);

	/* Wait for outstanding dev_queue_xmit calls. */
	synchronize_rcu();

	/* Wait for outstanding qdisc_run calls. */
	while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
		yield();
}

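/* Called at device registration: start out with the noop qdisc both
 * active and sleeping, and set up the transmit watchdog timer.
 */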
void dev_init_scheduler(struct net_device *dev)
{
	qdisc_lock_tree(dev);
	dev->qdisc = &noop_qdisc;
	dev->qdisc_sleeping = &noop_qdisc;
	INIT_LIST_HEAD(&dev->qdisc_list);
	qdisc_unlock_tree(dev);

	dev_watchdog_init(dev);
}

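/* Called at device unregistration: destroy the attached egress (and,
 * if configured, ingress) qdiscs and fall back to noop_qdisc.
 */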
void dev_shutdown(struct net_device *dev)
{
	struct Qdisc *qdisc;

	qdisc_lock_tree(dev);
	qdisc = dev->qdisc_sleeping;
	dev->qdisc = &noop_qdisc;
	dev->qdisc_sleeping = &noop_qdisc;
	qdisc_destroy(qdisc);
#if defined(CONFIG_NET_SCH_INGRESS) || defined(CONFIG_NET_SCH_INGRESS_MODULE)
	if ((qdisc = dev->qdisc_ingress) != NULL) {
		dev->qdisc_ingress = NULL;
		qdisc_destroy(qdisc);
	}
#endif
	BUG_TRAP(!timer_pending(&dev->watchdog_timer));
	qdisc_unlock_tree(dev);
}

EXPORT_SYMBOL(netif_carrier_on);
EXPORT_SYMBOL(netif_carrier_off);
EXPORT_SYMBOL(noop_qdisc);
EXPORT_SYMBOL(qdisc_create_dflt);
EXPORT_SYMBOL(qdisc_destroy);
EXPORT_SYMBOL(qdisc_reset);
EXPORT_SYMBOL(qdisc_lock_tree);
EXPORT_SYMBOL(qdisc_unlock_tree);