#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table {
	struct tc_ratespec rate;
	u32		data[256];
	struct qdisc_rate_table *next;
	int		refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
};

/*
 * The following bits are only changed while the qdisc lock is held.
 */
enum qdisc___state_t {
	__QDISC___STATE_RUNNING,
};

struct qdisc_size_table {
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

struct Qdisc {
	int 			(*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
	struct sk_buff *	(*dequeue)(struct Qdisc *dev);
	unsigned		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_THROTTLED		2
#define TCQ_F_INGRESS		4
#define TCQ_F_CAN_BYPASS	8
#define TCQ_F_MQROOT		16
#define TCQ_F_WARN_NONWC	(1 << 16)
	int			padded;
	struct Qdisc_ops	*ops;
	struct qdisc_size_table	*stab;
	struct list_head	list;
	u32			handle;
	u32			parent;
	atomic_t		refcnt;
	struct gnet_stats_rate_est	rate_est;
	int			(*reshape_fail)(struct sk_buff *skb,
					struct Qdisc *q);

	void			*u32_node;

	/* This field is deprecated, but it is still used by CBQ
	 * and it will live until a better solution is invented.
	 */
	struct Qdisc		*__parent;
	struct netdev_queue	*dev_queue;
	struct Qdisc		*next_sched;

	struct sk_buff		*gso_skb;
	/*
	 * For performance's sake on SMP, we put the most frequently
	 * modified fields at the end.
	 */
	unsigned long		state;
	struct sk_buff_head	q;
	struct gnet_stats_basic_packed bstats;
	unsigned long		__state;
	struct gnet_stats_queue	qstats;
	struct rcu_head		rcu_head;
};

static inline bool qdisc_is_running(struct Qdisc *qdisc)
{
	return test_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	return !__test_and_set_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	__clear_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
}
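
/*
 * Usage sketch (illustrative, not part of the original header): the
 * dequeue path, e.g. qdisc_run(), is expected to bracket its work with
 * these helpers so that only one CPU runs a given qdisc at a time:
 *
 *	if (qdisc_run_begin(q)) {
 *		... dequeue and transmit packets ...
 *		qdisc_run_end(q);
 *	}
 */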

struct Qdisc_class_ops {
	/* Child qdisc manipulation */
	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					struct Qdisc *, struct Qdisc **);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*get)(struct Qdisc *, u32 classid);
	void			(*put)(struct Qdisc *, unsigned long);
	int			(*change)(struct Qdisc *, u32, u32,
					struct nlattr **, unsigned long *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker * arg);

	/* Filter manipulation */
	struct tcf_proto **	(*tcf_chain)(struct Qdisc *, unsigned long);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg*);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					struct gnet_dump *);
};

struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;

	int 			(*enqueue)(struct sk_buff *, struct Qdisc *);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);
	unsigned int		(*drop)(struct Qdisc *);

	int			(*init)(struct Qdisc *, struct nlattr *arg);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *, struct nlattr *arg);
	void			(*attach)(struct Qdisc *);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module		*owner;
};


struct tcf_result {
	unsigned long	class;
	u32		classid;
};

struct tcf_proto_ops {
	struct tcf_proto_ops	*next;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff*, struct tcf_proto*,
					struct tcf_result *);
	int			(*init)(struct tcf_proto*);
	void			(*destroy)(struct tcf_proto*);

	unsigned long		(*get)(struct tcf_proto*, u32 handle);
	void			(*put)(struct tcf_proto*, unsigned long);
	int			(*change)(struct tcf_proto*, unsigned long,
					u32 handle, struct nlattr **,
					unsigned long *);
	int			(*delete)(struct tcf_proto*, unsigned long);
	void			(*walk)(struct tcf_proto*, struct tcf_walker *arg);

	/* rtnetlink specific */
	int			(*dump)(struct tcf_proto*, unsigned long,
					struct sk_buff *skb, struct tcmsg*);

	struct module		*owner;
};

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto	*next;
	void			*root;
	int			(*classify)(struct sk_buff*, struct tcf_proto*,
					struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	u32			classid;
	struct Qdisc		*q;
	void			*data;
	struct tcf_proto_ops	*ops;
};

struct qdisc_skb_cb {
	unsigned int		pkt_len;
	char			data[];
};

static inline int qdisc_qlen(struct Qdisc *q)
{
	return q->q.qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc;
}

static inline struct Qdisc *qdisc_root_sleeping(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest.  This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us.  Otherwise we'll potentially lock the wrong qdisc
 * root.  This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}
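
/*
 * Usage sketch (illustrative only): with RTNL held, configuration code
 * can serialize against the packet processing paths like this:
 *
 *	spinlock_t *root_lock = qdisc_root_lock(q);
 *
 *	spin_lock_bh(root_lock);
 *	... modify qdisc tree state ...
 *	spin_unlock_bh(root_lock);
 */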

static inline spinlock_t *qdisc_root_sleeping_lock(struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

#define tcf_tree_lock(tp)	sch_tree_lock((tp)->q)
#define tcf_tree_unlock(tp)	sch_tree_unlock((tp)->q)

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;

struct Qdisc_class_common {
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash {
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *n;
	unsigned int h;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, n, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}
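
/*
 * Usage sketch (illustrative; "struct my_class" and the clhash member
 * are hypothetical): a classful qdisc typically embeds a
 * struct Qdisc_class_common in each class and keeps a
 * struct Qdisc_class_hash in its private data, so its ->get() can do:
 *
 *	struct Qdisc_class_common *clc;
 *
 *	clc = qdisc_class_find(&q->clhash, classid);
 *	if (clc == NULL)
 *		return 0;
 *	return (unsigned long)container_of(clc, struct my_class, common);
 */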

extern int qdisc_class_hash_init(struct Qdisc_class_hash *);
extern void qdisc_class_hash_insert(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_remove(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
extern void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

extern void dev_init_scheduler(struct net_device *dev);
extern void dev_shutdown(struct net_device *dev);
extern void dev_activate(struct net_device *dev);
extern void dev_deactivate(struct net_device *dev);
extern struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
				     struct Qdisc *qdisc);
extern void qdisc_reset(struct Qdisc *qdisc);
extern void qdisc_destroy(struct Qdisc *qdisc);
extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
				 struct Qdisc_ops *ops);
extern struct Qdisc *qdisc_create_dflt(struct net_device *dev,
				       struct netdev_queue *dev_queue,
				       struct Qdisc_ops *ops, u32 parentid);
extern void qdisc_calculate_pkt_len(struct sk_buff *skb,
				   struct qdisc_size_table *stab);
extern void tcf_destroy(struct tcf_proto *tp);
extern void tcf_destroy_chain(struct tcf_proto **fl);

/* Reset all TX qdiscs of a device.  */
static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++)
		qdisc_reset(netdev_get_tx_queue(dev, i)->qdisc);
}

/* Are all TX queues of the device empty?  */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = txq->qdisc;

		if (q->q.qlen)
			return false;
	}
	return true;
}

/* Are any of the TX qdiscs changing?  */
static inline bool qdisc_tx_changing(struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (txq->qdisc != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues?  */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (txq->qdisc != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif
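
/*
 * Usage sketch (illustrative only): enqueue paths use this to decide
 * whether a non-success return code from a child qdisc counts as a
 * drop, since __NET_XMIT_STOLEN means the packet was consumed by an
 * action rather than lost:
 *
 *	ret = qdisc_enqueue(skb, child);
 *	if (ret != NET_XMIT_SUCCESS) {
 *		if (net_xmit_drop_count(ret))
 *			sch->qstats.drops++;
 *		return ret;
 *	}
 */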

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	if (sch->stab)
		qdisc_calculate_pkt_len(skb, sch->stab);
#endif
	return sch->enqueue(skb, sch);
}

static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
{
	qdisc_skb_cb(skb)->pkt_len = skb->len;
	return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
}
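
/*
 * Note: qdisc_enqueue_root() is intended for callers such as the device
 * transmit path; it stamps the cb-resident pkt_len before the skb enters
 * the root qdisc, and the & NET_XMIT_MASK strips the qdisc-internal flag
 * bits defined above before the code is returned to the stack.
 */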

static inline void __qdisc_update_bstats(struct Qdisc *sch, unsigned int len)
{
	sch->bstats.bytes += len;
	sch->bstats.packets++;
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	__skb_queue_tail(list, skb);
	sch->qstats.backlog += qdisc_pkt_len(skb);
	__qdisc_update_bstats(sch, qdisc_pkt_len(skb));

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= qdisc_pkt_len(skb);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	return __qdisc_dequeue_head(sch, &sch->q);
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
					      struct sk_buff_head *list)
{
	struct sk_buff *skb = __qdisc_dequeue_head(sch, list);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);
		kfree_skb(skb);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch)
{
	return __qdisc_queue_drop_head(sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue_tail(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= qdisc_pkt_len(skb);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
{
	return __qdisc_dequeue_tail(sch, &sch->q);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	return skb_peek(&sch->q);
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!sch->gso_skb) {
		sch->gso_skb = sch->dequeue(sch);
		if (sch->gso_skb)
			/* it's still part of the queue */
			sch->q.qlen++;
	}

	return sch->gso_skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = sch->gso_skb;

	if (skb) {
		sch->gso_skb = NULL;
		sch->q.qlen--;
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}
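
/*
 * Usage sketch (illustrative only): a non-work-conserving qdisc can use
 * these helpers as a matched pair, e.g. setting
 * .peek = qdisc_peek_dequeued in its Qdisc_ops, and then, once it has
 * decided the peeked packet may really be sent:
 *
 *	skb = qdisc_dequeue_peeked(sch);
 */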

static inline void __qdisc_reset_queue(struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	/*
	 * We do not know the backlog in bytes of this list; it
	 * is up to the caller to correct it.
	 */
	__skb_queue_purge(list);
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(sch, &sch->q);
	sch->qstats.backlog = 0;
}

static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
					      struct sk_buff_head *list)
{
	struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);
		kfree_skb(skb);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
{
	return __qdisc_queue_drop(sch, &sch->q);
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	kfree_skb(skb);
	sch->qstats.drops++;

	return NET_XMIT_DROP;
}

static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
	sch->qstats.drops++;

#ifdef CONFIG_NET_CLS_ACT
	if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
		goto drop;

	return NET_XMIT_SUCCESS;

drop:
#endif
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
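
/*
 * Usage sketch (illustrative only; "limit" is hypothetical): a child
 * qdisc that overflows can call this to give an outer qdisc
 * (historically CBQ, via sch->reshape_fail) a chance to reshape the skb
 * instead of simply dropping it:
 *
 *	if (unlikely(sch->q.qlen >= limit))
 *		return qdisc_reshape_fail(skb, sch);
 */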

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
   long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;
	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return (rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF]);
	return rtab->data[slot];
}
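
/*
 * Worked example (illustrative values): with rate.cell_log == 3 and
 * zero cell_align/overhead, a 1000-byte packet maps to slot
 * 1000 >> 3 == 125, so qdisc_l2t() returns rtab->data[125], the
 * precomputed time to transmit a packet in that size bucket.  Slots
 * above 255 are extrapolated from the table's top entries using the
 * (slot >> 8) multiplier.
 */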

#ifdef CONFIG_NET_CLS_ACT
static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n = skb_clone(skb, gfp_mask);

	if (n) {
		n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
		n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
		n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
	}
	return n;
}
#endif

#endif