#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table
{
	struct tc_ratespec rate;
	u32		data[256];
	struct qdisc_rate_table *next;
	int		refcnt;
};

enum qdisc_state_t
{
	__QDISC_STATE_RUNNING,
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
};
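
/*
 * These are bit numbers used with the atomic bitops on qdisc->state.
 * A minimal caller-side sketch (illustrative, not part of this header):
 *
 *	if (test_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state))
 *		return;		(the qdisc is being torn down)
 */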

struct qdisc_size_table {
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

struct Qdisc
{
	int 			(*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
	struct sk_buff *	(*dequeue)(struct Qdisc *dev);
	unsigned		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_THROTTLED		2
#define TCQ_F_INGRESS		4
#define TCQ_F_CAN_BYPASS	8
#define TCQ_F_WARN_NONWC	(1 << 16)
	int			padded;
	struct Qdisc_ops	*ops;
	struct qdisc_size_table	*stab;
	struct list_head	list;
	u32			handle;
	u32			parent;
	atomic_t		refcnt;
	struct gnet_stats_rate_est	rate_est;
	int			(*reshape_fail)(struct sk_buff *skb,
					struct Qdisc *q);

	void			*u32_node;

	/* This field is deprecated, but it is still used by CBQ
	 * and it will live until a better solution is invented.
	 */
	struct Qdisc		*__parent;
	struct netdev_queue	*dev_queue;
	struct Qdisc		*next_sched;

	struct sk_buff		*gso_skb;
	/*
	 * For performance's sake on SMP, we put frequently modified fields at the end
	 */
	unsigned long		state;
	struct sk_buff_head	q;
	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue	qstats;
};

struct Qdisc_class_ops
{
	/* Child qdisc manipulation */
	int			(*graft)(struct Qdisc *, unsigned long cl,
					struct Qdisc *, struct Qdisc **);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*get)(struct Qdisc *, u32 classid);
	void			(*put)(struct Qdisc *, unsigned long);
	int			(*change)(struct Qdisc *, u32, u32,
					struct nlattr **, unsigned long *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker * arg);

	/* Filter manipulation */
	struct tcf_proto **	(*tcf_chain)(struct Qdisc *, unsigned long);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg*);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					struct gnet_dump *);
};

struct Qdisc_ops
{
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;

	int 			(*enqueue)(struct sk_buff *, struct Qdisc *);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);
	unsigned int		(*drop)(struct Qdisc *);

	int			(*init)(struct Qdisc *, struct nlattr *arg);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *, struct nlattr *arg);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module		*owner;
};
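
/*
 * A qdisc module fills in a Qdisc_ops and registers it with
 * register_qdisc(), declared in net/pkt_sched.h.  A minimal sketch,
 * assuming hypothetical my_enqueue()/my_dequeue() handlers and a
 * private struct my_sched_data:
 *
 *	static struct Qdisc_ops my_qdisc_ops __read_mostly = {
 *		.id		= "myqdisc",
 *		.priv_size	= sizeof(struct my_sched_data),
 *		.enqueue	= my_enqueue,
 *		.dequeue	= my_dequeue,
 *		.peek		= qdisc_peek_dequeued,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	return register_qdisc(&my_qdisc_ops);
 */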


struct tcf_result
{
	unsigned long	class;
	u32		classid;
};
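
/*
 * tcf_result carries a classifier's verdict: ->class is an opaque class
 * reference owned by the qdisc, ->classid the TC class id.  A sketch of
 * a classful qdisc consulting its filter chain via tc_classify() from
 * net/pkt_cls.h (the cast follows the usual convention; struct my_class
 * is hypothetical):
 *
 *	struct tcf_result res;
 *
 *	if (tc_classify(skb, q->filter_list, &res) >= 0)
 *		cl = (struct my_class *)res.class;
 */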

struct tcf_proto_ops
{
	struct tcf_proto_ops	*next;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff*, struct tcf_proto*,
					struct tcf_result *);
	int			(*init)(struct tcf_proto*);
	void			(*destroy)(struct tcf_proto*);

	unsigned long		(*get)(struct tcf_proto*, u32 handle);
	void			(*put)(struct tcf_proto*, unsigned long);
	int			(*change)(struct tcf_proto*, unsigned long,
					u32 handle, struct nlattr **,
					unsigned long *);
	int			(*delete)(struct tcf_proto*, unsigned long);
	void			(*walk)(struct tcf_proto*, struct tcf_walker *arg);

	/* rtnetlink specific */
	int			(*dump)(struct tcf_proto*, unsigned long,
					struct sk_buff *skb, struct tcmsg*);

	struct module		*owner;
};

struct tcf_proto
{
	/* Fast access part */
	struct tcf_proto	*next;
	void			*root;
	int			(*classify)(struct sk_buff*, struct tcf_proto*,
					struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	u32			classid;
	struct Qdisc		*q;
	void			*data;
	struct tcf_proto_ops	*ops;
};

struct qdisc_skb_cb {
	unsigned int		pkt_len;
	char			data[];
};

static inline int qdisc_qlen(struct Qdisc *q)
{
	return q->q.qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}
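
/*
 * Qdiscs needing per-packet scratch space place a private struct in the
 * data[] tail of qdisc_skb_cb, in the style of netem.  A sketch (names
 * hypothetical):
 *
 *	struct my_skb_cb {
 *		u32	deadline;
 *	};
 *
 *	static inline struct my_skb_cb *my_skb_cb(struct sk_buff *skb)
 *	{
 *		return (struct my_skb_cb *)qdisc_skb_cb(skb)->data;
 *	}
 */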

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc;
}

static inline struct Qdisc *qdisc_root_sleeping(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest.  This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us.  Otherwise we'll potentially lock the wrong qdisc
 * root.  This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

#define tcf_tree_lock(tp)	sch_tree_lock((tp)->q)
#define tcf_tree_unlock(tp)	sch_tree_unlock((tp)->q)
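
/*
 * Typical sch_tree_lock() use in a qdisc's ->change() handler: take the
 * sleeping root lock so the datapath is quiesced while configuration is
 * swapped.  A sketch (q->limit is a hypothetical private field):
 *
 *	sch_tree_lock(sch);
 *	q->limit = new_limit;
 *	sch_tree_unlock(sch);
 */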

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;

struct Qdisc_class_common
{
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash
{
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *n;
	unsigned int h;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, n, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}
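
/*
 * Classful qdiscs embed a Qdisc_class_common in their per-class struct
 * and recover the container with container_of(), as HTB does.  A sketch
 * (struct my_class and its "common" member are hypothetical):
 *
 *	struct Qdisc_class_common *clc;
 *
 *	clc = qdisc_class_find(&q->clhash, classid);
 *	if (clc)
 *		cl = container_of(clc, struct my_class, common);
 */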

extern int qdisc_class_hash_init(struct Qdisc_class_hash *);
extern void qdisc_class_hash_insert(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_remove(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
extern void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

extern void dev_init_scheduler(struct net_device *dev);
extern void dev_shutdown(struct net_device *dev);
extern void dev_activate(struct net_device *dev);
extern void dev_deactivate(struct net_device *dev);
extern struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
				     struct Qdisc *qdisc);
extern void qdisc_reset(struct Qdisc *qdisc);
extern void qdisc_destroy(struct Qdisc *qdisc);
extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
				 struct Qdisc_ops *ops);
extern struct Qdisc *qdisc_create_dflt(struct net_device *dev,
				       struct netdev_queue *dev_queue,
				       struct Qdisc_ops *ops, u32 parentid);
extern void qdisc_calculate_pkt_len(struct sk_buff *skb,
				   struct qdisc_size_table *stab);
extern void tcf_destroy(struct tcf_proto *tp);
extern void tcf_destroy_chain(struct tcf_proto **fl);

/* Reset all TX qdiscs of a device.  */
static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++)
		qdisc_reset(netdev_get_tx_queue(dev, i)->qdisc);
}

/* Are all TX queues of the device empty?  */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = txq->qdisc;

		if (q->q.qlen)
			return false;
	}
	return true;
}

/* Are any of the TX qdiscs changing?  */
static inline bool qdisc_tx_changing(struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (txq->qdisc != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues?  */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (txq->qdisc != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif
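
/*
 * net_xmit_drop_count() lets a parent qdisc account a child's enqueue
 * failure without counting packets that an action already stole.  The
 * usual pattern:
 *
 *	ret = qdisc_enqueue(skb, child);
 *	if (ret != NET_XMIT_SUCCESS) {
 *		if (net_xmit_drop_count(ret))
 *			sch->qstats.drops++;
 *		return ret;
 *	}
 */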

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	if (sch->stab)
		qdisc_calculate_pkt_len(skb, sch->stab);
#endif
	return sch->enqueue(skb, sch);
}

static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
{
	qdisc_skb_cb(skb)->pkt_len = skb->len;
	return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
}

static inline void __qdisc_update_bstats(struct Qdisc *sch, unsigned int len)
{
	sch->bstats.bytes += len;
	sch->bstats.packets++;
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	__skb_queue_tail(list, skb);
	sch->qstats.backlog += qdisc_pkt_len(skb);
	__qdisc_update_bstats(sch, qdisc_pkt_len(skb));

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= qdisc_pkt_len(skb);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	return __qdisc_dequeue_head(sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue_tail(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= qdisc_pkt_len(skb);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
{
	return __qdisc_dequeue_tail(sch, &sch->q);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	return skb_peek(&sch->q);
}

/* generic pseudo peek method for non-work-conserving qdiscs */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!sch->gso_skb) {
		sch->gso_skb = sch->dequeue(sch);
		if (sch->gso_skb)
			/* it's still part of the queue */
			sch->q.qlen++;
	}

	return sch->gso_skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = sch->gso_skb;

	if (skb) {
		sch->gso_skb = NULL;
		sch->q.qlen--;
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}
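
/*
 * A non-work-conserving qdisc pairs the two helpers above: its ->peek()
 * is qdisc_peek_dequeued(), and the real dequeue commits only once the
 * packet may actually be sent, in the style of TBF.  A sketch
 * (my_tokens_available() is hypothetical):
 *
 *	static struct sk_buff *my_dequeue(struct Qdisc *sch)
 *	{
 *		struct sk_buff *skb = qdisc_peek_dequeued(sch);
 *
 *		if (!skb || !my_tokens_available(sch, qdisc_pkt_len(skb)))
 *			return NULL;
 *		return qdisc_dequeue_peeked(sch);
 *	}
 */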

static inline void __qdisc_reset_queue(struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	/*
	 * We do not know the backlog in bytes of this list; it
	 * is up to the caller to correct it.
	 */
	__skb_queue_purge(list);
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(sch, &sch->q);
	sch->qstats.backlog = 0;
}

static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
					      struct sk_buff_head *list)
{
	struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);
		kfree_skb(skb);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
{
	return __qdisc_queue_drop(sch, &sch->q);
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	kfree_skb(skb);
	sch->qstats.drops++;

	return NET_XMIT_DROP;
}
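
/*
 * qdisc_drop() is the usual tail of a bounded enqueue path.  A sketch
 * of a pfifo-style ->enqueue() (q->limit is a hypothetical private
 * field):
 *
 *	static int my_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 *	{
 *		if (likely(skb_queue_len(&sch->q) < q->limit))
 *			return qdisc_enqueue_tail(skb, sch);
 *		return qdisc_drop(skb, sch);
 *	}
 */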

static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
	sch->qstats.drops++;

#ifdef CONFIG_NET_CLS_ACT
	if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
		goto drop;

	return NET_XMIT_SUCCESS;

drop:
#endif
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;
	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return (rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF]);
	return rtab->data[slot];
}
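
/*
 * A shaping qdisc charges each packet its transmission time straight
 * from the rate table, as TBF does with its token bucket.  A sketch
 * (q->rate_tab is a hypothetical private field):
 *
 *	u32 cost = qdisc_l2t(q->rate_tab, qdisc_pkt_len(skb));
 */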

#ifdef CONFIG_NET_CLS_ACT
static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n = skb_clone(skb, gfp_mask);

	if (n) {
		n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
		n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
		n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
	}
	return n;
}
#endif

#endif