/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table {
	struct tc_ratespec rate;
	u32		data[256];
	struct qdisc_rate_table *next;
	int		refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
};

struct qdisc_size_table {
	struct rcu_head		rcu;
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

/* similar to sk_buff_head, but skb->prev pointer is undefined. */
struct qdisc_skb_head {
	struct sk_buff	*head;
	struct sk_buff	*tail;
	__u32		qlen;
	spinlock_t	lock;
};

struct Qdisc {
	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *sch);
	unsigned int		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_INGRESS		2
#define TCQ_F_CAN_BYPASS	4
#define TCQ_F_MQROOT		8
#define TCQ_F_ONETXQUEUE	0x10 /* dequeue_skb() can assume all skbs are for
				      * q->dev_queue: it can test
				      * netif_xmit_frozen_or_stopped() before
				      * dequeuing the next packet.
				      * This is true for MQ/MQPRIO slaves, or for
				      * a non-multiqueue device.
				      */
#define TCQ_F_WARN_NONWC	(1 << 16)
#define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT		0x40 /* root of its hierarchy:
				      * qdisc_tree_decrease_qlen() should stop.
				      */
#define TCQ_F_INVISIBLE		0x80 /* invisible by default in dump */
#define TCQ_F_OFFLOADED		0x200 /* qdisc is offloaded to HW */
	u32			limit;
	const struct Qdisc_ops	*ops;
	struct qdisc_size_table	__rcu *stab;
	struct hlist_node       hash;
	u32			handle;
	u32			parent;

	struct netdev_queue	*dev_queue;

	struct net_rate_estimator __rcu *rate_est;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct gnet_stats_queue	__percpu *cpu_qstats;

	/*
	 * For performance's sake on SMP, we put highly modified fields at the end.
	 */
	struct sk_buff		*gso_skb ____cacheline_aligned_in_smp;
	struct qdisc_skb_head	q;
	struct gnet_stats_basic_packed bstats;
	seqcount_t		running;
	struct gnet_stats_queue	qstats;
	unsigned long		state;
	struct Qdisc            *next_sched;
	struct sk_buff		*skb_bad_txq;
	int			padded;
	refcount_t		refcnt;

	spinlock_t		busylock ____cacheline_aligned_in_smp;
};

static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return;
	refcount_inc(&qdisc->refcnt);
}

static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
	return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc_is_running(qdisc))
		return false;
	/* Variant of write_seqcount_begin() telling lockdep a trylock
	 * was attempted.
	 */
	raw_write_seqcount_begin(&qdisc->running);
	seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
	return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	write_seqcount_end(&qdisc->running);
}

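/* Typical caller pattern (an illustrative sketch, not an API defined in this
 * header): the running seqcount acts as a trylock around the dequeue loop,
 * so only one CPU services a given qdisc at a time.  __qdisc_run() is the
 * usual loop body; any other work would take the same shape:
 *
 *	if (qdisc_run_begin(q)) {
 *		__qdisc_run(q);		// dequeue/transmit until quota or empty
 *		qdisc_run_end(q);
 *	}
 *
 * A false return from qdisc_run_begin() means another CPU already owns the
 * qdisc, and the caller must back off rather than spin.
 */
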
static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
	return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
	/* Non-BQL migrated drivers will return 0, too. */
	return dql_avail(&txq->dql);
#else
	return 0;
#endif
}

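/* Sketch of how the two helpers above combine in a dequeue path (illustrative
 * only; the real logic lives in dequeue_skb() in net/sched/sch_generic.c):
 * bulking is attempted only for single-queue qdiscs, bounded by the BQL
 * budget of the underlying TX queue.
 *
 *	if (qdisc_may_bulk(q)) {
 *		int budget = qdisc_avail_bulklimit(txq);
 *
 *		// keep dequeuing while budget > 0, charging each skb's
 *		// qdisc_pkt_len() against the budget
 *	}
 */
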
struct Qdisc_class_ops {
	/* Child qdisc manipulation */
	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					struct Qdisc *, struct Qdisc **);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*find)(struct Qdisc *, u32 classid);
	int			(*change)(struct Qdisc *, u32, u32,
					struct nlattr **, unsigned long *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker * arg);

	/* Filter manipulation */
	struct tcf_block *	(*tcf_block)(struct Qdisc *, unsigned long);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg*);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					struct gnet_dump *);
};

struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;

	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);

	int			(*init)(struct Qdisc *, struct nlattr *arg);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *, struct nlattr *arg);
	void			(*attach)(struct Qdisc *);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module		*owner;
};


struct tcf_result {
	union {
		struct {
			unsigned long	class;
			u32		classid;
		};
		const struct tcf_proto *goto_tp;
	};
};

struct tcf_proto_ops {
	struct list_head	head;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	int			(*init)(struct tcf_proto*);
	void			(*destroy)(struct tcf_proto*);

	void*			(*get)(struct tcf_proto*, u32 handle);
	int			(*change)(struct net *net, struct sk_buff *,
					struct tcf_proto*, unsigned long,
					u32 handle, struct nlattr **,
					void **, bool);
	int			(*delete)(struct tcf_proto*, void *, bool*);
	void			(*walk)(struct tcf_proto*, struct tcf_walker *arg);
	void			(*bind_class)(void *, u32, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct net*, struct tcf_proto*, void *,
					struct sk_buff *skb, struct tcmsg*);

	struct module		*owner;
};

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto __rcu	*next;
	void __rcu		*root;
	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	u32			classid;
	struct Qdisc		*q;
	void			*data;
	const struct tcf_proto_ops	*ops;
	struct tcf_chain	*chain;
	struct rcu_head		rcu;
};

struct qdisc_skb_cb {
	unsigned int		pkt_len;
	u16			slave_dev_queue_mapping;
	u16			tc_classid;
#define QDISC_CB_PRIV_LEN 20
	unsigned char		data[QDISC_CB_PRIV_LEN];
};

typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);

struct tcf_chain {
	struct tcf_proto __rcu *filter_chain;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	struct list_head list;
	struct tcf_block *block;
	u32 index; /* chain index */
	unsigned int refcnt;
};

struct tcf_block {
	struct list_head chain_list;
	struct net *net;
	struct Qdisc *q;
	struct list_head cb_list;
	struct work_struct work;
};

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	struct qdisc_skb_cb *qcb;

	BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
}

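/* Usage sketch (the my_* names are hypothetical): a qdisc that keeps private
 * per-packet state inside qdisc_skb_cb::data validates the size once, in the
 * accessor that returns its private cb, the way e.g. netem does:
 *
 *	struct my_skb_cb {
 *		psched_time_t time_to_send;
 *	};
 *
 *	static inline struct my_skb_cb *my_skb_cb(struct sk_buff *skb)
 *	{
 *		qdisc_cb_private_validate(skb, sizeof(struct my_skb_cb));
 *		return (struct my_skb_cb *)qdisc_skb_cb(skb)->data;
 *	}
 */
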
static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

	return q;
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest.  This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us.  Otherwise we'll potentially lock the wrong qdisc
 * root.  This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return &root->running;
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(const struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(const struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

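/* Usage sketch (illustrative shape only): a classful qdisc's ->change()
 * takes the tree lock while it rewires classes, so the dequeue path never
 * observes a half-updated hierarchy.  RTNL must already be held, as
 * qdisc_root_sleeping_lock() above asserts:
 *
 *	sch_tree_lock(sch);
 *	// update limits, swap child qdiscs, fix up qlen accounting
 *	sch_tree_unlock(sch);
 */
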
extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
	return ntx < dev->real_num_tx_queues ?
			default_qdisc_ops : &pfifo_fast_ops;
}

struct Qdisc_class_common {
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash {
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	if (!id)
		return NULL;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}

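/* Usage sketch (my_* names are hypothetical): qdisc implementations embed
 * struct Qdisc_class_common in their per-class structure and recover the
 * outer structure with container_of(), as e.g. HTB does:
 *
 *	struct my_class {
 *		struct Qdisc_class_common common;
 *		// scheduler-specific state
 *	};
 *
 *	static struct my_class *my_find(u32 classid, struct Qdisc *sch)
 *	{
 *		struct my_sched *q = qdisc_priv(sch);
 *		struct Qdisc_class_common *clc;
 *
 *		clc = qdisc_class_find(&q->clhash, classid);
 *		return clc ? container_of(clc, struct my_class, common) : NULL;
 *	}
 */
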
static inline int tc_classid_to_hwtc(struct net_device *dev, u32 classid)
{
	u32 hwtc = TC_H_MIN(classid) - TC_H_MIN_PRIORITY;

	return (hwtc < netdev_get_num_tc(dev)) ? hwtc : -EINVAL;
}

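/* Worked example: TC_H_MIN_PRIORITY is the base minor for priority-mapped
 * classids, so with netdev_get_num_tc(dev) == 4, a classid whose minor is
 * TC_H_MIN_PRIORITY + 2 maps to hardware traffic class 2, while any minor
 * outside [TC_H_MIN_PRIORITY, TC_H_MIN_PRIORITY + 3] yields -EINVAL (the
 * subtraction wraps to a large u32, failing the bounds check).
 */
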
int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_destroy(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
			       unsigned int len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops, u32 parentid);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);

static inline void skb_reset_tc(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_redirected = 0;
#endif
}

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	return skb->tc_at_ingress;
#else
	return false;
#endif
}

static inline bool skb_skip_tc_classify(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_skip_classify) {
		skb->tc_skip_classify = 0;
		return true;
	}
#endif
	return false;
}

/* Reset all TX qdiscs greater than index of a device.  */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	qdisc_reset_all_tx_gt(dev, 0);
}

/* Are all TX queues of the device empty?  */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = rcu_dereference(txq->qdisc);

		if (q->q.qlen) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

/* Are any of the TX qdiscs changing?  */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues?  */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
					   const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

	if (stab)
		__qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch, to_free);
}

static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
	return q->flags & TCQ_F_CPUSTATS;
}

static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
				  __u64 bytes, __u32 packets)
{
	bstats->bytes += bytes;
	bstats->packets += packets;
}

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
				 const struct sk_buff *skb)
{
	_bstats_update(bstats,
		       qdisc_pkt_len(skb),
		       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}

static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				      __u64 bytes, __u32 packets)
{
	u64_stats_update_begin(&bstats->syncp);
	_bstats_update(&bstats->bstats, bytes, packets);
	u64_stats_update_end(&bstats->syncp);
}

static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				     const struct sk_buff *skb)
{
	u64_stats_update_begin(&bstats->syncp);
	bstats_update(&bstats->bstats, skb);
	u64_stats_update_end(&bstats->syncp);
}

static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
					   const struct sk_buff *skb)
{
	bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
	sch->qstats.drops += count;
}

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
	qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
	qstats->overlimits++;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
	qstats_drop_inc(&sch->qstats);
}

static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->drops);
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
	sch->qstats.overlimits++;
}

static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
{
	qh->head = NULL;
	qh->tail = NULL;
	qh->qlen = 0;
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct qdisc_skb_head *qh)
{
	struct sk_buff *last = qh->tail;

	if (last) {
		skb->next = NULL;
		last->next = skb;
		qh->tail = skb;
	} else {
		qh->tail = skb;
		qh->head = skb;
	}
	qh->qlen++;
	qdisc_qstats_backlog_inc(sch, skb);

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
	struct sk_buff *skb = qh->head;

	if (likely(skb != NULL)) {
		qh->head = skb->next;
		qh->qlen--;
		if (qh->head == NULL)
			qh->tail = NULL;
		skb->next = NULL;
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (likely(skb != NULL)) {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}

/* Instead of calling kfree_skb() while root qdisc lock is held,
 * queue the skb for future freeing at end of __dev_xmit_skb()
 */
static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
	skb->next = *to_free;
	*to_free = skb;
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct qdisc_skb_head *qh,
						   struct sk_buff **to_free)
{
	struct sk_buff *skb = __qdisc_dequeue_head(qh);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		qdisc_qstats_backlog_dec(sch, skb);
		__qdisc_drop(skb, to_free);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
						 struct sk_buff **to_free)
{
	return __qdisc_queue_drop_head(sch, &sch->q, to_free);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	const struct qdisc_skb_head *qh = &sch->q;

	return qh->head;
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!sch->gso_skb) {
		sch->gso_skb = sch->dequeue(sch);
		if (sch->gso_skb) {
			/* it's still part of the queue */
			qdisc_qstats_backlog_inc(sch, sch->gso_skb);
			sch->q.qlen++;
		}
	}

	return sch->gso_skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = sch->gso_skb;

	if (skb) {
		sch->gso_skb = NULL;
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}

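/* Usage sketch (illustrative shape only): a non-work-conserving qdisc such
 * as a shaper peeks to decide whether the head packet may be released yet,
 * and only commits with qdisc_dequeue_peeked() once it may;
 * allowed_to_send() is a hypothetical placeholder for the qdisc's own check:
 *
 *	skb = qdisc_peek_dequeued(sch);	// cached in gso_skb, still counted
 *	if (!skb || !allowed_to_send(sch, skb))
 *		return NULL;		// e.g. arm a watchdog and wait
 *	return qdisc_dequeue_peeked(sch);
 */
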
static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
	/*
	 * We do not know the backlog in bytes of this list, it
	 * is up to the caller to correct it
	 */
	ASSERT_RTNL();
	if (qh->qlen) {
		rtnl_kfree_skbs(qh->head, qh->tail);

		qh->head = NULL;
		qh->tail = NULL;
		qh->qlen = 0;
	}
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(&sch->q);
	sch->qstats.backlog = 0;
}

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
					  struct Qdisc **pold)
{
	struct Qdisc *old;

	sch_tree_lock(sch);
	old = *pold;
	*pold = new;
	if (old != NULL) {
		unsigned int qlen = old->q.qlen;
		unsigned int backlog = old->qstats.backlog;

		qdisc_reset(old);
		qdisc_tree_reduce_backlog(old, qlen, backlog);
	}
	sch_tree_unlock(sch);

	return old;
}

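/* Usage sketch (my_* names are hypothetical): the typical ->graft() of a
 * one-child qdisc reduces to a single qdisc_replace() call, handing the old
 * child back to the caller for destruction, as e.g. TBF does:
 *
 *	static int my_graft(struct Qdisc *sch, unsigned long arg,
 *			    struct Qdisc *new, struct Qdisc **old)
 *	{
 *		struct my_sched *q = qdisc_priv(sch);
 *
 *		if (new == NULL)
 *			new = &noop_qdisc;
 *		*old = qdisc_replace(sch, new, &q->qdisc);
 *		return 0;
 *	}
 */
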
static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	rtnl_kfree_skbs(skb, skb);
	qdisc_qstats_drop(sch);
}


static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}

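/* Usage sketch (my_enqueue is hypothetical): an ->enqueue() implementation
 * rejects work over its limit with qdisc_drop(), which defers the actual
 * kfree_skb() to the caller via *to_free, in the same shape as the simple
 * fifo qdiscs:
 *
 *	static int my_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 *			      struct sk_buff **to_free)
 *	{
 *		if (likely(sch->q.qlen < sch->limit))
 *			return qdisc_enqueue_tail(skb, sch);
 *
 *		return qdisc_drop(skb, sch, to_free);
 *	}
 */
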
/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;
	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF];
	return rtab->data[slot];
}

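/* Worked example: with cell_log == 3 (8-byte cells) and zero align/overhead,
 * a 100-byte packet indexes slot 100 >> 3 == 12, so its transmission time is
 * rtab->data[12].  Slots above 255 are extrapolated from data[255], which is
 * why tables built with a too-small cell_log lose precision for large
 * packets.
 */
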
struct psched_ratecfg {
	u64	rate_bytes_ps; /* bytes per second */
	u32	mult;
	u16	overhead;
	u8	linklayer;
	u8	shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
				unsigned int len)
{
	len += r->overhead;

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
		return ((u64)(DIV_ROUND_UP(len,48)*53) * r->mult) >> r->shift;

	return ((u64)len * r->mult) >> r->shift;
}

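/* The mult/shift pair is a fixed-point encoding of nanoseconds-per-byte,
 * computed by psched_ratecfg_precompute() below, so the hot path needs no
 * division.  Worked example: at rate_bytes_ps == 125000000 (1 Gbit/s) one
 * byte costs 8 ns, so a 1500-byte frame costs roughly (1500 + overhead) * 8
 * ns.  For ATM link layers the length is first rounded up to 48-byte cells
 * carried in 53-byte cells on the wire.
 */
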
void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
					  const struct psched_ratecfg *r)
{
	memset(res, 0, sizeof(*res));

	/* legacy struct tc_ratespec has a 32bit @rate field
	 * Qdisc using 64bit rate should add new attributes
	 * in order to maintain compatibility.
	 */
	res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

	res->overhead = r->overhead;
	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}

/* Mini Qdisc serves for specific needs of ingress/clsact Qdisc.
 * The fast path only needs to access filter list and to update stats
 */
struct mini_Qdisc {
	struct tcf_proto *filter_list;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct gnet_stats_queue	__percpu *cpu_qstats;
	struct rcu_head rcu;
};

static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
						const struct sk_buff *skb)
{
	bstats_cpu_update(this_cpu_ptr(miniq->cpu_bstats), skb);
}

static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
{
	this_cpu_inc(miniq->cpu_qstats->drops);
}

struct mini_Qdisc_pair {
	struct mini_Qdisc miniq1;
	struct mini_Qdisc miniq2;
	struct mini_Qdisc __rcu **p_miniq;
};

void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
			  struct tcf_proto *tp_head);
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
			  struct mini_Qdisc __rcu **p_miniq);

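/* Usage sketch (illustrative shape only; see net/sched/sch_ingress.c for the
 * real use): ingress/clsact keep two mini_Qdisc buffers and flip the
 * published pointer RCU-style whenever the filter chain head changes:
 *
 *	struct mini_Qdisc_pair miniqp;
 *	struct mini_Qdisc __rcu *miniq;	// read by the RX fast path
 *
 *	mini_qdisc_pair_init(&miniqp, sch, &miniq);
 *	// from the tcf_chain_head_change_t callback:
 *	mini_qdisc_pair_swap(&miniqp, tp_head);
 */
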
#endif