/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
#include <net/flow_offload.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;
struct bpf_flow_keys;

struct qdisc_rate_table {
	struct tc_ratespec rate;
	u32		data[256];
	struct qdisc_rate_table *next;
	int		refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
};

struct qdisc_size_table {
	struct rcu_head		rcu;
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

/* similar to sk_buff_head, but skb->prev pointer is undefined. */
struct qdisc_skb_head {
	struct sk_buff	*head;
	struct sk_buff	*tail;
	__u32		qlen;
	spinlock_t	lock;
};

struct Qdisc {
	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *sch);
	unsigned int		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_INGRESS		2
#define TCQ_F_CAN_BYPASS	4
#define TCQ_F_MQROOT		8
#define TCQ_F_ONETXQUEUE	0x10 /* dequeue_skb() can assume all skbs are for
				      * q->dev_queue : It can test
				      * netif_xmit_frozen_or_stopped() before
				      * dequeueing next packet.
				      * It's true for MQ/MQPRIO slaves, or a
				      * non-multiqueue device.
				      */
#define TCQ_F_WARN_NONWC	(1 << 16)
#define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT		0x40 /* root of its hierarchy :
				      * qdisc_tree_decrease_qlen() should stop.
				      */
#define TCQ_F_INVISIBLE		0x80 /* invisible by default in dump */
#define TCQ_F_NOLOCK		0x100 /* qdisc does not require locking */
#define TCQ_F_OFFLOADED		0x200 /* qdisc is offloaded to HW */
	u32			limit;
	const struct Qdisc_ops	*ops;
	struct qdisc_size_table	__rcu *stab;
	struct hlist_node       hash;
	u32			handle;
	u32			parent;

	struct netdev_queue	*dev_queue;

	struct net_rate_estimator __rcu *rate_est;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct gnet_stats_queue	__percpu *cpu_qstats;
	int			padded;
	refcount_t		refcnt;

	/*
	 * For performance's sake on SMP, we put highly modified fields at the end
	 */
	struct sk_buff_head	gso_skb ____cacheline_aligned_in_smp;
	struct qdisc_skb_head	q;
	struct gnet_stats_basic_packed bstats;
	seqcount_t		running;
	struct gnet_stats_queue	qstats;
	unsigned long		state;
	struct Qdisc            *next_sched;
	struct sk_buff_head	skb_bad_txq;

	spinlock_t		busylock ____cacheline_aligned_in_smp;
	spinlock_t		seqlock;

	/* for NOLOCK qdisc, true if there are no enqueued skbs */
	bool			empty;
	struct rcu_head		rcu;
};

static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return;
	refcount_inc(&qdisc->refcnt);
}

/* Intended to be used by unlocked users, when concurrent qdisc release is
 * possible.
 */

static inline struct Qdisc *qdisc_refcount_inc_nz(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return qdisc;
	if (refcount_inc_not_zero(&qdisc->refcnt))
		return qdisc;
	return NULL;
}
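
/* Illustrative sketch (hypothetical helper): the unlocked-lookup pattern the
 * function above supports — dereference under RCU and keep the qdisc only if
 * its refcount was still nonzero, i.e. it was not concurrently being
 * released.
 */
static inline struct Qdisc *example_qdisc_take_ref(struct Qdisc __rcu **pq)
{
	struct Qdisc *q;

	rcu_read_lock();
	q = rcu_dereference(*pq);
	if (q)
		q = qdisc_refcount_inc_nz(q);
	rcu_read_unlock();

	return q;	/* NULL when the qdisc was already going away */
}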

static inline bool qdisc_is_running(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK)
		return spin_is_locked(&qdisc->seqlock);
	return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
}

static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
	return q->flags & TCQ_F_CPUSTATS;
}

static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
{
	if (qdisc_is_percpu_stats(qdisc))
		return qdisc->empty;
	return !qdisc->q.qlen;
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK) {
		if (!spin_trylock(&qdisc->seqlock))
			return false;
		qdisc->empty = false;
	} else if (qdisc_is_running(qdisc)) {
		return false;
	}
	/* Variant of write_seqcount_begin() telling lockdep a trylock
	 * was attempted.
	 */
	raw_write_seqcount_begin(&qdisc->running);
	seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
	return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	write_seqcount_end(&qdisc->running);
	if (qdisc->flags & TCQ_F_NOLOCK)
		spin_unlock(&qdisc->seqlock);
}
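
/* Illustrative sketch (hypothetical helper): how the transmit path brackets
 * qdisc work with qdisc_run_begin()/qdisc_run_end() so that only one CPU
 * runs a given qdisc at a time; the real loop lives in __qdisc_run().
 */
static inline void example_drive_qdisc(struct Qdisc *q)
{
	if (qdisc_run_begin(q)) {
		/* exclusive section: dequeue and transmit packets here */
		qdisc_run_end(q);
	}
}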

static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
	return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
	/* Non-BQL migrated drivers will return 0, too. */
	return dql_avail(&txq->dql);
#else
	return 0;
#endif
}
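
/* Illustrative sketch (hypothetical helper): combining the two checks above
 * the way the core dequeue path does — only single-queue (TCQ_F_ONETXQUEUE)
 * qdiscs may bulk-dequeue, and BQL (when built in) bounds how much is worth
 * pulling at once.
 */
static inline bool example_can_bulk_dequeue(const struct Qdisc *q,
					    const struct netdev_queue *txq)
{
	return qdisc_may_bulk(q) && qdisc_avail_bulklimit(txq) > 0;
}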

struct Qdisc_class_ops {
	unsigned int		flags;
	/* Child qdisc manipulation */
	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					struct Qdisc *, struct Qdisc **,
					struct netlink_ext_ack *extack);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*find)(struct Qdisc *, u32 classid);
	int			(*change)(struct Qdisc *, u32, u32,
					struct nlattr **, unsigned long *,
					struct netlink_ext_ack *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker * arg);

	/* Filter manipulation */
	struct tcf_block *	(*tcf_block)(struct Qdisc *sch,
					     unsigned long arg,
					     struct netlink_ext_ack *extack);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg*);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					struct gnet_dump *);
};

/* Qdisc_class_ops flag values */

/* Implements API that doesn't require rtnl lock */
enum qdisc_class_ops_flags {
	QDISC_CLASS_OPS_DOIT_UNLOCKED = 1,
};

struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;
	unsigned int		static_flags;

	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);

	int			(*init)(struct Qdisc *sch, struct nlattr *arg,
					struct netlink_ext_ack *extack);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *sch,
					  struct nlattr *arg,
					  struct netlink_ext_ack *extack);
	void			(*attach)(struct Qdisc *sch);
	int			(*change_tx_queue_len)(struct Qdisc *, unsigned int);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	void			(*ingress_block_set)(struct Qdisc *sch,
						     u32 block_index);
	void			(*egress_block_set)(struct Qdisc *sch,
						    u32 block_index);
	u32			(*ingress_block_get)(struct Qdisc *sch);
	u32			(*egress_block_get)(struct Qdisc *sch);

	struct module		*owner;
};
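
/* Illustrative sketch (hypothetical functions, not a real kernel qdisc):
 * the signatures a Qdisc_ops implementation fills in.  This "blackhole"
 * pair accepts every packet and hands it back through to_free (mirroring
 * what __qdisc_drop(), defined later in this header, does) and never has
 * anything to dequeue.
 */
static inline int example_blackhole_enqueue(struct sk_buff *skb,
					    struct Qdisc *sch,
					    struct sk_buff **to_free)
{
	skb->next = *to_free;	/* chain onto the deferred-free list */
	*to_free = skb;
	return NET_XMIT_CN;
}

static inline struct sk_buff *example_blackhole_dequeue(struct Qdisc *sch)
{
	return NULL;		/* queue is always empty */
}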


struct tcf_result {
	union {
		struct {
			unsigned long	class;
			u32		classid;
		};
		const struct tcf_proto *goto_tp;

		/* used in the skb_tc_reinsert function */
		struct {
			bool		ingress;
			struct gnet_stats_queue *qstats;
		};
	};
};

struct tcf_chain;

struct tcf_proto_ops {
	struct list_head	head;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	int			(*init)(struct tcf_proto*);
	void			(*destroy)(struct tcf_proto *tp, bool rtnl_held,
					   struct netlink_ext_ack *extack);

	void*			(*get)(struct tcf_proto*, u32 handle);
	void			(*put)(struct tcf_proto *tp, void *f);
	int			(*change)(struct net *net, struct sk_buff *,
					struct tcf_proto*, unsigned long,
					u32 handle, struct nlattr **,
					void **, bool, bool,
					struct netlink_ext_ack *);
	int			(*delete)(struct tcf_proto *tp, void *arg,
					  bool *last, bool rtnl_held,
					  struct netlink_ext_ack *);
	void			(*walk)(struct tcf_proto *tp,
					struct tcf_walker *arg, bool rtnl_held);
	int			(*reoffload)(struct tcf_proto *tp, bool add,
					     flow_setup_cb_t *cb, void *cb_priv,
					     struct netlink_ext_ack *extack);
	void			(*bind_class)(void *, u32, unsigned long);
	void *			(*tmplt_create)(struct net *net,
						struct tcf_chain *chain,
						struct nlattr **tca,
						struct netlink_ext_ack *extack);
	void			(*tmplt_destroy)(void *tmplt_priv);

	/* rtnetlink specific */
	int			(*dump)(struct net*, struct tcf_proto*, void *,
					struct sk_buff *skb, struct tcmsg*,
					bool);
	int			(*tmplt_dump)(struct sk_buff *skb,
					      struct net *net,
					      void *tmplt_priv);

	struct module		*owner;
	int			flags;
};

enum tcf_proto_ops_flags {
	TCF_PROTO_OPS_DOIT_UNLOCKED = 1,
};

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto __rcu	*next;
	void __rcu		*root;

	/* called under RCU BH lock*/
	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	void			*data;
	const struct tcf_proto_ops	*ops;
	struct tcf_chain	*chain;
	/* Lock protects tcf_proto shared state and can be used by unlocked
	 * classifiers to protect their private data.
	 */
	spinlock_t		lock;
	bool			deleting;
	refcount_t		refcnt;
	struct rcu_head		rcu;
};

struct qdisc_skb_cb {
	struct {
		unsigned int		pkt_len;
		u16			slave_dev_queue_mapping;
		u16			tc_classid;
	};
#define QDISC_CB_PRIV_LEN 20
	unsigned char		data[QDISC_CB_PRIV_LEN];
};

typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);

struct tcf_chain {
	/* Protects filter_chain. */
	struct mutex filter_chain_lock;
	struct tcf_proto __rcu *filter_chain;
	struct list_head list;
	struct tcf_block *block;
	u32 index; /* chain index */
	unsigned int refcnt;
	unsigned int action_refcnt;
	bool explicitly_created;
	bool flushing;
	const struct tcf_proto_ops *tmplt_ops;
	void *tmplt_priv;
	struct rcu_head rcu;
};

struct tcf_block {
	/* Lock protects tcf_block and lifetime-management data of chains
	 * attached to the block (refcnt, action_refcnt, explicitly_created).
	 */
	struct mutex lock;
	struct list_head chain_list;
	u32 index; /* block index for shared blocks */
	refcount_t refcnt;
	struct net *net;
	struct Qdisc *q;
	struct rw_semaphore cb_lock; /* protects cb_list and offload counters */
	struct flow_block flow_block;
	struct list_head owner_list;
	bool keep_dst;
	atomic_t offloadcnt; /* Number of offloaded filters */
	unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
	struct {
		struct tcf_chain *chain;
		struct list_head filter_chain_list;
	} chain0;
	struct rcu_head rcu;
};

#ifdef CONFIG_PROVE_LOCKING
static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
{
	return lockdep_is_held(&chain->filter_chain_lock);
}

static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
{
	return lockdep_is_held(&tp->lock);
}
#else
static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
{
	return true;
}

static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
{
	return true;
}
#endif /* #ifdef CONFIG_PROVE_LOCKING */

#define tcf_chain_dereference(p, chain)					\
	rcu_dereference_protected(p, lockdep_tcf_chain_is_locked(chain))

#define tcf_proto_dereference(p, tp)					\
	rcu_dereference_protected(p, lockdep_tcf_proto_is_locked(tp))

static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
{
	if (*flags & TCA_CLS_FLAGS_IN_HW)
		return;
	*flags |= TCA_CLS_FLAGS_IN_HW;
	atomic_inc(&block->offloadcnt);
}

static inline void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
{
	if (!(*flags & TCA_CLS_FLAGS_IN_HW))
		return;
	*flags &= ~TCA_CLS_FLAGS_IN_HW;
	atomic_dec(&block->offloadcnt);
}

static inline void
tc_cls_offload_cnt_update(struct tcf_block *block, u32 *cnt,
			  u32 *flags, bool add)
{
	if (add) {
		if (!*cnt)
			tcf_block_offload_inc(block, flags);
		(*cnt)++;
	} else {
		(*cnt)--;
		if (!*cnt)
			tcf_block_offload_dec(block, flags);
	}
}

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	struct qdisc_skb_cb *qcb;

	BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
}

static inline int qdisc_qlen_cpu(const struct Qdisc *q)
{
	return this_cpu_ptr(q->cpu_qstats)->qlen;
}

static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}

static inline int qdisc_qlen_sum(const struct Qdisc *q)
{
	__u32 qlen = q->qstats.qlen;
	int i;

	if (qdisc_is_percpu_stats(q)) {
		for_each_possible_cpu(i)
			qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
	} else {
		qlen += q->q.qlen;
	}

	return qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

	return q;
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest.  This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us.  Otherwise we'll potentially lock the wrong qdisc
 * root.  This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return &root->running;
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(const struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(const struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}
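
/* Illustrative sketch (hypothetical helper): a qdisc ->change() handler
 * typically applies new parameters under the tree lock so the dequeue path
 * never observes a half-updated configuration.
 */
static inline void example_change_limit(struct Qdisc *sch, u32 new_limit)
{
	sch_tree_lock(sch);
	sch->limit = new_limit;
	sch_tree_unlock(sch);
}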

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
	return ntx < dev->real_num_tx_queues ?
			default_qdisc_ops : &pfifo_fast_ops;
}

struct Qdisc_class_common {
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash {
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	if (!id)
		return NULL;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}
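
/* Illustrative sketch (hypothetical helper): a classful qdisc's ->find()
 * usually reduces to qdisc_class_find() on its private class hash, with the
 * returned pointer doubling as the opaque unsigned long class handle.
 */
static inline unsigned long example_find_class(struct Qdisc_class_hash *hash,
					       u32 classid)
{
	struct Qdisc_class_common *cl = qdisc_class_find(hash, classid);

	return (unsigned long)cl;	/* 0 means "no such class" */
}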

static inline int tc_classid_to_hwtc(struct net_device *dev, u32 classid)
{
	u32 hwtc = TC_H_MIN(classid) - TC_H_MIN_PRIORITY;

	return (hwtc < netdev_get_num_tc(dev)) ? hwtc : -EINVAL;
}

int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

int dev_qdisc_change_tx_queue_len(struct net_device *dev);
void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_put(struct Qdisc *qdisc);
void qdisc_put_unlocked(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len);
#ifdef CONFIG_NET_SCHED
int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
			      void *type_data);
void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
				struct Qdisc *new, struct Qdisc *old,
				enum tc_setup_type type, void *type_data,
				struct netlink_ext_ack *extack);
#else
static inline int
qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
			  void *type_data)
{
	q->flags &= ~TCQ_F_OFFLOADED;
	return 0;
}

static inline void
qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
			   struct Qdisc *new, struct Qdisc *old,
			   enum tc_setup_type type, void *type_data,
			   struct netlink_ext_ack *extack)
{
}
#endif
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops,
			  struct netlink_ext_ack *extack);
void qdisc_free(struct Qdisc *qdisc);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops, u32 parentid,
				struct netlink_ext_ack *extack);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);

static inline void skb_reset_tc(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_redirected = 0;
#endif
}

static inline bool skb_is_tc_redirected(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	return skb->tc_redirected;
#else
	return false;
#endif
}

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	return skb->tc_at_ingress;
#else
	return false;
#endif
}

static inline bool skb_skip_tc_classify(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_skip_classify) {
		skb->tc_skip_classify = 0;
		return true;
	}
#endif
	return false;
}

/* Reset all TX qdiscs greater than index of a device.  */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	qdisc_reset_all_tx_gt(dev, 0);
}

/* Are all TX queues of the device empty?  */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = rcu_dereference(txq->qdisc);

		if (!qdisc_is_empty(q)) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

/* Are any of the TX qdiscs changing?  */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues?  */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
					   const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

	if (stab)
		__qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch, to_free);
}

static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
				  __u64 bytes, __u32 packets)
{
	bstats->bytes += bytes;
	bstats->packets += packets;
}

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
				 const struct sk_buff *skb)
{
	_bstats_update(bstats,
		       qdisc_pkt_len(skb),
		       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}

static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				      __u64 bytes, __u32 packets)
{
	u64_stats_update_begin(&bstats->syncp);
	_bstats_update(&bstats->bstats, bytes, packets);
	u64_stats_update_end(&bstats->syncp);
}

static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				     const struct sk_buff *skb)
{
	u64_stats_update_begin(&bstats->syncp);
	bstats_update(&bstats->bstats, skb);
	u64_stats_update_end(&bstats->syncp);
}

static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
					   const struct sk_buff *skb)
{
	bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_dec(struct Qdisc *sch,
						const struct sk_buff *skb)
{
	this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
						const struct sk_buff *skb)
{
	this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
{
	this_cpu_dec(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->requeues);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
	sch->qstats.drops += count;
}

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
	qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
	qstats->overlimits++;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
	qstats_drop_inc(&sch->qstats);
}

static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->drops);
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
	sch->qstats.overlimits++;
}

static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
{
	__u32 qlen = qdisc_qlen_sum(sch);

	return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
}

static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch,  __u32 *qlen,
					     __u32 *backlog)
{
	struct gnet_stats_queue qstats = { 0 };
	__u32 len = qdisc_qlen_sum(sch);

	__gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len);
	*qlen = qstats.qlen;
	*backlog = qstats.backlog;
}

static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
{
	__u32 qlen, backlog;

	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
	qdisc_tree_reduce_backlog(sch, qlen, backlog);
}

static inline void qdisc_purge_queue(struct Qdisc *sch)
{
	__u32 qlen, backlog;

	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
	qdisc_reset(sch);
	qdisc_tree_reduce_backlog(sch, qlen, backlog);
}

static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
{
	qh->head = NULL;
	qh->tail = NULL;
	qh->qlen = 0;
}

static inline void __qdisc_enqueue_tail(struct sk_buff *skb,
					struct qdisc_skb_head *qh)
{
	struct sk_buff *last = qh->tail;

	if (last) {
		skb->next = NULL;
		last->next = skb;
		qh->tail = skb;
	} else {
		qh->tail = skb;
		qh->head = skb;
	}
	qh->qlen++;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	__qdisc_enqueue_tail(skb, &sch->q);
	qdisc_qstats_backlog_inc(sch, skb);
	return NET_XMIT_SUCCESS;
}

static inline void __qdisc_enqueue_head(struct sk_buff *skb,
					struct qdisc_skb_head *qh)
{
	skb->next = qh->head;

	if (!qh->head)
		qh->tail = skb;
	qh->head = skb;
	qh->qlen++;
}

static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
	struct sk_buff *skb = qh->head;

	if (likely(skb != NULL)) {
		qh->head = skb->next;
		qh->qlen--;
		if (qh->head == NULL)
			qh->tail = NULL;
		skb->next = NULL;
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (likely(skb != NULL)) {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}

/* Instead of calling kfree_skb() while root qdisc lock is held,
 * queue the skb for future freeing at end of __dev_xmit_skb()
 */
static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
	skb->next = *to_free;
	*to_free = skb;
}

static inline void __qdisc_drop_all(struct sk_buff *skb,
				    struct sk_buff **to_free)
{
	if (skb->prev)
		skb->prev->next = *to_free;
	else
		skb->next = *to_free;
	*to_free = skb;
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct qdisc_skb_head *qh,
						   struct sk_buff **to_free)
{
	struct sk_buff *skb = __qdisc_dequeue_head(qh);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		qdisc_qstats_backlog_dec(sch, skb);
		__qdisc_drop(skb, to_free);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
						 struct sk_buff **to_free)
{
	return __qdisc_queue_drop_head(sch, &sch->q, to_free);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	const struct qdisc_skb_head *qh = &sch->q;

	return qh->head;
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	struct sk_buff *skb = skb_peek(&sch->gso_skb);

	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!skb) {
		skb = sch->dequeue(sch);

		if (skb) {
			__skb_queue_head(&sch->gso_skb, skb);
			/* it's still part of the queue */
			qdisc_qstats_backlog_inc(sch, skb);
			sch->q.qlen++;
		}
	}

	return skb;
}

static inline void qdisc_update_stats_at_dequeue(struct Qdisc *sch,
						 struct sk_buff *skb)
{
	if (qdisc_is_percpu_stats(sch)) {
		qdisc_qstats_cpu_backlog_dec(sch, skb);
		qdisc_bstats_cpu_update(sch, skb);
		qdisc_qstats_cpu_qlen_dec(sch);
	} else {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
		sch->q.qlen--;
	}
}

static inline void qdisc_update_stats_at_enqueue(struct Qdisc *sch,
						 unsigned int pkt_len)
{
	if (qdisc_is_percpu_stats(sch)) {
		qdisc_qstats_cpu_qlen_inc(sch);
		this_cpu_add(sch->cpu_qstats->backlog, pkt_len);
	} else {
		sch->qstats.backlog += pkt_len;
		sch->q.qlen++;
	}
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = skb_peek(&sch->gso_skb);

	if (skb) {
		skb = __skb_dequeue(&sch->gso_skb);
		if (qdisc_is_percpu_stats(sch)) {
			qdisc_qstats_cpu_backlog_dec(sch, skb);
			qdisc_qstats_cpu_qlen_dec(sch);
		} else {
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
		}
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}
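
/* Illustrative sketch (the admission check is hypothetical): the
 * peek/dequeue contract for shaping qdiscs — look at the head packet first,
 * and only commit with qdisc_dequeue_peeked() once it may actually be sent,
 * so the qlen/backlog accounting above stays consistent.
 */
static inline struct sk_buff *example_shaper_dequeue(struct Qdisc *sch,
						     bool can_send_now)
{
	struct sk_buff *skb = qdisc_peek_dequeued(sch);

	if (!skb || !can_send_now)
		return NULL;	/* packet stays parked in gso_skb */

	return qdisc_dequeue_peeked(sch);
}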

static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
	/*
	 * We do not know the backlog in bytes of this list, it
	 * is up to the caller to correct it
	 */
	ASSERT_RTNL();
	if (qh->qlen) {
		rtnl_kfree_skbs(qh->head, qh->tail);

		qh->head = NULL;
		qh->tail = NULL;
		qh->qlen = 0;
	}
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(&sch->q);
	sch->qstats.backlog = 0;
}

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
					  struct Qdisc **pold)
{
	struct Qdisc *old;

	sch_tree_lock(sch);
	old = *pold;
	*pold = new;
	if (old != NULL)
		qdisc_tree_flush_backlog(old);
	sch_tree_unlock(sch);

	return old;
}
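
/* Illustrative sketch (hypothetical helper): a ->graft() implementation
 * typically swaps the child with qdisc_replace() and then releases the
 * displaced qdisc once nothing points at it any more.
 */
static inline void example_graft_child(struct Qdisc *sch, struct Qdisc *new,
				       struct Qdisc **pchild)
{
	struct Qdisc *old = qdisc_replace(sch, new, pchild);

	if (old)
		qdisc_put(old);
}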

static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	rtnl_kfree_skbs(skb, skb);
	qdisc_qstats_drop(sch);
}

static inline int qdisc_drop_cpu(struct sk_buff *skb, struct Qdisc *sch,
				 struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_cpu_drop(sch);

	return NET_XMIT_DROP;
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}
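
/* Illustrative sketch, close to what net/sched/sch_fifo.c really does: a
 * packet-count-limited FIFO enqueue built from qdisc_enqueue_tail() and
 * qdisc_drop() above; the function name here is hypothetical.
 */
static inline int example_pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
					struct sk_buff **to_free)
{
	if (likely(sch->q.qlen < sch->limit))
		return qdisc_enqueue_tail(skb, sch);

	return qdisc_drop(skb, sch, to_free);
}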

static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
				 struct sk_buff **to_free)
{
	__qdisc_drop_all(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
   long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;
	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF];
	return rtab->data[slot];
}
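
/* Worked example with hypothetical parameters: if the rate table was built
 * with cell_log = 3 and zero cell_align/overhead, a 100-byte packet maps to
 * slot 100 >> 3 = 12, so qdisc_l2t() returns rtab->data[12], the
 * userspace-precomputed transmission time for that size class.
 */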

struct psched_ratecfg {
	u64	rate_bytes_ps; /* bytes per second */
	u32	mult;
	u16	overhead;
	u8	linklayer;
	u8	shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
				unsigned int len)
{
	len += r->overhead;

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
		return ((u64)(DIV_ROUND_UP(len,48)*53) * r->mult) >> r->shift;

	return ((u64)len * r->mult) >> r->shift;
1257 1258
}

1259
void psched_ratecfg_precompute(struct psched_ratecfg *r,
1260 1261
			       const struct tc_ratespec *conf,
			       u64 rate64);
1262

1263 1264
static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
					  const struct psched_ratecfg *r)
1265
{
1266
	memset(res, 0, sizeof(*res));
1267 1268 1269 1270 1271 1272 1273

	/* legacy struct tc_ratespec has a 32bit @rate field
	 * Qdisc using 64bit rate should add new attributes
	 * in order to maintain compatibility.
	 */
	res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

	res->overhead = r->overhead;
	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}

/* Mini Qdisc serves for specific needs of ingress/clsact Qdisc.
 * The fast path only needs to access filter list and to update stats
 */
struct mini_Qdisc {
	struct tcf_proto *filter_list;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct gnet_stats_queue	__percpu *cpu_qstats;
	struct rcu_head rcu;
};

static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
						const struct sk_buff *skb)
{
	bstats_cpu_update(this_cpu_ptr(miniq->cpu_bstats), skb);
}

static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
{
	this_cpu_inc(miniq->cpu_qstats->drops);
}

struct mini_Qdisc_pair {
	struct mini_Qdisc miniq1;
	struct mini_Qdisc miniq2;
	struct mini_Qdisc __rcu **p_miniq;
};

void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
			  struct tcf_proto *tp_head);
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
			  struct mini_Qdisc __rcu **p_miniq);

static inline void skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res)
{
	struct gnet_stats_queue *stats = res->qstats;
	int ret;

	if (res->ingress)
		ret = netif_receive_skb(skb);
	else
		ret = dev_queue_xmit(skb);
	if (ret && stats)
		qstats_overlimit_inc(stats);
}

#endif