/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table {
	struct tc_ratespec rate;
	u32		data[256];
	struct qdisc_rate_table *next;
	int		refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
};

struct qdisc_size_table {
	struct rcu_head		rcu;
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

/* similar to sk_buff_head, but skb->prev pointer is undefined. */
struct qdisc_skb_head {
	struct sk_buff	*head;
	struct sk_buff	*tail;
	__u32		qlen;
	spinlock_t	lock;
};

struct Qdisc {
	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *sch);
	unsigned int		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_INGRESS		2
#define TCQ_F_CAN_BYPASS	4
#define TCQ_F_MQROOT		8
#define TCQ_F_ONETXQUEUE	0x10 /* dequeue_skb() can assume all skbs are for
				      * q->dev_queue : It can test
				      * netif_xmit_frozen_or_stopped() before
				      * dequeueing next packet.
				      * It is true for MQ/MQPRIO slaves, or a
				      * non-multiqueue device.
				      */
#define TCQ_F_WARN_NONWC	(1 << 16)
#define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT		0x40 /* root of its hierarchy :
				      * qdisc_tree_decrease_qlen() should stop.
				      */
#define TCQ_F_INVISIBLE		0x80 /* invisible by default in dump */
#define TCQ_F_NOLOCK		0x100 /* qdisc does not require locking */
#define TCQ_F_OFFLOADED		0x200 /* qdisc is offloaded to HW */
	u32			limit;
	const struct Qdisc_ops	*ops;
	struct qdisc_size_table	__rcu *stab;
	struct hlist_node       hash;
	u32			handle;
	u32			parent;

	struct netdev_queue	*dev_queue;

	struct net_rate_estimator __rcu *rate_est;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct gnet_stats_queue	__percpu *cpu_qstats;

	/*
	 * For performance reasons on SMP, we put highly modified fields at the end
	 */
	struct sk_buff_head	gso_skb ____cacheline_aligned_in_smp;
	struct qdisc_skb_head	q;
	struct gnet_stats_basic_packed bstats;
	seqcount_t		running;
	struct gnet_stats_queue	qstats;
	unsigned long		state;
	struct Qdisc            *next_sched;
	struct sk_buff_head	skb_bad_txq;
	int			padded;
	refcount_t		refcnt;

	spinlock_t		busylock ____cacheline_aligned_in_smp;
};

static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return;
	refcount_inc(&qdisc->refcnt);
}

static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
	return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc_is_running(qdisc))
		return false;
	/* Variant of write_seqcount_begin() telling lockdep a trylock
	 * was attempted.
	 */
	raw_write_seqcount_begin(&qdisc->running);
	seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
	return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	write_seqcount_end(&qdisc->running);
}
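
/* Usage sketch (illustrative, not part of the API defined here):
 * qdisc_run_begin() acts as a trylock around the dequeue loop, so only one
 * CPU runs a given qdisc at a time.  __qdisc_run() below stands for the
 * dequeue/transmit loop declared elsewhere (net/pkt_sched.h).
 *
 *	static void example_run(struct Qdisc *q)
 *	{
 *		if (qdisc_run_begin(q)) {
 *			__qdisc_run(q);
 *			qdisc_run_end(q);
 *		}
 *	}
 */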

static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
	return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
	/* Non-BQL migrated drivers will return 0, too. */
	return dql_avail(&txq->dql);
#else
	return 0;
#endif
}

struct Qdisc_class_ops {
	/* Child qdisc manipulation */
	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					struct Qdisc *, struct Qdisc **,
					struct netlink_ext_ack *extack);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*find)(struct Qdisc *, u32 classid);
	int			(*change)(struct Qdisc *, u32, u32,
					struct nlattr **, unsigned long *,
					struct netlink_ext_ack *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker * arg);

	/* Filter manipulation */
	struct tcf_block *	(*tcf_block)(struct Qdisc *sch,
					     unsigned long arg,
					     struct netlink_ext_ack *extack);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg*);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					struct gnet_dump *);
};

struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;
	unsigned int		static_flags;

	int 			(*enqueue)(struct sk_buff *skb,
					   struct Qdisc *sch,
					   struct sk_buff **to_free);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);

	int			(*init)(struct Qdisc *sch, struct nlattr *arg,
					struct netlink_ext_ack *extack);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *sch,
					  struct nlattr *arg,
					  struct netlink_ext_ack *extack);
	void			(*attach)(struct Qdisc *sch);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	void			(*ingress_block_set)(struct Qdisc *sch,
						     u32 block_index);
	void			(*egress_block_set)(struct Qdisc *sch,
						    u32 block_index);
	u32			(*ingress_block_get)(struct Qdisc *sch);
	u32			(*egress_block_get)(struct Qdisc *sch);

	struct module		*owner;
};


struct tcf_result {
	union {
		struct {
			unsigned long	class;
			u32		classid;
		};
		const struct tcf_proto *goto_tp;
	};
};

struct tcf_proto_ops {
	struct list_head	head;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	int			(*init)(struct tcf_proto*);
	void			(*destroy)(struct tcf_proto*);

	void*			(*get)(struct tcf_proto*, u32 handle);
	int			(*change)(struct net *net, struct sk_buff *,
					struct tcf_proto*, unsigned long,
					u32 handle, struct nlattr **,
					void **, bool);
	int			(*delete)(struct tcf_proto *tp, void *arg,
					  bool *last);
	void			(*walk)(struct tcf_proto*, struct tcf_walker *arg);
	void			(*bind_class)(void *, u32, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct net*, struct tcf_proto*, void *,
					struct sk_buff *skb, struct tcmsg*);

	struct module		*owner;
};

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto __rcu	*next;
	void __rcu		*root;
	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	void			*data;
	const struct tcf_proto_ops	*ops;
	struct tcf_chain	*chain;
	struct rcu_head		rcu;
};

struct qdisc_skb_cb {
	unsigned int		pkt_len;
	u16			slave_dev_queue_mapping;
	u16			tc_classid;
#define QDISC_CB_PRIV_LEN 20
	unsigned char		data[QDISC_CB_PRIV_LEN];
};

typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);

struct tcf_chain {
	struct tcf_proto __rcu *filter_chain;
	struct list_head filter_chain_list;
	struct list_head list;
	struct tcf_block *block;
	u32 index; /* chain index */
	unsigned int refcnt;
};

struct tcf_block {
	struct list_head chain_list;
	u32 index; /* block index for shared blocks */
	unsigned int refcnt;
	struct net *net;
	struct Qdisc *q;
	struct list_head cb_list;
	struct list_head owner_list;
	bool keep_dst;
	unsigned int offloadcnt; /* Number of offloaded filters */
	unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
};

static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
{
	if (*flags & TCA_CLS_FLAGS_IN_HW)
		return;
	*flags |= TCA_CLS_FLAGS_IN_HW;
	block->offloadcnt++;
}

static inline void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
{
	if (!(*flags & TCA_CLS_FLAGS_IN_HW))
		return;
	*flags &= ~TCA_CLS_FLAGS_IN_HW;
	block->offloadcnt--;
}
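
/* Usage sketch (illustrative): a classifier bumps the per-block counter once
 * per filter when a driver accepts the hardware offload, and drops it again
 * when that filter's hardware entry is removed, keeping TCA_CLS_FLAGS_IN_HW
 * consistent with offloadcnt.  "filter" and offload_to_hw() are hypothetical.
 *
 *	err = offload_to_hw(block, filter);
 *	if (!err)
 *		tcf_block_offload_inc(block, &filter->flags);
 *
 * and later, once the hardware entry is gone:
 *
 *	tcf_block_offload_dec(block, &filter->flags);
 */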

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	struct qdisc_skb_cb *qcb;

	BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
}

static inline int qdisc_qlen_cpu(const struct Qdisc *q)
{
	return this_cpu_ptr(q->cpu_qstats)->qlen;
}

static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}

static inline int qdisc_qlen_sum(const struct Qdisc *q)
{
	__u32 qlen = 0;
	int i;

	if (q->flags & TCQ_F_NOLOCK) {
		for_each_possible_cpu(i)
			qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
	} else {
		qlen = q->q.qlen;
	}

	return qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

	return q;
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest.  This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us.  Otherwise we'll potentially lock the wrong qdisc
 * root.  This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return &root->running;
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(const struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(const struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
	return ntx < dev->real_num_tx_queues ?
			default_qdisc_ops : &pfifo_fast_ops;
}

struct Qdisc_class_common {
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash {
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	if (!id)
		return NULL;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}
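
/* Usage sketch (illustrative): a classful qdisc embeds Qdisc_class_common in
 * its own class type, keys it by classid in a Qdisc_class_hash and recovers
 * the outer structure with container_of().  "example_class"/"example_sched"
 * are hypothetical; qdisc_priv() is declared in net/pkt_sched.h.
 *
 *	struct example_class {
 *		struct Qdisc_class_common common;
 *		...
 *	};
 *
 *	static struct example_class *example_find(u32 classid, struct Qdisc *sch)
 *	{
 *		struct example_sched *q = qdisc_priv(sch);
 *		struct Qdisc_class_common *clc;
 *
 *		clc = qdisc_class_find(&q->clhash, classid);
 *		return clc ? container_of(clc, struct example_class, common) : NULL;
 *	}
 */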

static inline int tc_classid_to_hwtc(struct net_device *dev, u32 classid)
{
	u32 hwtc = TC_H_MIN(classid) - TC_H_MIN_PRIORITY;

	return (hwtc < netdev_get_num_tc(dev)) ? hwtc : -EINVAL;
}

int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_destroy(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
			       unsigned int len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops,
			  struct netlink_ext_ack *extack);
void qdisc_free(struct Qdisc *qdisc);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops, u32 parentid,
				struct netlink_ext_ack *extack);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);

static inline void skb_reset_tc(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_redirected = 0;
#endif
}

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	return skb->tc_at_ingress;
#else
	return false;
#endif
}

static inline bool skb_skip_tc_classify(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_skip_classify) {
		skb->tc_skip_classify = 0;
		return true;
	}
#endif
	return false;
}

/* Reset all TX qdiscs greater than index of a device.  */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	qdisc_reset_all_tx_gt(dev, 0);
}

/* Are all TX queues of the device empty?  */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = rcu_dereference(txq->qdisc);

		if (q->q.qlen) {
			rcu_read_unlock();
			return false;
		}
	}
	rcu_read_unlock();
	return true;
}

/* Are any of the TX qdiscs changing?  */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues?  */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
					   const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

	if (stab)
		__qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch, to_free);
}

static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
	return q->flags & TCQ_F_CPUSTATS;
}

static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
				  __u64 bytes, __u32 packets)
{
	bstats->bytes += bytes;
	bstats->packets += packets;
}

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
				 const struct sk_buff *skb)
{
	_bstats_update(bstats,
		       qdisc_pkt_len(skb),
		       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}

static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				      __u64 bytes, __u32 packets)
{
	u64_stats_update_begin(&bstats->syncp);
	_bstats_update(&bstats->bstats, bytes, packets);
	u64_stats_update_end(&bstats->syncp);
}

static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
				     const struct sk_buff *skb)
{
	u64_stats_update_begin(&bstats->syncp);
	bstats_update(&bstats->bstats, skb);
	u64_stats_update_end(&bstats->syncp);
}

static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
					   const struct sk_buff *skb)
{
	bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_dec(struct Qdisc *sch,
						const struct sk_buff *skb)
{
	this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
						const struct sk_buff *skb)
{
	this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}

static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
{
	this_cpu_dec(sch->cpu_qstats->qlen);
}

static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->requeues);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
	sch->qstats.drops += count;
}

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
	qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
	qstats->overlimits++;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
	qstats_drop_inc(&sch->qstats);
}

static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
	this_cpu_inc(sch->cpu_qstats->drops);
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
	sch->qstats.overlimits++;
}

static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
{
	qh->head = NULL;
	qh->tail = NULL;
	qh->qlen = 0;
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct qdisc_skb_head *qh)
{
	struct sk_buff *last = qh->tail;

	if (last) {
		skb->next = NULL;
		last->next = skb;
		qh->tail = skb;
	} else {
		qh->tail = skb;
		qh->head = skb;
	}
	qh->qlen++;
	qdisc_qstats_backlog_inc(sch, skb);

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
	struct sk_buff *skb = qh->head;

	if (likely(skb != NULL)) {
		qh->head = skb->next;
		qh->qlen--;
		if (qh->head == NULL)
			qh->tail = NULL;
		skb->next = NULL;
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (likely(skb != NULL)) {
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}

/* Instead of calling kfree_skb() while root qdisc lock is held,
 * queue the skb for future freeing at end of __dev_xmit_skb()
 */
static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
	skb->next = *to_free;
	*to_free = skb;
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct qdisc_skb_head *qh,
						   struct sk_buff **to_free)
{
	struct sk_buff *skb = __qdisc_dequeue_head(qh);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		qdisc_qstats_backlog_dec(sch, skb);
		__qdisc_drop(skb, to_free);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
						 struct sk_buff **to_free)
{
	return __qdisc_queue_drop_head(sch, &sch->q, to_free);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	const struct qdisc_skb_head *qh = &sch->q;

	return qh->head;
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	struct sk_buff *skb = skb_peek(&sch->gso_skb);

	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!skb) {
		skb = sch->dequeue(sch);

		if (skb) {
			__skb_queue_head(&sch->gso_skb, skb);
			/* it's still part of the queue */
			qdisc_qstats_backlog_inc(sch, skb);
			sch->q.qlen++;
		}
	}

	return skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = skb_peek(&sch->gso_skb);

	if (skb) {
		skb = __skb_dequeue(&sch->gso_skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}
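
/* Usage sketch (illustrative): a non-work-conserving qdisc (a shaper, say)
 * peeks at its child's head packet via the child's ->peek() (often
 * qdisc_peek_dequeued), decides whether the packet may be sent yet, and only
 * then commits with qdisc_dequeue_peeked() so the skb is not lost while
 * transmission has to wait.  "example_sched" (with a child at q->qdisc) and
 * example_may_send_now() are hypothetical; qdisc_priv() is from
 * net/pkt_sched.h.
 *
 *	static struct sk_buff *example_dequeue(struct Qdisc *sch)
 *	{
 *		struct example_sched *q = qdisc_priv(sch);
 *		struct sk_buff *skb = q->qdisc->ops->peek(q->qdisc);
 *
 *		if (!skb || !example_may_send_now(sch, skb))
 *			return NULL;
 *		return qdisc_dequeue_peeked(q->qdisc);
 *	}
 */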

static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
	/*
	 * We do not know the backlog in bytes of this list, it
	 * is up to the caller to correct it
	 */
	ASSERT_RTNL();
	if (qh->qlen) {
		rtnl_kfree_skbs(qh->head, qh->tail);

		qh->head = NULL;
		qh->tail = NULL;
		qh->qlen = 0;
	}
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(&sch->q);
	sch->qstats.backlog = 0;
}

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
					  struct Qdisc **pold)
{
	struct Qdisc *old;

	sch_tree_lock(sch);
	old = *pold;
	*pold = new;
	if (old != NULL) {
		unsigned int qlen = old->q.qlen;
		unsigned int backlog = old->qstats.backlog;

		qdisc_reset(old);
		qdisc_tree_reduce_backlog(old, qlen, backlog);
	}
	sch_tree_unlock(sch);

	return old;
}
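
/* Usage sketch (illustrative): class ->graft() handlers typically swap in the
 * new child with qdisc_replace(), which resets the old child and prunes its
 * backlog from the tree under the qdisc tree lock.  "example_sched" holding a
 * single child at q->qdisc is hypothetical; qdisc_priv() is from
 * net/pkt_sched.h.
 *
 *	static int example_graft(struct Qdisc *sch, unsigned long arg,
 *				 struct Qdisc *new, struct Qdisc **old,
 *				 struct netlink_ext_ack *extack)
 *	{
 *		struct example_sched *q = qdisc_priv(sch);
 *
 *		if (!new)
 *			new = &noop_qdisc;
 *
 *		*old = qdisc_replace(sch, new, &q->qdisc);
 *		return 0;
 *	}
 */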

static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	rtnl_kfree_skbs(skb, skb);
	qdisc_qstats_drop(sch);
}

static inline int qdisc_drop_cpu(struct sk_buff *skb, struct Qdisc *sch,
				 struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_cpu_drop(sch);

	return NET_XMIT_DROP;
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	__qdisc_drop(skb, to_free);
	qdisc_qstats_drop(sch);

	return NET_XMIT_DROP;
}
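
/* Usage sketch (illustrative): an ->enqueue() implementation passes rejected
 * skbs to qdisc_drop() instead of freeing them directly; the caller frees the
 * to_free chain only after the root qdisc lock has been released (see
 * __qdisc_drop() above).  This mirrors the simple fifo-style pattern.
 *
 *	static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 *				   struct sk_buff **to_free)
 *	{
 *		if (unlikely(sch->q.qlen >= sch->limit))
 *			return qdisc_drop(skb, sch, to_free);
 *
 *		return qdisc_enqueue_tail(skb, sch);
 *	}
 */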

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
   long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;
	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF];
	return rtab->data[slot];
}

struct psched_ratecfg {
	u64	rate_bytes_ps; /* bytes per second */
	u32	mult;
	u16	overhead;
	u8	linklayer;
	u8	shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
				unsigned int len)
{
	len += r->overhead;

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
		return ((u64)(DIV_ROUND_UP(len,48)*53) * r->mult) >> r->shift;

	return ((u64)len * r->mult) >> r->shift;
}

void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
					  const struct psched_ratecfg *r)
{
	memset(res, 0, sizeof(*res));

	/* legacy struct tc_ratespec has a 32bit @rate field
	 * Qdisc using 64bit rate should add new attributes
	 * in order to maintain compatibility.
	 */
	res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

	res->overhead = r->overhead;
	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}
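
/* Usage sketch (illustrative): a shaper precomputes the mult/shift pair once
 * from the netlink-supplied tc_ratespec, then converts packet lengths to
 * transmission times in nanoseconds on the fast path without any division.
 * "conf" and "rate64" stand for values parsed from the qdisc's netlink
 * attributes (hypothetical here).
 *
 *	struct psched_ratecfg r;
 *
 *	psched_ratecfg_precompute(&r, &conf, rate64);
 *
 *	next_send_time = now + psched_l2t_ns(&r, qdisc_pkt_len(skb));
 */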

/* Mini Qdisc serves for specific needs of ingress/clsact Qdisc.
 * The fast path only needs to access filter list and to update stats
 */
struct mini_Qdisc {
	struct tcf_proto *filter_list;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct gnet_stats_queue	__percpu *cpu_qstats;
	struct rcu_head rcu;
};

static inline void mini_qdisc_bstats_cpu_update(struct mini_Qdisc *miniq,
						const struct sk_buff *skb)
{
	bstats_cpu_update(this_cpu_ptr(miniq->cpu_bstats), skb);
}

static inline void mini_qdisc_qstats_cpu_drop(struct mini_Qdisc *miniq)
{
	this_cpu_inc(miniq->cpu_qstats->drops);
}

struct mini_Qdisc_pair {
	struct mini_Qdisc miniq1;
	struct mini_Qdisc miniq2;
	struct mini_Qdisc __rcu **p_miniq;
};

void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
			  struct tcf_proto *tp_head);
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
			  struct mini_Qdisc __rcu **p_miniq);
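
/* Usage sketch (illustrative): ingress/clsact keep a mini_Qdisc_pair in their
 * private data, attach it to the device's RCU-visible mini-qdisc pointer at
 * init time, and flip to the pair's other buffer whenever the filter chain
 * head changes.  The names below loosely follow sch_ingress.c but the
 * surrounding structure is abbreviated.
 *
 *	mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress);
 *
 *	static void example_chain_head_change(struct tcf_proto *tp_head, void *priv)
 *	{
 *		struct mini_Qdisc_pair *miniqp = priv;
 *
 *		mini_qdisc_pair_swap(miniqp, tp_head);
 *	}
 */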

#endif