/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/flow_offload.h>

/* TC action not accessible from user space */
#define TC_ACT_REINSERT		(TC_ACT_VALUE_MAX + 1)

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};
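
/*
 * Example (illustrative sketch, not part of the API): a classifier's
 * ->walk() implementation typically honours the skip/stop protocol above.
 * "my_cls_head" and "my_filter" are hypothetical classifier-private types.
 *
 *	static void my_cls_walk(struct tcf_proto *tp, struct tcf_walker *arg)
 *	{
 *		struct my_cls_head *head = rtnl_dereference(tp->root);
 *		struct my_filter *f;
 *
 *		list_for_each_entry(f, &head->filters, list) {
 *			if (arg->count < arg->skip) {
 *				arg->count++;
 *				continue;
 *			}
 *			if (arg->fn(tp, f, arg) < 0) {
 *				arg->stop = 1;
 *				break;
 *			}
 *			arg->count++;
 *		}
 *	}
 */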

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

enum tcf_block_binder_type {
	TCF_BLOCK_BINDER_TYPE_UNSPEC,
	TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
	TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
};

struct tcf_block_ext_info {
	enum tcf_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
				     struct tcf_chain *chain);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);

static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}
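
/*
 * Example (illustrative sketch): a classful qdisc normally acquires its
 * filter block in ->init() and releases it in ->destroy(). "q" stands for
 * a hypothetical qdisc private area with "block" and "filter_list" fields.
 *
 *	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	if (err)
 *		return err;
 *	...
 *	tcf_block_put(q->block);
 */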

void *tcf_block_cb_priv(struct tcf_block_cb *block_cb);
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident);
void tcf_block_cb_incref(struct tcf_block_cb *block_cb);
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb);
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv,
					     struct netlink_ext_ack *extack);
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv, struct netlink_ext_ack *extack);
void __tcf_block_cb_unregister(struct tcf_block *block,
			       struct tcf_block_cb *block_cb);
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident);
int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				tc_indr_block_bind_cb_t *cb, void *cb_ident);
int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
			      tc_indr_block_bind_cb_t *cb, void *cb_ident);
void __tc_indr_block_cb_unregister(struct net_device *dev,
				   tc_indr_block_bind_cb_t *cb, void *cb_ident);
void tc_indr_block_cb_unregister(struct net_device *dev,
				 tc_indr_block_bind_cb_t *cb, void *cb_ident);
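
/*
 * Example (illustrative sketch): a driver bound to a block registers one
 * setup callback per block and unregisters it on unbind;
 * "my_setup_tc_block_cb" and "priv" are hypothetical driver entities
 * (priv doubles as cb_ident here, a common but not mandatory choice).
 *
 *	err = tcf_block_cb_register(block, my_setup_tc_block_cb,
 *				    priv, priv, extack);
 *	if (err)
 *		return err;
 *	...
 *	tcf_block_cb_unregister(block, my_setup_tc_block_cb, priv);
 */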

int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);

#else
static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline
int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, tc_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline
void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
{
	return NULL;
}

static inline
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident)
{
	return NULL;
}

static inline
void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
{
}

static inline
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
{
	return 0;
}

static inline
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv,
					     struct netlink_ext_ack *extack)
{
	return NULL;
}

static inline
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv, struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
void __tcf_block_cb_unregister(struct tcf_block *block,
			       struct tcf_block_cb *block_cb)
{
}

static inline
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident)
{
}

static inline
int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	return 0;
}

static inline
int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
			      tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	return 0;
}

static inline
void __tc_indr_block_cb_unregister(struct net_device *dev,
				   tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
}

static inline
void tc_indr_block_cb_unregister(struct net_device *dev,
				 tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
#endif

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline unsigned long
cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl)
{
	unsigned long old_cl;

	sch_tree_lock(q);
	old_cl = __cls_set_class(clp, cl);
	sch_tree_unlock(q);
	return old_cl;
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = cls_set_class(q, &r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	if (!q)
		return;
	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		q->ops->cl_ops->unbind_tcf(q, cl);
}
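
/*
 * Example (illustrative sketch): a classifier's ->change() path binds its
 * result to a class once the classid attribute has been parsed, and drops
 * the binding when the filter goes away. "f" is a hypothetical filter with
 * an embedded struct tcf_result; TCA_EXAMPLE_CLASSID is a hypothetical
 * attribute id.
 *
 *	if (tb[TCA_EXAMPLE_CLASSID]) {
 *		f->res.classid = nla_get_u32(tb[TCA_EXAMPLE_CLASSID]);
 *		tcf_bind_filter(tp, &f->res, base);
 *	}
 *	...
 *	tcf_unbind_filter(tp, &f->res);
 */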

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat (TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
	struct net *net;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = NULL;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}
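
/*
 * Example (illustrative sketch): classifiers initialize the extension
 * block before parsing actions and must destroy it on the error path.
 * TCA_EXAMPLE_ACT and TCA_EXAMPLE_POLICE are hypothetical attribute ids;
 * the other names follow a typical ->change() signature.
 *
 *	err = tcf_exts_init(&f->exts, TCA_EXAMPLE_ACT, TCA_EXAMPLE_POLICE);
 *	if (err < 0)
 *		return err;
 *	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, extack);
 *	if (err < 0) {
 *		tcf_exts_destroy(&f->exts);
 *		return err;
 *	}
 */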

/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise may race with
 * tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}
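
/*
 * Example (illustrative sketch): a classifier deferring filter destruction
 * to a work queue pins the netns across the deferral and frees
 * synchronously when the netns is already being dismantled;
 * "my_filter_free_work" is a hypothetical worker that calls
 * tcf_exts_put_net() before freeing.
 *
 *	if (tcf_exts_get_net(&f->exts))
 *		tcf_queue_work(&f->rwork, my_filter_free_work);
 *	else
 *		my_filter_free(f);
 */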

#ifdef CONFIG_NET_CLS_ACT
#define tcf_exts_for_each_action(i, a, exts) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
#define tcf_exts_for_each_action(i, a, exts) \
	for (; 0; (void)(i), (void)(a), (void)(exts))
#endif
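
/*
 * Example (illustrative sketch): iterating all actions attached to a
 * filter, e.g. when translating them for hardware offload;
 * "my_parse_action" is a hypothetical helper.
 *
 *	struct tc_action *a;
 *	int i;
 *
 *	tcf_exts_for_each_action(i, a, exts) {
 *		err = my_parse_action(a);
 *		if (err)
 *			return err;
 *	}
 */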

static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, lastuse, true);
	}

	preempt_enable();
#endif
}

/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

/**
 * tcf_exts_has_one_action - check if exactly one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if exactly one action is present.
 */
static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions == 1;
#else
	return false;
#endif
}

static inline struct tc_action *tcf_exts_first_action(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->actions[0];
#else
	return NULL;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}
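
/*
 * Example (illustrative sketch): the tail of a classifier's ->classify()
 * loop on a successful match; a negative return value means the filter is
 * treated as unmatched and the lookup continues.
 *
 *	*res = f->res;
 *	err = tcf_exts_exec(skb, &f->exts, res);
 *	if (err < 0)
 *		continue;
 *	return err;
 */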

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr,
		      struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);

/**
 * struct tcf_pkt_info - packet information
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 * @net: the network namespace the match was created in
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}
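
/*
 * Worked example: for three ematches chained as "A AND B OR C" (evaluated
 * strictly left to right, no precedence), tcf_em_early_end() stops the
 * walk right after A fails, since the pending AND can no longer become
 * true, and right after B succeeds, since the pending OR is already
 * satisfied; C is only evaluated when neither shortcut applies.
 */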

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;
};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches respecting their logic relations returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, no ematches are configured
 * or ematch is not enabled in the kernel, otherwise 0 is returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
		case TCF_LAYER_LINK:
			return skb_mac_header(skb);
		case TCF_LAYER_NETWORK:
			return skb_network_header(skb);
		case TCF_LAYER_TRANSPORT:
			return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}
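
/*
 * Example (illustrative sketch): an ematch ->match() implementation
 * bounds-checks packet data before dereferencing it; "offset", "len" and
 * "pattern" are hypothetical per-match configuration.
 *
 *	unsigned char *ptr = tcf_get_base_ptr(skb, TCF_LAYER_NETWORK) + offset;
 *
 *	if (!tcf_valid_offset(skb, ptr, len))
 *		return 0;
 *	return !memcmp(ptr, pattern, len);
 */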

#ifdef CONFIG_NET_CLS_IND
#include <net/net_namespace.h>

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
		NL_SET_ERR_MSG(extack, "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}
#endif /* CONFIG_NET_CLS_IND */

int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts);
int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop);
unsigned int tcf_exts_num_actions(struct tcf_exts *exts);

enum tc_block_command {
	TC_BLOCK_BIND,
	TC_BLOCK_UNBIND,
};

struct tc_block_offload {
	enum tc_block_command command;
	enum tcf_block_binder_type binder_type;
	struct tcf_block *block;
	struct netlink_ext_ack *extack;
};

struct tc_cls_common_offload {
	u32 chain_index;
	__be16 protocol;
	u32 prio;
	struct netlink_ext_ack *extack;
};

struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tcf_result *res;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct tc_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}

static inline bool tc_can_offload_extack(const struct net_device *dev,
					 struct netlink_ext_ack *extack)
{
	bool can = tc_can_offload(dev);

	if (!can)
		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

	return can;
}

static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct tc_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}
	return true;
}
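
/*
 * Example (illustrative sketch): a driver's block callback gates offload
 * requests on device capability and chain 0 before dispatching;
 * "my_priv" and "my_flower_replace" are hypothetical driver entities.
 *
 *	static int my_setup_tc_block_cb(enum tc_setup_type type,
 *					void *type_data, void *cb_priv)
 *	{
 *		struct my_priv *priv = cb_priv;
 *		struct tc_cls_flower_offload *f = type_data;
 *
 *		if (type != TC_SETUP_CLSFLOWER ||
 *		    !tc_cls_can_offload_and_chain0(priv->dev, &f->common))
 *			return -EOPNOTSUPP;
 *		return my_flower_replace(priv, f);
 *	}
 */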

static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
		      TCA_CLS_FLAGS_VERBOSE))
		return false;

	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}
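
/*
 * Worked example: after the masking above, flags holds only the two skip
 * bits; the xor is zero exactly when both SKIP_HW and SKIP_SW are set,
 * which would leave no path (neither hardware nor software) able to
 * service the filter, so that combination is rejected.
 */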

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}

static inline void
tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}

enum tc_fl_command {
	TC_CLSFLOWER_REPLACE,
	TC_CLSFLOWER_DESTROY,
	TC_CLSFLOWER_STATS,
	TC_CLSFLOWER_TMPLT_CREATE,
	TC_CLSFLOWER_TMPLT_DESTROY,
};

struct tc_cls_flower_offload {
	struct tc_cls_common_offload common;
	enum tc_fl_command command;
	unsigned long cookie;
	struct flow_rule *rule;
	struct flow_stats stats;
	u32 classid;
};

static inline struct flow_rule *
tc_cls_flower_offload_flow_rule(struct tc_cls_flower_offload *tc_flow_cmd)
{
	return tc_flow_cmd->rule;
}

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
};

struct tc_cls_matchall_offload {
	struct tc_cls_common_offload common;
	enum tc_matchall_command command;
	struct tcf_exts *exts;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct tc_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};

struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

/* This structure holds a cookie that is passed from user space
 * to the kernel for actions and classifiers.
 */
struct tc_cookie {
	u8  *data;
	u32 len;
	struct rcu_head rcu;
};

struct tc_qopt_offload_stats {
	struct gnet_stats_basic_packed *bstats;
	struct gnet_stats_queue *qstats;
};

enum tc_mq_command {
	TC_MQ_CREATE,
	TC_MQ_DESTROY,
	TC_MQ_STATS,
	TC_MQ_GRAFT,
};

struct tc_mq_opt_offload_graft_params {
	unsigned long queue;
	u32 child_handle;
};

struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	union {
		struct tc_qopt_offload_stats stats;
		struct tc_mq_opt_offload_graft_params graft_params;
	};
};

enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
	TC_RED_GRAFT,
};

struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	u32 limit;
	bool is_ecn;
	bool is_harddrop;
	struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
		u32 child_handle;
	};
};

enum tc_gred_command {
	TC_GRED_REPLACE,
	TC_GRED_DESTROY,
	TC_GRED_STATS,
};

struct tc_gred_vq_qopt_offload_params {
	bool present;
	u32 limit;
	u32 prio;
	u32 min;
	u32 max;
	bool is_ecn;
	bool is_harddrop;
	u32 probability;
	/* Only need backlog, see struct tc_prio_qopt_offload_params */
	u32 *backlog;
};

struct tc_gred_qopt_offload_params {
	bool grio_on;
	bool wred_on;
	unsigned int dp_cnt;
	unsigned int dp_def;
	struct gnet_stats_queue *qstats;
	struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
};

struct tc_gred_qopt_offload_stats {
	struct gnet_stats_basic_packed bstats[MAX_DPs];
	struct gnet_stats_queue qstats[MAX_DPs];
	struct red_stats *xstats[MAX_DPs];
};

struct tc_gred_qopt_offload {
	enum tc_gred_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_gred_qopt_offload_params set;
		struct tc_gred_qopt_offload_stats stats;
	};
};

enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* In case that a prio qdisc is offloaded and now is changed to a
	 * non-offloadable config, it needs to update the backlog & qlen
	 * values to negate the HW backlog & qlen values (and only them).
	 */
	struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};
enum tc_root_command {
	TC_ROOT_GRAFT,
};

struct tc_root_qopt_offload {
	enum tc_root_command command;
	u32 handle;
	bool ingress;
};

#endif