/* flow_offload.h - flow rule match/action API for hardware offload (net) */
#ifndef _NET_FLOW_OFFLOAD_H
#define _NET_FLOW_OFFLOAD_H

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netlink.h>
#include <net/flow_dissector.h>

/* A flow match: the dissector describing which keys are present, plus the
 * raw key and mask blobs it indexes into.
 */
struct flow_match {
	struct flow_dissector	*dissector;
	void			*mask;
	void			*key;
};

/* The flow_match_* structs below are typed views onto a flow_match: each
 * pairs a dissector key with its mask, as filled in by the corresponding
 * flow_rule_match_*() helper.
 */
struct flow_match_meta {
	struct flow_dissector_key_meta *key, *mask;
};

struct flow_match_basic {
	struct flow_dissector_key_basic *key, *mask;
};

struct flow_match_control {
	struct flow_dissector_key_control *key, *mask;
};

struct flow_match_eth_addrs {
	struct flow_dissector_key_eth_addrs *key, *mask;
};

struct flow_match_vlan {
	struct flow_dissector_key_vlan *key, *mask;
};

struct flow_match_ipv4_addrs {
	struct flow_dissector_key_ipv4_addrs *key, *mask;
};

struct flow_match_ipv6_addrs {
	struct flow_dissector_key_ipv6_addrs *key, *mask;
};

struct flow_match_ip {
	struct flow_dissector_key_ip *key, *mask;
};

struct flow_match_ports {
	struct flow_dissector_key_ports *key, *mask;
};

struct flow_match_icmp {
	struct flow_dissector_key_icmp *key, *mask;
};

struct flow_match_tcp {
	struct flow_dissector_key_tcp *key, *mask;
};

struct flow_match_mpls {
	struct flow_dissector_key_mpls *key, *mask;
};

struct flow_match_enc_keyid {
	struct flow_dissector_key_keyid *key, *mask;
};

struct flow_match_enc_opts {
	struct flow_dissector_key_enc_opts *key, *mask;
};

struct flow_match_ct {
	struct flow_dissector_key_ct *key, *mask;
};

struct flow_rule;

/* Each helper below extracts the typed key/mask pair for one dissector key
 * from @rule into @out.
 */
void flow_rule_match_meta(const struct flow_rule *rule,
			  struct flow_match_meta *out);
void flow_rule_match_basic(const struct flow_rule *rule,
			   struct flow_match_basic *out);
void flow_rule_match_control(const struct flow_rule *rule,
			     struct flow_match_control *out);
void flow_rule_match_eth_addrs(const struct flow_rule *rule,
			       struct flow_match_eth_addrs *out);
void flow_rule_match_vlan(const struct flow_rule *rule,
			  struct flow_match_vlan *out);
void flow_rule_match_cvlan(const struct flow_rule *rule,
			   struct flow_match_vlan *out);
void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
				struct flow_match_ipv4_addrs *out);
void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
				struct flow_match_ipv6_addrs *out);
void flow_rule_match_ip(const struct flow_rule *rule,
			struct flow_match_ip *out);
void flow_rule_match_ports(const struct flow_rule *rule,
			   struct flow_match_ports *out);
void flow_rule_match_tcp(const struct flow_rule *rule,
			 struct flow_match_tcp *out);
void flow_rule_match_icmp(const struct flow_rule *rule,
			  struct flow_match_icmp *out);
void flow_rule_match_mpls(const struct flow_rule *rule,
			  struct flow_match_mpls *out);
void flow_rule_match_enc_control(const struct flow_rule *rule,
				 struct flow_match_control *out);
void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv4_addrs *out);
void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv6_addrs *out);
void flow_rule_match_enc_ip(const struct flow_rule *rule,
			    struct flow_match_ip *out);
void flow_rule_match_enc_ports(const struct flow_rule *rule,
			       struct flow_match_ports *out);
void flow_rule_match_enc_keyid(const struct flow_rule *rule,
			       struct flow_match_enc_keyid *out);
void flow_rule_match_enc_opts(const struct flow_rule *rule,
			      struct flow_match_enc_opts *out);
void flow_rule_match_ct(const struct flow_rule *rule,
			struct flow_match_ct *out);

/* Hardware-offloadable flow action kinds; the selected id determines which
 * union member of struct flow_action_entry is valid.
 */
enum flow_action_id {
	FLOW_ACTION_ACCEPT		= 0,
	FLOW_ACTION_DROP,
	FLOW_ACTION_TRAP,
	FLOW_ACTION_GOTO,
	FLOW_ACTION_REDIRECT,
	FLOW_ACTION_MIRRED,
	FLOW_ACTION_REDIRECT_INGRESS,
	FLOW_ACTION_MIRRED_INGRESS,
	FLOW_ACTION_VLAN_PUSH,
	FLOW_ACTION_VLAN_POP,
	FLOW_ACTION_VLAN_MANGLE,
	FLOW_ACTION_TUNNEL_ENCAP,
	FLOW_ACTION_TUNNEL_DECAP,
	FLOW_ACTION_MANGLE,
	FLOW_ACTION_ADD,
	FLOW_ACTION_CSUM,
	FLOW_ACTION_MARK,
	FLOW_ACTION_PTYPE,
	FLOW_ACTION_PRIORITY,
	FLOW_ACTION_WAKE,
	FLOW_ACTION_QUEUE,
	FLOW_ACTION_SAMPLE,
	FLOW_ACTION_POLICE,
	FLOW_ACTION_CT,
	FLOW_ACTION_CT_METADATA,
	FLOW_ACTION_MPLS_PUSH,
	FLOW_ACTION_MPLS_POP,
	FLOW_ACTION_MPLS_MANGLE,
	FLOW_ACTION_GATE,
	NUM_FLOW_ACTIONS,
};

/* Header layer a FLOW_ACTION_MANGLE/FLOW_ACTION_ADD rewrite applies to
 * (stored in flow_action_entry.mangle.htype).
 */
/* This is mirroring enum pedit_header_type definition for easy mapping between
 * tc pedit action. Legacy TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK is mapped to
 * FLOW_ACT_MANGLE_UNSPEC, which is supported by no driver.
 */
enum flow_action_mangle_base {
	FLOW_ACT_MANGLE_UNSPEC		= 0,
	FLOW_ACT_MANGLE_HDR_TYPE_ETH,
	FLOW_ACT_MANGLE_HDR_TYPE_IP4,
	FLOW_ACT_MANGLE_HDR_TYPE_IP6,
	FLOW_ACT_MANGLE_HDR_TYPE_TCP,
	FLOW_ACT_MANGLE_HDR_TYPE_UDP,
};

166
enum flow_action_hw_stats_bit {
167 168
	FLOW_ACTION_HW_STATS_IMMEDIATE_BIT,
	FLOW_ACTION_HW_STATS_DELAYED_BIT,
169
	FLOW_ACTION_HW_STATS_DISABLED_BIT,
170 171

	FLOW_ACTION_HW_STATS_NUM_BITS
172 173
};

174
enum flow_action_hw_stats {
175 176 177 178 179
	FLOW_ACTION_HW_STATS_IMMEDIATE =
		BIT(FLOW_ACTION_HW_STATS_IMMEDIATE_BIT),
	FLOW_ACTION_HW_STATS_DELAYED = BIT(FLOW_ACTION_HW_STATS_DELAYED_BIT),
	FLOW_ACTION_HW_STATS_ANY = FLOW_ACTION_HW_STATS_IMMEDIATE |
				   FLOW_ACTION_HW_STATS_DELAYED,
180 181
	FLOW_ACTION_HW_STATS_DISABLED =
		BIT(FLOW_ACTION_HW_STATS_DISABLED_BIT),
182
	FLOW_ACTION_HW_STATS_DONT_CARE = BIT(FLOW_ACTION_HW_STATS_NUM_BITS) - 1,
183
};
184

185 186
typedef void (*action_destr)(void *priv);

187 188 189 190 191 192 193 194 195 196
struct flow_action_cookie {
	u32 cookie_len;
	u8 cookie[];
};

struct flow_action_cookie *flow_action_cookie_create(void *data,
						     unsigned int len,
						     gfp_t gfp);
void flow_action_cookie_destroy(struct flow_action_cookie *cookie);

197 198
struct flow_action_entry {
	enum flow_action_id		id;
199
	enum flow_action_hw_stats	hw_stats;
200 201
	action_destr			destructor;
	void				*destructor_priv;
202 203 204 205 206 207 208 209
	union {
		u32			chain_index;	/* FLOW_ACTION_GOTO */
		struct net_device	*dev;		/* FLOW_ACTION_REDIRECT */
		struct {				/* FLOW_ACTION_VLAN */
			u16		vid;
			__be16		proto;
			u8		prio;
		} vlan;
210 211
		struct {				/* FLOW_ACTION_MANGLE */
							/* FLOW_ACTION_ADD */
212 213 214 215 216
			enum flow_action_mangle_base htype;
			u32		offset;
			u32		mask;
			u32		val;
		} mangle;
217
		struct ip_tunnel_info	*tunnel;	/* FLOW_ACTION_TUNNEL_ENCAP */
218 219
		u32			csum_flags;	/* FLOW_ACTION_CSUM */
		u32			mark;		/* FLOW_ACTION_MARK */
220
		u16                     ptype;          /* FLOW_ACTION_PTYPE */
221
		u32			priority;	/* FLOW_ACTION_PRIORITY */
222 223 224 225 226
		struct {				/* FLOW_ACTION_QUEUE */
			u32		ctx;
			u32		index;
			u8		vf;
		} queue;
227 228 229 230 231 232
		struct {				/* FLOW_ACTION_SAMPLE */
			struct psample_group	*psample_group;
			u32			rate;
			u32			trunc_size;
			bool			truncate;
		} sample;
233
		struct {				/* FLOW_ACTION_POLICE */
234
			u32			index;
235
			u32			burst;
236
			u64			rate_bytes_ps;
237
			u32			mtu;
238
		} police;
P
Paul Blakey 已提交
239 240 241
		struct {				/* FLOW_ACTION_CT */
			int action;
			u16 zone;
242
			struct nf_flowtable *flow_table;
P
Paul Blakey 已提交
243
		} ct;
244
		struct {
245
			unsigned long cookie;
246 247 248
			u32 mark;
			u32 labels[4];
		} ct_metadata;
249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264
		struct {				/* FLOW_ACTION_MPLS_PUSH */
			u32		label;
			__be16		proto;
			u8		tc;
			u8		bos;
			u8		ttl;
		} mpls_push;
		struct {				/* FLOW_ACTION_MPLS_POP */
			__be16		proto;
		} mpls_pop;
		struct {				/* FLOW_ACTION_MPLS_MANGLE */
			u32		label;
			u8		tc;
			u8		bos;
			u8		ttl;
		} mpls_mangle;
265 266 267 268 269 270 271 272 273
		struct {
			u32		index;
			s32		prio;
			u64		basetime;
			u64		cycletime;
			u64		cycletimeext;
			u32		num_entries;
			struct action_gate_entry *entries;
		} gate;
274
	};
275
	struct flow_action_cookie *cookie; /* user defined action cookie */
276 277 278 279
};

struct flow_action {
	unsigned int			num_entries;
280
	struct flow_action_entry	entries[];
281 282 283 284 285 286 287
};

static inline bool flow_action_has_entries(const struct flow_action *action)
{
	return action->num_entries;
}

288 289 290 291 292 293 294 295 296 297 298
/**
 * flow_action_has_one_action() - check if exactly one action is present
 * @action: tc filter flow offload action
 *
 * Returns true if exactly one action is present.
 */
static inline bool flow_offload_has_one_action(const struct flow_action *action)
{
	return action->num_entries == 1;
}

/* Iterate over all entries of @__actions, with @__i the index and @__act
 * pointing at the current entry.
 */
#define flow_action_for_each(__i, __act, __actions)			\
	for (__i = 0, __act = &(__actions)->entries[0];			\
	     __i < (__actions)->num_entries;				\
	     __act = &(__actions)->entries[++__i])

304
static inline bool
305 306
flow_action_mixed_hw_stats_check(const struct flow_action *action,
				 struct netlink_ext_ack *extack)
307 308
{
	const struct flow_action_entry *action_entry;
309
	u8 last_hw_stats;
310 311 312 313 314
	int i;

	if (flow_offload_has_one_action(action))
		return true;

315 316 317 318
	flow_action_for_each(i, action_entry, action) {
		if (i && action_entry->hw_stats != last_hw_stats) {
			NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported");
			return false;
319
		}
320
		last_hw_stats = action_entry->hw_stats;
321 322 323 324 325 326 327 328 329 330 331 332
	}
	return true;
}

/* Return the first action entry. WARNs if the list is empty but still
 * returns &entries[0]; callers must ensure the list is non-empty.
 */
static inline const struct flow_action_entry *
flow_action_first_entry_get(const struct flow_action *action)
{
	WARN_ON(!flow_action_has_entries(action));
	return &action->entries[0];
}

static inline bool
333 334 335
__flow_action_hw_stats_check(const struct flow_action *action,
			     struct netlink_ext_ack *extack,
			     bool check_allow_bit,
336
			     enum flow_action_hw_stats_bit allow_bit)
337 338 339 340 341
{
	const struct flow_action_entry *action_entry;

	if (!flow_action_has_entries(action))
		return true;
342
	if (!flow_action_mixed_hw_stats_check(action, extack))
343
		return false;
344

345
	action_entry = flow_action_first_entry_get(action);
346 347 348

	/* Zero is not a legal value for hw_stats, catch anyone passing it */
	WARN_ON_ONCE(!action_entry->hw_stats);
349

350
	if (!check_allow_bit &&
351
	    ~action_entry->hw_stats & FLOW_ACTION_HW_STATS_ANY) {
352 353
		NL_SET_ERR_MSG_MOD(extack, "Driver supports only default HW stats type \"any\"");
		return false;
354
	} else if (check_allow_bit &&
355
		   !(action_entry->hw_stats & BIT(allow_bit))) {
356 357 358 359 360 361
		NL_SET_ERR_MSG_MOD(extack, "Driver does not support selected HW stats type");
		return false;
	}
	return true;
}

362
static inline bool
363 364
flow_action_hw_stats_check(const struct flow_action *action,
			   struct netlink_ext_ack *extack,
365
			   enum flow_action_hw_stats_bit allow_bit)
366
{
367
	return __flow_action_hw_stats_check(action, extack, true, allow_bit);
368 369
}

370
static inline bool
371 372
flow_action_basic_hw_stats_check(const struct flow_action *action,
				 struct netlink_ext_ack *extack)
373
{
374
	return __flow_action_hw_stats_check(action, extack, false, 0);
375 376
}

377 378
struct flow_rule {
	struct flow_match	match;
379
	struct flow_action	action;
380 381
};

382
struct flow_rule *flow_rule_alloc(unsigned int num_actions);
383 384 385 386 387 388 389

static inline bool flow_rule_match_key(const struct flow_rule *rule,
				       enum flow_dissector_key_id key)
{
	return dissector_uses_key(rule->match.dissector, key);
}

390 391 392
struct flow_stats {
	u64	pkts;
	u64	bytes;
393
	u64	drops;
394
	u64	lastused;
395 396
	enum flow_action_hw_stats used_hw_stats;
	bool used_hw_stats_valid;
397 398 399
};

/* Fold a driver-reported stats delta into @flow_stats; lastused keeps the
 * most recent timestamp seen.
 */
static inline void flow_stats_update(struct flow_stats *flow_stats,
				     u64 bytes, u64 pkts,
				     u64 drops, u64 lastused,
				     enum flow_action_hw_stats used_hw_stats)
{
	flow_stats->pkts	+= pkts;
	flow_stats->bytes	+= bytes;
	flow_stats->drops	+= drops;
	flow_stats->lastused	= max_t(u64, flow_stats->lastused, lastused);

	/* The driver should pass value with a maximum of one bit set.
	 * Passing FLOW_ACTION_HW_STATS_ANY is invalid.
	 */
	WARN_ON(used_hw_stats == FLOW_ACTION_HW_STATS_ANY);
	flow_stats->used_hw_stats |= used_hw_stats;
	flow_stats->used_hw_stats_valid = true;
}

/* Bind or unbind a callback block. */
enum flow_block_command {
	FLOW_BLOCK_BIND,
	FLOW_BLOCK_UNBIND,
};

/* Where the block attaches in the datapath. */
enum flow_block_binder_type {
	FLOW_BLOCK_BINDER_TYPE_UNSPEC,
	FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
	FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
	FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
	FLOW_BLOCK_BINDER_TYPE_RED_MARK,
};

430 431 432 433
struct flow_block {
	struct list_head cb_list;
};

434 435 436 437 438
struct netlink_ext_ack;

struct flow_block_offload {
	enum flow_block_command command;
	enum flow_block_binder_type binder_type;
439
	bool block_shared;
440
	bool unlocked_driver_cb;
441
	struct net *net;
442
	struct flow_block *block;
443
	struct list_head cb_list;
444 445
	struct list_head *driver_block_list;
	struct netlink_ext_ack *extack;
446
	struct Qdisc *sch;
447
	struct list_head *cb_list_head;
448 449
};

450 451 452 453
enum tc_setup_type;
typedef int flow_setup_cb_t(enum tc_setup_type type, void *type_data,
			    void *cb_priv);

454 455 456 457 458
struct flow_block_cb;

struct flow_block_indr {
	struct list_head		list;
	struct net_device		*dev;
459
	struct Qdisc			*sch;
460 461
	enum flow_block_binder_type	binder_type;
	void				*data;
462
	void				*cb_priv;
463 464 465
	void				(*cleanup)(struct flow_block_cb *block_cb);
};

466
struct flow_block_cb {
467
	struct list_head	driver_list;
468
	struct list_head	list;
469
	flow_setup_cb_t		*cb;
470 471 472
	void			*cb_ident;
	void			*cb_priv;
	void			(*release)(void *cb_priv);
473
	struct flow_block_indr	indr;
474 475 476
	unsigned int		refcnt;
};

477
struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
478 479
					  void *cb_ident, void *cb_priv,
					  void (*release)(void *cb_priv));
480 481 482 483
struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
					       void *cb_ident, void *cb_priv,
					       void (*release)(void *cb_priv),
					       struct flow_block_offload *bo,
484 485
					       struct net_device *dev,
					       struct Qdisc *sch, void *data,
486
					       void *indr_cb_priv,
487
					       void (*cleanup)(struct flow_block_cb *block_cb));
488 489
void flow_block_cb_free(struct flow_block_cb *block_cb);

490
struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
491
					   flow_setup_cb_t *cb, void *cb_ident);
492

493 494 495 496
void *flow_block_cb_priv(struct flow_block_cb *block_cb);
void flow_block_cb_incref(struct flow_block_cb *block_cb);
unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb);

/* Queue @block_cb for addition to the block via @offload->cb_list. */
static inline void flow_block_cb_add(struct flow_block_cb *block_cb,
				     struct flow_block_offload *offload)
{
	list_add_tail(&block_cb->list, &offload->cb_list);
}

/* Move @block_cb onto @offload->cb_list for removal processing. */
static inline void flow_block_cb_remove(struct flow_block_cb *block_cb,
					struct flow_block_offload *offload)
{
	list_move(&block_cb->list, &offload->cb_list);
}

/* As flow_block_cb_remove(), also unlinking the indirect registration. */
static inline void flow_indr_block_cb_remove(struct flow_block_cb *block_cb,
					     struct flow_block_offload *offload)
{
	list_del(&block_cb->indr.list);
	list_move(&block_cb->list, &offload->cb_list);
}

516
bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
517 518
			   struct list_head *driver_block_list);

519
int flow_block_cb_setup_simple(struct flow_block_offload *f,
520 521
			       struct list_head *driver_list,
			       flow_setup_cb_t *cb,
522 523
			       void *cb_ident, void *cb_priv, bool ingress_only);

/* Classifier offload commands delivered to drivers. */
enum flow_cls_command {
	FLOW_CLS_REPLACE,
	FLOW_CLS_DESTROY,
	FLOW_CLS_STATS,
	FLOW_CLS_TMPLT_CREATE,
	FLOW_CLS_TMPLT_DESTROY,
};

struct flow_cls_common_offload {
	u32 chain_index;
	__be16 protocol;
	u32 prio;
	struct netlink_ext_ack *extack;
};

/* A classifier offload request; @rule is valid for FLOW_CLS_REPLACE. */
struct flow_cls_offload {
	struct flow_cls_common_offload common;
	enum flow_cls_command command;
	unsigned long cookie;
	struct flow_rule *rule;
	struct flow_stats stats;
	u32 classid;
};

static inline struct flow_rule *
flow_cls_offload_flow_rule(struct flow_cls_offload *flow_cmd)
{
	return flow_cmd->rule;
}

554 555 556 557 558
static inline void flow_block_init(struct flow_block *flow_block)
{
	INIT_LIST_HEAD(&flow_block->cb_list);
}

559
typedef int flow_indr_block_bind_cb_t(struct net_device *dev, struct Qdisc *sch, void *cb_priv,
560 561 562
				      enum tc_setup_type type, void *type_data,
				      void *data,
				      void (*cleanup)(struct flow_block_cb *block_cb));
563

564 565
int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv);
void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
566
			      void (*release)(void *cb_priv));
567
int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
568 569 570
				enum tc_setup_type type, void *data,
				struct flow_block_offload *bo,
				void (*cleanup)(struct flow_block_cb *block_cb));
571
bool flow_indr_dev_exists(void);
572

573
#endif /* _NET_FLOW_OFFLOAD_H */