// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>

#include "spectrum.h"
#include "core_acl_flex_keys.h"

static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
18
					 struct mlxsw_sp_acl_block *block,
19
					 struct mlxsw_sp_acl_rule_info *rulei,
20
					 struct flow_action *flow_action,
21
					 struct netlink_ext_ack *extack)
22
{
23
	const struct flow_action_entry *act;
24
	int err, i;
25

26
	if (!flow_action_has_entries(flow_action))
27 28
		return 0;

29
	/* Count action is inserted first */
30
	err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
31 32 33
	if (err)
		return err;

34 35 36
	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
37
			err = mlxsw_sp_acl_rulei_act_terminate(rulei);
38 39
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action");
40
				return err;
41
			}
42 43
			break;
		case FLOW_ACTION_DROP:
44
			err = mlxsw_sp_acl_rulei_act_drop(rulei);
45 46
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
47
				return err;
48
			}
49 50
			break;
		case FLOW_ACTION_TRAP:
51
			err = mlxsw_sp_acl_rulei_act_trap(rulei);
52 53
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action");
54
				return err;
55
			}
56 57 58
			break;
		case FLOW_ACTION_GOTO: {
			u32 chain_index = act->chain_index;
59 60 61
			struct mlxsw_sp_acl_ruleset *ruleset;
			u16 group_id;

62
			ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
63 64 65 66 67 68
							      chain_index,
							      MLXSW_SP_ACL_PROFILE_FLOWER);
			if (IS_ERR(ruleset))
				return PTR_ERR(ruleset);

			group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
69
			err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
70 71
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action");
72
				return err;
73
			}
74 75 76
			}
			break;
		case FLOW_ACTION_REDIRECT: {
77
			struct net_device *out_dev;
78 79
			struct mlxsw_sp_fid *fid;
			u16 fid_index;
80

81 82
			fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
			fid_index = mlxsw_sp_fid_index(fid);
83
			err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
84
							     fid_index, extack);
85 86 87
			if (err)
				return err;

88
			out_dev = act->dev;
89
			err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
90
							 out_dev, extack);
91 92
			if (err)
				return err;
93 94 95 96
			}
			break;
		case FLOW_ACTION_MIRRED: {
			struct net_device *out_dev = act->dev;
97 98

			err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
99 100
							    block, out_dev,
							    extack);
101 102
			if (err)
				return err;
103 104
			}
			break;
105
		case FLOW_ACTION_VLAN_MANGLE: {
106 107 108
			u16 proto = be16_to_cpu(act->vlan.proto);
			u8 prio = act->vlan.prio;
			u16 vid = act->vlan.vid;
109 110

			return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
111
							   act->id, vid,
112
							   proto, prio, extack);
113 114
			}
		default:
115
			NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
116 117 118 119 120 121 122
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}
	return 0;
}

/* Match on the ingress port carried in the flower meta key.
 *
 * Translates the ifindex supplied by flower into the device-local
 * source system port, after validating that the referenced netdev is an
 * mlxsw port belonging to the same device instance as the ACL block.
 * Returns 0 (also when no meta key is present) or -EINVAL with an
 * extack message.
 */
static int mlxsw_sp_flower_parse_meta(struct mlxsw_sp_acl_rule_info *rulei,
				      struct tc_cls_flower_offload *f,
				      struct mlxsw_sp_acl_block *block)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *ingress_dev;
	struct flow_match_meta match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return 0;

	flow_rule_match_meta(rule, &match);
	/* Only an exact ifindex match can be offloaded. */
	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported ingress ifindex mask");
		return -EINVAL;
	}

	ingress_dev = __dev_get_by_index(block->net,
					 match.key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't find specified ingress port to match on");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(ingress_dev)) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on non-mlxsw ingress port");
		return -EINVAL;
	}

	/* The port must belong to the same ASIC as the block the rule
	 * is being installed on; local port numbers are per-device.
	 */
	mlxsw_sp_port = netdev_priv(ingress_dev);
	if (mlxsw_sp_port->mlxsw_sp != block->mlxsw_sp) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on a port from different device");
		return -EINVAL;
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei,
				       MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
				       mlxsw_sp_port->local_port,
				       0xFFFFFFFF);
	return 0;
}

166 167 168
static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
				       struct tc_cls_flower_offload *f)
{
169 170 171
	struct flow_match_ipv4_addrs match;

	flow_rule_match_ipv4_addrs(f->rule, &match);
172

173
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
174 175
				       (char *) &match.key->src,
				       (char *) &match.mask->src, 4);
176
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
177 178
				       (char *) &match.key->dst,
				       (char *) &match.mask->dst, 4);
179 180 181 182 183
}

static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
				       struct tc_cls_flower_offload *f)
{
184 185 186
	struct flow_match_ipv6_addrs match;

	flow_rule_match_ipv6_addrs(f->rule, &match);
187 188

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
189 190
				       &match.key->src.s6_addr[0x0],
				       &match.mask->src.s6_addr[0x0], 4);
191
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
192 193
				       &match.key->src.s6_addr[0x4],
				       &match.mask->src.s6_addr[0x4], 4);
194
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
195 196
				       &match.key->src.s6_addr[0x8],
				       &match.mask->src.s6_addr[0x8], 4);
197
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
198 199
				       &match.key->src.s6_addr[0xC],
				       &match.mask->src.s6_addr[0xC], 4);
200
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
201 202
				       &match.key->dst.s6_addr[0x0],
				       &match.mask->dst.s6_addr[0x0], 4);
203
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
204 205
				       &match.key->dst.s6_addr[0x4],
				       &match.mask->dst.s6_addr[0x4], 4);
206
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
207 208
				       &match.key->dst.s6_addr[0x8],
				       &match.mask->dst.s6_addr[0x8], 4);
209
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
210 211
				       &match.key->dst.s6_addr[0xC],
				       &match.mask->dst.s6_addr[0xC], 4);
212 213 214 215 216 217 218
}

/* Add L4 source/destination port key/mask to the rule info.
 * Port matching is only offloadable for TCP and UDP flows; any other
 * IP protocol with a ports key is rejected with -EINVAL.
 */
static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_rule_info *rulei,
				       struct tc_cls_flower_offload *f,
				       u8 ip_proto)
{
	const struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
	struct flow_match_ports match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
		return 0;

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported");
		dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
		return -EINVAL;
	}

	flow_rule_match_ports(rule, &match);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
				       ntohs(match.key->dst),
				       ntohs(match.mask->dst));
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
				       ntohs(match.key->src),
				       ntohs(match.mask->src));
	return 0;
}

241 242 243 244 245
static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule_info *rulei,
				     struct tc_cls_flower_offload *f,
				     u8 ip_proto)
{
246 247
	const struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
	struct flow_match_tcp match;
248

249
	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP))
250 251 252
		return 0;

	if (ip_proto != IPPROTO_TCP) {
253
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP keys supported only for TCP");
254 255 256 257
		dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
		return -EINVAL;
	}

258 259
	flow_rule_match_tcp(rule, &match);

260
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
261 262
				       ntohs(match.key->flags),
				       ntohs(match.mask->flags));
263 264 265
	return 0;
}

266 267 268 269 270
static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct tc_cls_flower_offload *f,
				    u16 n_proto)
{
271 272
	const struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
	struct flow_match_ip match;
273

274
	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP))
275 276 277
		return 0;

	if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
278
		NL_SET_ERR_MSG_MOD(f->common.extack, "IP keys supported only for IPv4/6");
279 280 281 282
		dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n");
		return -EINVAL;
	}

283 284
	flow_rule_match_ip(rule, &match);

285
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_,
286
				       match.key->ttl, match.mask->ttl);
287 288

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN,
289 290
				       match.key->tos & 0x3,
				       match.mask->tos & 0x3);
291 292

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
293 294
				       match.key->tos >> 2,
				       match.mask->tos >> 2);
295

296 297 298
	return 0;
}

299
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
300
				 struct mlxsw_sp_acl_block *block,
301 302 303
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct tc_cls_flower_offload *f)
{
304 305
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;
306 307
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
308 309 310 311
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

312
	if (dissector->used_keys &
313 314
	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
315 316 317 318
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
319
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
320
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
321
	      BIT(FLOW_DISSECTOR_KEY_IP) |
322
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
323
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
324
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
325 326 327
		return -EOPNOTSUPP;
	}

328
	mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);
329

330 331 332 333
	err = mlxsw_sp_flower_parse_meta(rulei, f, block);
	if (err)
		return err;

334 335 336 337 338
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
339 340
	}

341 342 343 344 345 346
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		n_proto_key = ntohs(match.key->n_proto);
		n_proto_mask = ntohs(match.mask->n_proto);
347 348 349 350 351

		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
352 353
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_ETHERTYPE,
354 355
					       n_proto_key, n_proto_mask);

356
		ip_proto = match.key->ip_proto;
357 358
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_IP_PROTO,
359 360
					       match.key->ip_proto,
					       match.mask->ip_proto);
361 362
	}

363 364
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;
365

366
		flow_rule_match_eth_addrs(rule, &match);
367
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
368
					       MLXSW_AFK_ELEMENT_DMAC_32_47,
369 370
					       match.key->dst,
					       match.mask->dst, 2);
371 372
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_0_31,
373 374
					       match.key->dst + 2,
					       match.mask->dst + 2, 4);
375 376
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_32_47,
377 378
					       match.key->src,
					       match.mask->src, 2);
379
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
380
					       MLXSW_AFK_ELEMENT_SMAC_0_31,
381 382
					       match.key->src + 2,
					       match.mask->src + 2, 4);
383 384
	}

385 386
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;
387

388
		flow_rule_match_vlan(rule, &match);
389 390 391 392
		if (mlxsw_sp_acl_block_is_egress_bound(block)) {
			NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
			return -EOPNOTSUPP;
		}
393
		if (match.mask->vlan_id != 0)
394 395
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_VID,
396 397 398
						       match.key->vlan_id,
						       match.mask->vlan_id);
		if (match.mask->vlan_priority != 0)
399 400
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_PCP,
401 402
						       match.key->vlan_priority,
						       match.mask->vlan_priority);
403 404
	}

405 406 407 408 409 410 411
	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		mlxsw_sp_flower_parse_ipv4(rulei, f);

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
		mlxsw_sp_flower_parse_ipv6(rulei, f);

	err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
412 413 414
	if (err)
		return err;
	err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
415 416 417
	if (err)
		return err;

418 419 420 421
	err = mlxsw_sp_flower_parse_ip(mlxsw_sp, rulei, f, n_proto_key & n_proto_mask);
	if (err)
		return err;

422 423
	return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei,
					     &f->rule->action,
424
					     f->common.extack);
425 426
}

427 428
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_block *block,
429
			    struct tc_cls_flower_offload *f)
430 431 432 433 434 435
{
	struct mlxsw_sp_acl_rule_info *rulei;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	int err;

436
	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
437
					   f->common.chain_index,
438
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
439 440 441
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

442
	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie, NULL,
443
					f->common.extack);
444 445 446 447 448 449
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}

	rulei = mlxsw_sp_acl_rule_rulei(rule);
450
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473
	if (err)
		goto err_flower_parse;

	err = mlxsw_sp_acl_rulei_commit(rulei);
	if (err)
		goto err_rulei_commit;

	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
	if (err)
		goto err_rule_add;

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_add:
err_rulei_commit:
err_flower_parse:
	mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
err_rule_create:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

474 475
void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_block *block,
476 477 478 479 480
			     struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;

481 482
	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
483
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
484
	if (IS_ERR(ruleset))
485 486 487
		return;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
488
	if (rule) {
489 490 491 492 493 494
		mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
		mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
	}

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}
495

496 497
int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_block *block,
498 499 500 501 502 503 504 505 506
			  struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	u64 packets;
	u64 lastuse;
	u64 bytes;
	int err;

507 508
	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
509
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
510 511 512 513 514 515 516
	if (WARN_ON(IS_ERR(ruleset)))
		return -EINVAL;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (!rule)
		return -EINVAL;

517
	err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
518 519 520 521
					  &lastuse);
	if (err)
		goto err_rule_get_stats;

522
	flow_stats_update(&f->stats, bytes, packets, lastuse);
523 524 525 526 527 528 529 530

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_get_stats:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

/* Create a flower template: parse the match into a scratch rule info
 * and get (create if needed) the chain's ruleset sized for that key
 * usage. The ruleset reference taken here is deliberately kept; it is
 * released by mlxsw_sp_flower_tmplt_destroy().
 */
int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info rulei;
	int err;

	memset(&rulei, 0, sizeof(rulei));
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, &rulei, f);
	if (err)
		return err;
	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER,
					   &rulei.values.elusage);

	/* keep the reference to the ruleset */
	return PTR_ERR_OR_ZERO(ruleset);
}

/* Destroy a flower template created by mlxsw_sp_flower_tmplt_create().
 *
 * The ruleset is put twice: once for the temporary reference taken by
 * the lookup here, and once for the reference kept at template
 * creation time.
 */
void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_block *block,
				   struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;
	/* put the reference to the ruleset kept in create */
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}