// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>

#include "spectrum.h"
#include "core_acl_flex_keys.h"

static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
18
					 struct mlxsw_sp_acl_block *block,
19
					 struct mlxsw_sp_acl_rule_info *rulei,
20
					 struct flow_action *flow_action,
21
					 struct netlink_ext_ack *extack)
22
{
23
	const struct flow_action_entry *act;
24
	int err, i;
25

26
	if (!flow_action_has_entries(flow_action))
27 28
		return 0;

29
	/* Count action is inserted first */
30
	err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
31 32 33
	if (err)
		return err;

34 35 36
	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
37
			err = mlxsw_sp_acl_rulei_act_terminate(rulei);
38 39
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action");
40
				return err;
41
			}
42 43
			break;
		case FLOW_ACTION_DROP:
44
			err = mlxsw_sp_acl_rulei_act_drop(rulei);
45 46
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
47
				return err;
48
			}
49 50
			break;
		case FLOW_ACTION_TRAP:
51
			err = mlxsw_sp_acl_rulei_act_trap(rulei);
52 53
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action");
54
				return err;
55
			}
56 57 58
			break;
		case FLOW_ACTION_GOTO: {
			u32 chain_index = act->chain_index;
59 60 61
			struct mlxsw_sp_acl_ruleset *ruleset;
			u16 group_id;

62
			ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
63 64 65 66 67 68
							      chain_index,
							      MLXSW_SP_ACL_PROFILE_FLOWER);
			if (IS_ERR(ruleset))
				return PTR_ERR(ruleset);

			group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
69
			err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
70 71
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action");
72
				return err;
73
			}
74 75 76
			}
			break;
		case FLOW_ACTION_REDIRECT: {
77
			struct net_device *out_dev;
78 79
			struct mlxsw_sp_fid *fid;
			u16 fid_index;
80

81 82 83 84 85
			if (mlxsw_sp_acl_block_is_egress_bound(block)) {
				NL_SET_ERR_MSG_MOD(extack, "Redirect action is not supported on egress");
				return -EOPNOTSUPP;
			}

86 87 88 89 90
			/* Forbid block with this rulei to be bound
			 * to egress in future.
			 */
			rulei->egress_bind_blocker = 1;

91 92
			fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
			fid_index = mlxsw_sp_fid_index(fid);
93
			err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
94
							     fid_index, extack);
95 96 97
			if (err)
				return err;

98
			out_dev = act->dev;
99
			err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
100
							 out_dev, extack);
101 102
			if (err)
				return err;
103 104 105 106
			}
			break;
		case FLOW_ACTION_MIRRED: {
			struct net_device *out_dev = act->dev;
107 108

			err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
109 110
							    block, out_dev,
							    extack);
111 112
			if (err)
				return err;
113 114
			}
			break;
115
		case FLOW_ACTION_VLAN_MANGLE: {
116 117 118
			u16 proto = be16_to_cpu(act->vlan.proto);
			u8 prio = act->vlan.prio;
			u16 vid = act->vlan.vid;
119 120

			return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
121
							   act->id, vid,
122
							   proto, prio, extack);
123 124
			}
		default:
125
			NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
126 127 128 129 130 131 132
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}
	return 0;
}

/* Translate a FLOW_DISSECTOR_KEY_META match (ingress ifindex) into a
 * match on the device-local source port. Returns 0 if the key is absent,
 * -EINVAL if the ifindex cannot be mapped to a local port of this device.
 */
static int mlxsw_sp_flower_parse_meta(struct mlxsw_sp_acl_rule_info *rulei,
				      struct flow_cls_offload *f,
				      struct mlxsw_sp_acl_block *block)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *ingress_dev;
	struct flow_match_meta match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return 0;

	flow_rule_match_meta(rule, &match);
	/* Only an exact ifindex match can be offloaded. */
	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported ingress ifindex mask");
		return -EINVAL;
	}

	ingress_dev = __dev_get_by_index(block->net,
					 match.key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't find specified ingress port to match on");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(ingress_dev)) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on non-mlxsw ingress port");
		return -EINVAL;
	}

	mlxsw_sp_port = netdev_priv(ingress_dev);
	/* Local port numbers are only meaningful within one ASIC instance. */
	if (mlxsw_sp_port->mlxsw_sp != block->mlxsw_sp) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on a port from different device");
		return -EINVAL;
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei,
				       MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
				       mlxsw_sp_port->local_port,
				       0xFFFFFFFF);
	return 0;
}

176
static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
177
				       struct flow_cls_offload *f)
178
{
179 180 181
	struct flow_match_ipv4_addrs match;

	flow_rule_match_ipv4_addrs(f->rule, &match);
182

183
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
184 185
				       (char *) &match.key->src,
				       (char *) &match.mask->src, 4);
186
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
187 188
				       (char *) &match.key->dst,
				       (char *) &match.mask->dst, 4);
189 190 191
}

static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
192
				       struct flow_cls_offload *f)
193
{
194 195 196
	struct flow_match_ipv6_addrs match;

	flow_rule_match_ipv6_addrs(f->rule, &match);
197 198

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
199 200
				       &match.key->src.s6_addr[0x0],
				       &match.mask->src.s6_addr[0x0], 4);
201
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
202 203
				       &match.key->src.s6_addr[0x4],
				       &match.mask->src.s6_addr[0x4], 4);
204
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
205 206
				       &match.key->src.s6_addr[0x8],
				       &match.mask->src.s6_addr[0x8], 4);
207
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
208 209
				       &match.key->src.s6_addr[0xC],
				       &match.mask->src.s6_addr[0xC], 4);
210
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
211 212
				       &match.key->dst.s6_addr[0x0],
				       &match.mask->dst.s6_addr[0x0], 4);
213
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
214 215
				       &match.key->dst.s6_addr[0x4],
				       &match.mask->dst.s6_addr[0x4], 4);
216
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
217 218
				       &match.key->dst.s6_addr[0x8],
				       &match.mask->dst.s6_addr[0x8], 4);
219
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
220 221
				       &match.key->dst.s6_addr[0xC],
				       &match.mask->dst.s6_addr[0xC], 4);
222 223 224 225
}

static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_rule_info *rulei,
226
				       struct flow_cls_offload *f,
227 228
				       u8 ip_proto)
{
229
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
230
	struct flow_match_ports match;
231

232
	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
233 234 235
		return 0;

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
236
		NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported");
237 238 239 240
		dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
		return -EINVAL;
	}

241
	flow_rule_match_ports(rule, &match);
242
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
243 244
				       ntohs(match.key->dst),
				       ntohs(match.mask->dst));
245
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
246 247
				       ntohs(match.key->src),
				       ntohs(match.mask->src));
248 249 250
	return 0;
}

251 252
static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule_info *rulei,
253
				     struct flow_cls_offload *f,
254 255
				     u8 ip_proto)
{
256
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
257
	struct flow_match_tcp match;
258

259
	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP))
260 261 262
		return 0;

	if (ip_proto != IPPROTO_TCP) {
263
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP keys supported only for TCP");
264 265 266 267
		dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
		return -EINVAL;
	}

268 269
	flow_rule_match_tcp(rule, &match);

270
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
271 272
				       ntohs(match.key->flags),
				       ntohs(match.mask->flags));
273 274 275
	return 0;
}

276 277
static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
278
				    struct flow_cls_offload *f,
279 280
				    u16 n_proto)
{
281
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
282
	struct flow_match_ip match;
283

284
	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP))
285 286 287
		return 0;

	if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
288
		NL_SET_ERR_MSG_MOD(f->common.extack, "IP keys supported only for IPv4/6");
289 290 291 292
		dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n");
		return -EINVAL;
	}

293 294
	flow_rule_match_ip(rule, &match);

295
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_,
296
				       match.key->ttl, match.mask->ttl);
297 298

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN,
299 300
				       match.key->tos & 0x3,
				       match.mask->tos & 0x3);
301 302

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
303 304
				       match.key->tos >> 2,
				       match.mask->tos >> 2);
305

306 307 308
	return 0;
}

/* Translate the flower classifier's match keys into flex-key element
 * key/mask pairs on @rulei, then parse the action list. Returns 0 on
 * success or a negative errno.
 */
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	/* Reject the rule up front if it uses any dissector key this
	 * driver cannot offload.
	 */
	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
		return -EOPNOTSUPP;
	}

	mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);

	err = mlxsw_sp_flower_parse_meta(rulei, f, block);
	if (err)
		return err;

	/* addr_type selects the IPv4/IPv6 address parser further below. */
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		n_proto_key = ntohs(match.key->n_proto);
		n_proto_mask = ntohs(match.mask->n_proto);

		/* ETH_P_ALL means "any protocol" - translate to an empty
		 * (match-all) EtherType key/mask.
		 */
		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_ETHERTYPE,
					       n_proto_key, n_proto_mask);

		/* Remember the IP protocol for the ports/TCP parsers. */
		ip_proto = match.key->ip_proto;
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_IP_PROTO,
					       match.key->ip_proto,
					       match.mask->ip_proto);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		/* Each 48-bit MAC is split into a 16-bit (upper) and a
		 * 32-bit (lower) flex-key element.
		 */
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_32_47,
					       match.key->dst,
					       match.mask->dst, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_0_31,
					       match.key->dst + 2,
					       match.mask->dst + 2, 4);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_32_47,
					       match.key->src,
					       match.mask->src, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_0_31,
					       match.key->src + 2,
					       match.mask->src + 2, 4);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		if (mlxsw_sp_acl_block_is_egress_bound(block)) {
			NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
			return -EOPNOTSUPP;
		}

		/* Forbid block with this rulei to be bound
		 * to egress in future.
		 */
		rulei->egress_bind_blocker = 1;

		if (match.mask->vlan_id != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_VID,
						       match.key->vlan_id,
						       match.mask->vlan_id);
		if (match.mask->vlan_priority != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_PCP,
						       match.key->vlan_priority,
						       match.mask->vlan_priority);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		mlxsw_sp_flower_parse_ipv4(rulei, f);

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
		mlxsw_sp_flower_parse_ipv6(rulei, f);

	err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;
	err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;

	/* Pass only the bits of the EtherType actually being matched. */
	err = mlxsw_sp_flower_parse_ip(mlxsw_sp, rulei, f, n_proto_key & n_proto_mask);
	if (err)
		return err;

	return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei,
					     &f->rule->action,
					     f->common.extack);
}

/* flow_cls REPLACE handler: parse @f into a new ACL rule, commit it to
 * hardware and add it to the chain's ruleset. Returns 0 on success or a
 * negative errno.
 */
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_block *block,
			    struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie, NULL,
					f->common.extack);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
	if (err)
		goto err_flower_parse;

	err = mlxsw_sp_acl_rulei_commit(rulei);
	if (err)
		goto err_rulei_commit;

	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
	if (err)
		goto err_rule_add;

	/* NOTE(review): the reference taken by ruleset_get above is
	 * dropped on success as well - presumably the added rule keeps
	 * the ruleset alive on its own; confirm against the acl core.
	 */
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_add:
err_rulei_commit:
err_flower_parse:
	mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
err_rule_create:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

/* flow_cls DESTROY handler: remove the rule bound to f->cookie from
 * hardware and free it. Silently does nothing if the ruleset or rule
 * cannot be found.
 */
void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_block *block,
			     struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (rule) {
		mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
		mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
	}

	/* Balances the ruleset_get above. */
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}
511

512 513
int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_block *block,
514
			  struct flow_cls_offload *f)
515 516 517 518 519 520 521 522
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	u64 packets;
	u64 lastuse;
	u64 bytes;
	int err;

523 524
	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
525
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
526 527 528 529 530 531 532
	if (WARN_ON(IS_ERR(ruleset)))
		return -EINVAL;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (!rule)
		return -EINVAL;

533
	err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
534 535 536 537
					  &lastuse);
	if (err)
		goto err_rule_get_stats;

538
	flow_stats_update(&f->stats, bytes, packets, lastuse);
539 540 541 542 543 544 545 546

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_get_stats:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}
/* flow_cls TMPLT_CREATE handler: parse the template into a scratch rule
 * info and take a ruleset reference sized for the template's element
 * usage.
 */
int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info rulei;
	int err;

	memset(&rulei, 0, sizeof(rulei));
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, &rulei, f);
	if (err)
		return err;
	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER,
					   &rulei.values.elusage);

	/* keep the reference to the ruleset - it is dropped by
	 * mlxsw_sp_flower_tmplt_destroy()
	 */
	return PTR_ERR_OR_ZERO(ruleset);
}

/* flow_cls TMPLT_DESTROY handler: drop the ruleset reference kept by
 * mlxsw_sp_flower_tmplt_create().
 */
void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_block *block,
				   struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;
	/* put the reference to the ruleset kept in create */
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	/* Second put balances the ruleset_get done just above. */
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}