// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>

#include "spectrum.h"
#include "core_acl_flex_keys.h"

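/* Translate a flower rule's action list into mlxsw ACL actions. A count
 * action is always appended first so every offloaded rule can report
 * statistics; each supported flow_action entry is then mapped onto the
 * matching mlxsw_sp_acl_rulei_act_*() helper. Rules that mix hardware
 * stats types or carry an unsupported action are rejected with
 * -EOPNOTSUPP and an extack message.
 */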
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_block *block,
					 struct mlxsw_sp_acl_rule_info *rulei,
					 struct flow_action *flow_action,
					 struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *act;
	int mirror_act_count = 0;
	int err, i;

	if (!flow_action_has_entries(flow_action))
		return 0;
	if (!flow_action_mixed_hw_stats_types_check(flow_action, extack))
		return -EOPNOTSUPP;

	/* Count action is inserted first */
	err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
	if (err)
		return err;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			err = mlxsw_sp_acl_rulei_act_terminate(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action");
				return err;
			}
			break;
		case FLOW_ACTION_DROP: {
			bool ingress;

			if (mlxsw_sp_acl_block_is_mixed_bound(block)) {
				NL_SET_ERR_MSG_MOD(extack, "Drop action is not supported when block is bound to ingress and egress");
				return -EOPNOTSUPP;
			}
			ingress = mlxsw_sp_acl_block_is_ingress_bound(block);
			err = mlxsw_sp_acl_rulei_act_drop(rulei, ingress,
							  act->cookie, extack);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
				return err;
			}

			/* Forbid block with this rulei to be bound
			 * to ingress/egress in future. Ingress rule is
			 * a blocker for egress and vice versa.
			 */
			if (ingress)
				rulei->egress_bind_blocker = 1;
			else
				rulei->ingress_bind_blocker = 1;
			}
			break;
		case FLOW_ACTION_TRAP:
			err = mlxsw_sp_acl_rulei_act_trap(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action");
				return err;
			}
			break;
		case FLOW_ACTION_GOTO: {
			u32 chain_index = act->chain_index;
			struct mlxsw_sp_acl_ruleset *ruleset;
			u16 group_id;

			ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
							      chain_index,
							      MLXSW_SP_ACL_PROFILE_FLOWER);
			if (IS_ERR(ruleset))
				return PTR_ERR(ruleset);

			group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
			err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action");
				return err;
			}
			}
			break;
		case FLOW_ACTION_REDIRECT: {
			struct net_device *out_dev;
			struct mlxsw_sp_fid *fid;
			u16 fid_index;

			if (mlxsw_sp_acl_block_is_egress_bound(block)) {
				NL_SET_ERR_MSG_MOD(extack, "Redirect action is not supported on egress");
				return -EOPNOTSUPP;
			}

			/* Forbid block with this rulei to be bound
			 * to egress in future.
			 */
			rulei->egress_bind_blocker = 1;

			fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
			fid_index = mlxsw_sp_fid_index(fid);
			err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
							     fid_index, extack);
			if (err)
				return err;

			out_dev = act->dev;
			err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
							 out_dev, extack);
			if (err)
				return err;
			}
			break;
		case FLOW_ACTION_MIRRED: {
			struct net_device *out_dev = act->dev;

			if (mirror_act_count++) {
				NL_SET_ERR_MSG_MOD(extack, "Multiple mirror actions per rule are not supported");
				return -EOPNOTSUPP;
			}

			err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
							    block, out_dev,
							    extack);
			if (err)
				return err;
			}
			break;
		case FLOW_ACTION_VLAN_MANGLE: {
			u16 proto = be16_to_cpu(act->vlan.proto);
			u8 prio = act->vlan.prio;
			u16 vid = act->vlan.vid;

			return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
							   act->id, vid,
							   proto, prio, extack);
			}
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}
	return 0;
}

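/* Match on the ingress port carried in the META key. Only an exact
 * ifindex match is supported, and the ifindex must resolve to a port
 * netdev on the same mlxsw device as the block; the match is then
 * programmed as the SRC_SYS_PORT flex key element.
 */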
static int mlxsw_sp_flower_parse_meta(struct mlxsw_sp_acl_rule_info *rulei,
				      struct flow_cls_offload *f,
				      struct mlxsw_sp_acl_block *block)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *ingress_dev;
	struct flow_match_meta match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return 0;

	flow_rule_match_meta(rule, &match);
	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported ingress ifindex mask");
		return -EINVAL;
	}

	ingress_dev = __dev_get_by_index(block->net,
					 match.key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't find specified ingress port to match on");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(ingress_dev)) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on non-mlxsw ingress port");
		return -EINVAL;
	}

	mlxsw_sp_port = netdev_priv(ingress_dev);
	if (mlxsw_sp_port->mlxsw_sp != block->mlxsw_sp) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on a port from different device");
		return -EINVAL;
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei,
				       MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
				       mlxsw_sp_port->local_port,
				       0xFFFFFFFF);
	return 0;
}

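/* IPv4 source/destination addresses map directly onto the 32-bit
 * SRC_IP_0_31/DST_IP_0_31 flex key elements.
 */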
static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f)
{
	struct flow_match_ipv4_addrs match;

	flow_rule_match_ipv4_addrs(f->rule, &match);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       (char *) &match.key->src,
				       (char *) &match.mask->src, 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       (char *) &match.key->dst,
				       (char *) &match.mask->dst, 4);
}

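/* A 128-bit IPv6 address does not fit into a single flex key element,
 * so each address is split into four 32-bit chunks (bits 96-127 down to
 * bits 0-31), taken at byte offsets 0x0, 0x4, 0x8 and 0xC of s6_addr.
 */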
static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f)
{
	struct flow_match_ipv6_addrs match;

	flow_rule_match_ipv6_addrs(f->rule, &match);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
				       &match.key->src.s6_addr[0x0],
				       &match.mask->src.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
				       &match.key->src.s6_addr[0x4],
				       &match.mask->src.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
				       &match.key->src.s6_addr[0x8],
				       &match.mask->src.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       &match.key->src.s6_addr[0xC],
				       &match.mask->src.s6_addr[0xC], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
				       &match.key->dst.s6_addr[0x0],
				       &match.mask->dst.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
				       &match.key->dst.s6_addr[0x4],
				       &match.mask->dst.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
				       &match.key->dst.s6_addr[0x8],
				       &match.mask->dst.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       &match.key->dst.s6_addr[0xC],
				       &match.mask->dst.s6_addr[0xC], 4);
}

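/* L4 port keys are accepted only together with an exact TCP or UDP
 * ip_proto match; the ports are converted to host byte order before
 * being programmed into the flex key.
 */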
static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f,
				       u8 ip_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ports match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
		return 0;

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported");
		dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
		return -EINVAL;
	}

	flow_rule_match_ports(rule, &match);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
				       ntohs(match.key->dst),
				       ntohs(match.mask->dst));
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
				       ntohs(match.key->src),
				       ntohs(match.mask->src));
	return 0;
}

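/* TCP flags match. The mask htons(0x0E00) covers the reserved bits of
 * the 16-bit TCP flags word, which cannot be matched on, so such rules
 * are rejected.
 */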
static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule_info *rulei,
				     struct flow_cls_offload *f,
				     u8 ip_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_tcp match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP))
		return 0;

	if (ip_proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP keys supported only for TCP");
		dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
		return -EINVAL;
	}

	flow_rule_match_tcp(rule, &match);

	if (match.mask->flags & htons(0x0E00)) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP flags match not supported on reserved bits");
		dev_err(mlxsw_sp->bus_info->dev, "TCP flags match not supported on reserved bits\n");
		return -EINVAL;
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
				       ntohs(match.key->flags),
				       ntohs(match.mask->flags));
	return 0;
}

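/* TTL/hop-limit and TOS matching. The 8-bit TOS field is split into its
 * 2-bit ECN and 6-bit DSCP components, which the flex keys encode as
 * separate elements.
 */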
static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct flow_cls_offload *f,
				    u16 n_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ip match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP))
		return 0;

	if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "IP keys supported only for IPv4/6");
		dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n");
		return -EINVAL;
	}

	flow_rule_match_ip(rule, &match);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_,
				       match.key->ttl, match.mask->ttl);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN,
				       match.key->tos & 0x3,
				       match.mask->tos & 0x3);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
				       match.key->tos >> 2,
				       match.mask->tos >> 2);

	return 0;
}

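/* Main parsing entry point: reject rules that use dissector keys this
 * driver does not handle, translate each present key into flex key
 * elements, and finally hand the action list over to
 * mlxsw_sp_flower_parse_actions().
 */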
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
		return -EOPNOTSUPP;
	}

	mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);

	err = mlxsw_sp_flower_parse_meta(rulei, f, block);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		n_proto_key = ntohs(match.key->n_proto);
		n_proto_mask = ntohs(match.mask->n_proto);

		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_ETHERTYPE,
					       n_proto_key, n_proto_mask);

		ip_proto = match.key->ip_proto;
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_IP_PROTO,
					       match.key->ip_proto,
					       match.mask->ip_proto);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_32_47,
					       match.key->dst,
					       match.mask->dst, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_0_31,
					       match.key->dst + 2,
					       match.mask->dst + 2, 4);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_32_47,
					       match.key->src,
					       match.mask->src, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_0_31,
					       match.key->src + 2,
					       match.mask->src + 2, 4);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		if (mlxsw_sp_acl_block_is_egress_bound(block)) {
			NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
			return -EOPNOTSUPP;
		}

		/* Forbid block with this rulei to be bound
		 * to egress in future.
		 */
		rulei->egress_bind_blocker = 1;

		if (match.mask->vlan_id != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_VID,
						       match.key->vlan_id,
						       match.mask->vlan_id);
		if (match.mask->vlan_priority != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_PCP,
						       match.key->vlan_priority,
						       match.mask->vlan_priority);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		mlxsw_sp_flower_parse_ipv4(rulei, f);

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
		mlxsw_sp_flower_parse_ipv6(rulei, f);

	err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;
	err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;

	err = mlxsw_sp_flower_parse_ip(mlxsw_sp, rulei, f,
				       n_proto_key & n_proto_mask);
	if (err)
		return err;

	return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei,
					     &f->rule->action,
					     f->common.extack);
}

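/* FLOW_CLS_REPLACE handler: get (or create) the ruleset for the chain,
 * create a rule keyed by the flower cookie, parse matches and actions
 * into it, then commit it and add it to hardware; any failure unwinds
 * through the goto ladder. A rule such as the following (illustrative
 * only, the port name is an example) would reach this path:
 *
 *   tc filter add dev swp1 ingress protocol ip flower skip_sw \
 *	src_ip 192.0.2.1 ip_proto tcp dst_port 80 action drop
 */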
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_block *block,
			    struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie, NULL,
					f->common.extack);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
	if (err)
		goto err_flower_parse;

	err = mlxsw_sp_acl_rulei_commit(rulei);
	if (err)
		goto err_rulei_commit;

	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
	if (err)
		goto err_rule_add;

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_add:
err_rulei_commit:
err_flower_parse:
	mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
err_rule_create:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

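/* FLOW_CLS_DESTROY handler: the rule is identified purely by the flower
 * cookie; if the ruleset or rule cannot be found there is nothing to
 * tear down, so lookup failures are silently ignored.
 */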
void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_block *block,
			     struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (rule) {
		mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
		mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
	}

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}

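/* FLOW_CLS_STATS handler: look the rule up by cookie, read its packet
 * and byte counters plus the last-use timestamp, and report them back
 * through flow_stats_update().
 */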
int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_block *block,
			  struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	u64 packets;
	u64 lastuse;
	u64 bytes;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (WARN_ON(IS_ERR(ruleset)))
		return -EINVAL;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (!rule)
		return -EINVAL;

	err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
					  &lastuse);
	if (err)
		goto err_rule_get_stats;

	flow_stats_update(&f->stats, bytes, packets, lastuse);

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_get_stats:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

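/* Chain template create: parse the template into an on-stack rule info
 * only to learn which flex key elements it uses, then take a ruleset
 * reference keyed on that element usage. The reference is deliberately
 * kept (see the comment below) and is dropped again in
 * mlxsw_sp_flower_tmplt_destroy().
 */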
int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info rulei;
	int err;

	memset(&rulei, 0, sizeof(rulei));
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, &rulei, f);
	if (err)
		return err;
	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER,
					   &rulei.values.elusage);

	/* keep the reference to the ruleset */
	return PTR_ERR_OR_ZERO(ruleset);
}

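/* Chain template destroy: two puts are needed, one for the reference
 * taken by the lookup just above and one for the reference kept by
 * mlxsw_sp_flower_tmplt_create().
 */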
void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_block *block,
				   struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;
	/* put the reference to the ruleset kept in create */
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}