// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>

#include "spectrum.h"
#include "core_acl_flex_keys.h"

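/* Translate the rule's TC actions (accept, drop, trap, goto chain,
 * redirect, mirror, VLAN mangle) into mlxsw ACL actions. A count
 * action is always appended first so rule statistics can be read back.
 */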
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_block *block,
					 struct mlxsw_sp_acl_rule_info *rulei,
					 struct flow_action *flow_action,
					 struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *act;
	int mirror_act_count = 0;
	int err, i;

	if (!flow_action_has_entries(flow_action))
		return 0;

	/* Count action is inserted first */
	err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
	if (err)
		return err;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			err = mlxsw_sp_acl_rulei_act_terminate(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action");
				return err;
			}
			break;
		case FLOW_ACTION_DROP: {
			bool ingress;

			if (mlxsw_sp_acl_block_is_mixed_bound(block)) {
				NL_SET_ERR_MSG_MOD(extack, "Drop action is not supported when block is bound to ingress and egress");
				return -EOPNOTSUPP;
			}
			ingress = mlxsw_sp_acl_block_is_ingress_bound(block);
			err = mlxsw_sp_acl_rulei_act_drop(rulei, ingress,
							  act->cookie, extack);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
				return err;
			}

			/* Forbid block with this rulei to be bound
			 * to ingress/egress in future. Ingress rule is
			 * a blocker for egress and vice versa.
			 */
			if (ingress)
				rulei->egress_bind_blocker = 1;
			else
				rulei->ingress_bind_blocker = 1;
			}
			break;
		case FLOW_ACTION_TRAP:
			err = mlxsw_sp_acl_rulei_act_trap(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action");
				return err;
			}
			break;
		case FLOW_ACTION_GOTO: {
			u32 chain_index = act->chain_index;
			struct mlxsw_sp_acl_ruleset *ruleset;
			u16 group_id;

			ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
							      chain_index,
							      MLXSW_SP_ACL_PROFILE_FLOWER);
			if (IS_ERR(ruleset))
				return PTR_ERR(ruleset);

			group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
			err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action");
				return err;
			}
			}
			break;
		case FLOW_ACTION_REDIRECT: {
			struct net_device *out_dev;
			struct mlxsw_sp_fid *fid;
			u16 fid_index;

			if (mlxsw_sp_acl_block_is_egress_bound(block)) {
				NL_SET_ERR_MSG_MOD(extack, "Redirect action is not supported on egress");
				return -EOPNOTSUPP;
			}

			/* Forbid block with this rulei to be bound
			 * to egress in future.
			 */
			rulei->egress_bind_blocker = 1;

			fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
			fid_index = mlxsw_sp_fid_index(fid);
			err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
							     fid_index, extack);
			if (err)
				return err;

			out_dev = act->dev;
			err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
							 out_dev, extack);
			if (err)
				return err;
			}
			break;
		case FLOW_ACTION_MIRRED: {
			struct net_device *out_dev = act->dev;

			if (mirror_act_count++) {
				NL_SET_ERR_MSG_MOD(extack, "Multiple mirror actions per rule are not supported");
				return -EOPNOTSUPP;
			}

			err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
							    block, out_dev,
							    extack);
			if (err)
				return err;
			}
			break;
		case FLOW_ACTION_VLAN_MANGLE: {
			u16 proto = be16_to_cpu(act->vlan.proto);
			u8 prio = act->vlan.prio;
			u16 vid = act->vlan.vid;

			return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
							   act->id, vid,
							   proto, prio, extack);
			}
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}
	return 0;
}

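/* Match on the ingress port from the META key. The ifindex mask must be
 * exact and the port must belong to the same mlxsw device as the block;
 * the match is programmed as a SRC_SYS_PORT key element.
 */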
static int mlxsw_sp_flower_parse_meta(struct mlxsw_sp_acl_rule_info *rulei,
				      struct flow_cls_offload *f,
				      struct mlxsw_sp_acl_block *block)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *ingress_dev;
	struct flow_match_meta match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return 0;

	flow_rule_match_meta(rule, &match);
	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported ingress ifindex mask");
		return -EINVAL;
	}

	ingress_dev = __dev_get_by_index(block->net,
					 match.key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't find specified ingress port to match on");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(ingress_dev)) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on non-mlxsw ingress port");
		return -EINVAL;
	}

	mlxsw_sp_port = netdev_priv(ingress_dev);
	if (mlxsw_sp_port->mlxsw_sp != block->mlxsw_sp) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on a port from different device");
		return -EINVAL;
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei,
				       MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
				       mlxsw_sp_port->local_port,
				       0xFFFFFFFF);
	return 0;
}

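/* Program IPv4 source and destination address match keys. */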
static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f)
{
	struct flow_match_ipv4_addrs match;

	flow_rule_match_ipv4_addrs(f->rule, &match);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       (char *) &match.key->src,
				       (char *) &match.mask->src, 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       (char *) &match.key->dst,
				       (char *) &match.mask->dst, 4);
}

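/* Program IPv6 address match keys; each 128-bit address is split into
 * four 32-bit flexible key elements.
 */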
static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f)
{
	struct flow_match_ipv6_addrs match;

	flow_rule_match_ipv6_addrs(f->rule, &match);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
				       &match.key->src.s6_addr[0x0],
				       &match.mask->src.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
				       &match.key->src.s6_addr[0x4],
				       &match.mask->src.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
				       &match.key->src.s6_addr[0x8],
				       &match.mask->src.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       &match.key->src.s6_addr[0xC],
				       &match.mask->src.s6_addr[0xC], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
				       &match.key->dst.s6_addr[0x0],
				       &match.mask->dst.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
				       &match.key->dst.s6_addr[0x4],
				       &match.mask->dst.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
				       &match.key->dst.s6_addr[0x8],
				       &match.mask->dst.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       &match.key->dst.s6_addr[0xC],
				       &match.mask->dst.s6_addr[0xC], 4);
}

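/* Match on L4 source/destination ports; valid only together with an
 * explicit TCP or UDP ip_proto match.
 */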
static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f,
				       u8 ip_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ports match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
		return 0;

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported");
		dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
		return -EINVAL;
	}

	flow_rule_match_ports(rule, &match);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
				       ntohs(match.key->dst),
				       ntohs(match.mask->dst));
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
				       ntohs(match.key->src),
				       ntohs(match.mask->src));
	return 0;
}

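/* Match on TCP flags; masks covering the reserved flag bits (0x0E00)
 * are rejected.
 */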
static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule_info *rulei,
				     struct flow_cls_offload *f,
				     u8 ip_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_tcp match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP))
		return 0;

	if (ip_proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP keys supported only for TCP");
		dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
		return -EINVAL;
	}

	flow_rule_match_tcp(rule, &match);

	if (match.mask->flags & htons(0x0E00)) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP flags match not supported on reserved bits");
		dev_err(mlxsw_sp->bus_info->dev, "TCP flags match not supported on reserved bits\n");
		return -EINVAL;
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
				       ntohs(match.key->flags),
				       ntohs(match.mask->flags));
	return 0;
}

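/* Match on IP header fields: TTL and the TOS byte, split into its ECN
 * (low two bits) and DSCP (high six bits) parts.
 */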
static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct flow_cls_offload *f,
				    u16 n_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ip match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP))
		return 0;

	if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "IP keys supported only for IPv4/6");
		dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n");
		return -EINVAL;
	}

	flow_rule_match_ip(rule, &match);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_,
				       match.key->ttl, match.mask->ttl);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN,
				       match.key->tos & 0x3,
				       match.mask->tos & 0x3);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
				       match.key->tos >> 2,
				       match.mask->tos >> 2);

	return 0;
}

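/* Translate a flower classifier into an ACL rule: reject unsupported
 * dissector keys up front, program each supported match key and then
 * parse the actions.
 */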
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
		return -EOPNOTSUPP;
	}

	mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);

	err = mlxsw_sp_flower_parse_meta(rulei, f, block);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		n_proto_key = ntohs(match.key->n_proto);
		n_proto_mask = ntohs(match.mask->n_proto);

		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_ETHERTYPE,
					       n_proto_key, n_proto_mask);

		ip_proto = match.key->ip_proto;
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_IP_PROTO,
					       match.key->ip_proto,
					       match.mask->ip_proto);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_32_47,
					       match.key->dst,
					       match.mask->dst, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_0_31,
					       match.key->dst + 2,
					       match.mask->dst + 2, 4);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_32_47,
					       match.key->src,
					       match.mask->src, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_0_31,
					       match.key->src + 2,
					       match.mask->src + 2, 4);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		if (mlxsw_sp_acl_block_is_egress_bound(block)) {
			NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
			return -EOPNOTSUPP;
		}

		/* Forbid block with this rulei to be bound
		 * to egress in future.
		 */
		rulei->egress_bind_blocker = 1;

		if (match.mask->vlan_id != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_VID,
						       match.key->vlan_id,
						       match.mask->vlan_id);
		if (match.mask->vlan_priority != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_PCP,
						       match.key->vlan_priority,
						       match.mask->vlan_priority);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		mlxsw_sp_flower_parse_ipv4(rulei, f);

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
		mlxsw_sp_flower_parse_ipv6(rulei, f);

	err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;
	err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;

	err = mlxsw_sp_flower_parse_ip(mlxsw_sp, rulei, f, n_proto_key & n_proto_mask);
	if (err)
		return err;

	return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei,
					     &f->rule->action,
					     f->common.extack);
}

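/* Offload a flower rule: get the ruleset for the chain, create a rule
 * keyed by the flower cookie, parse matches and actions into it, then
 * commit it and add it to the hardware.
 */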
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_block *block,
			    struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie, NULL,
					f->common.extack);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
	if (err)
		goto err_flower_parse;

	err = mlxsw_sp_acl_rulei_commit(rulei);
	if (err)
		goto err_rulei_commit;

	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
	if (err)
		goto err_rule_add;

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_add:
err_rulei_commit:
err_flower_parse:
	mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
err_rule_create:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

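/* Remove an offloaded flower rule looked up by its cookie and release
 * the rule and ruleset references.
 */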
void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_block *block,
			     struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (rule) {
		mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
		mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
	}

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}

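/* Report packet/byte counters and last-use time for an offloaded rule
 * back to TC via flow_stats_update().
 */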
int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_block *block,
			  struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	u64 packets;
	u64 lastuse;
	u64 bytes;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (WARN_ON(IS_ERR(ruleset)))
		return -EINVAL;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (!rule)
		return -EINVAL;

	err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
					  &lastuse);
	if (err)
		goto err_rule_get_stats;

	flow_stats_update(&f->stats, bytes, packets, lastuse);

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_get_stats:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

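/* Create a chain template: parse the rule only to learn its key element
 * usage, then hold a ruleset reference sized accordingly until the
 * template is destroyed.
 */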
int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info rulei;
	int err;

	memset(&rulei, 0, sizeof(rulei));
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, &rulei, f);
	if (err)
		return err;
	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER,
					   &rulei.values.elusage);

	/* keep the reference to the ruleset */
	return PTR_ERR_OR_ZERO(ruleset);
}

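/* Destroy a chain template: put both the reference taken by the lookup
 * here and the one kept by mlxsw_sp_flower_tmplt_create().
 */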
void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_block *block,
				   struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;
	/* put the reference to the ruleset kept in create */
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}