// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <trace/events/mlxsw.h>

#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "spectrum_acl_tcam.h"
#include "core_acl_flex_keys.h"

size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	return ops->priv_size;
}
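
/* Intervals (in ms) for the periodic vregion rehash work; see
 * mlxsw_sp_acl_tcam_vregion_rehash_work() below. An interval of zero
 * means rehash is disabled.
 */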
#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT 5000 /* ms */
#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN 3000 /* ms */

int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_acl_tcam *tcam)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	u64 max_tcam_regions;
	u64 max_regions;
	u64 max_groups;
	size_t alloc_size;
	int err;

	tcam->vregion_rehash_intrvl =
			MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT;
	INIT_LIST_HEAD(&tcam->vregion_list);

	max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					      ACL_MAX_TCAM_REGIONS);
	max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);

	/* Use 1:1 mapping between ACL region and TCAM region */
	if (max_tcam_regions < max_regions)
		max_regions = max_tcam_regions;

	alloc_size = sizeof(tcam->used_regions[0]) * BITS_TO_LONGS(max_regions);
	tcam->used_regions = kzalloc(alloc_size, GFP_KERNEL);
	if (!tcam->used_regions)
		return -ENOMEM;
	tcam->max_regions = max_regions;

	max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
	alloc_size = sizeof(tcam->used_groups[0]) * BITS_TO_LONGS(max_groups);
	tcam->used_groups = kzalloc(alloc_size, GFP_KERNEL);
	if (!tcam->used_groups) {
		err = -ENOMEM;
		goto err_alloc_used_groups;
	}
	tcam->max_groups = max_groups;
	tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
						  ACL_MAX_GROUP_SIZE);

	err = ops->init(mlxsw_sp, tcam->priv, tcam);
	if (err)
		goto err_tcam_init;

	return 0;

err_tcam_init:
	kfree(tcam->used_groups);
err_alloc_used_groups:
	kfree(tcam->used_regions);
	return err;
}

void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_tcam *tcam)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->fini(mlxsw_sp, tcam->priv);
	kfree(tcam->used_groups);
	kfree(tcam->used_regions);
}

int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_rule_info *rulei,
				   u32 *priority, bool fillup_priority)
{
	u64 max_priority;

	if (!fillup_priority) {
		*priority = 0;
		return 0;
	}

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE))
		return -EIO;

	/* Priority range is 1..cap_kvd_size-1. */
	max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE) - 1;
	if (rulei->priority >= max_priority)
		return -EINVAL;

	/* Unlike in TC, in HW, higher number means higher priority. */
	*priority = max_priority - rulei->priority;
	return 0;
}

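/* Region and group IDs are handed out from bitmaps sized according to
 * the ACL_MAX_REGIONS and ACL_MAX_GROUPS resources queried at init time.
 */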
static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
					   u16 *p_id)
{
	u16 id;

	id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
	if (id < tcam->max_regions) {
		__set_bit(id, tcam->used_regions);
		*p_id = id;
		return 0;
	}
	return -ENOBUFS;
}

static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
					    u16 id)
{
	__clear_bit(id, tcam->used_regions);
}

static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
					  u16 *p_id)
{
	u16 id;

	id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
	if (id < tcam->max_groups) {
		__set_bit(id, tcam->used_groups);
		*p_id = id;
		return 0;
	}
	return -ENOBUFS;
}

static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
					   u16 id)
{
	__clear_bit(id, tcam->used_groups);
}

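/* A pattern describes the superset of key elements a vregion may use.
 * When a new vregion is created, the first pattern covering the requested
 * element usage is selected (see mlxsw_sp_acl_tcam_vgroup_use_patterns()).
 */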
struct mlxsw_sp_acl_tcam_pattern {
	const enum mlxsw_afk_element *elements;
	unsigned int elements_count;
};

struct mlxsw_sp_acl_tcam_group {
	struct mlxsw_sp_acl_tcam *tcam;
	u16 id;
	struct list_head region_list;
	unsigned int region_count;
};

struct mlxsw_sp_acl_tcam_vgroup {
	struct mlxsw_sp_acl_tcam_group group;
	struct list_head vregion_list;
	struct rhashtable vchunk_ht;
	const struct mlxsw_sp_acl_tcam_pattern *patterns;
	unsigned int patterns_count;
	bool tmplt_elusage_set;
	struct mlxsw_afk_element_usage tmplt_elusage;
};

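/* A vregion is a virtual region backed by one HW region, plus a second,
 * temporary one while entries are being migrated during rehash.
 */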
struct mlxsw_sp_acl_tcam_vregion {
	struct mlxsw_sp_acl_tcam_region *region;
	struct mlxsw_sp_acl_tcam_region *region2; /* Used during migration */
	struct list_head list; /* Member of a TCAM group */
	struct list_head tlist; /* Member of a TCAM */
	struct list_head vchunk_list; /* List of vchunks under this vregion */
	struct mlxsw_afk_key_info *key_info;
	struct mlxsw_sp_acl_tcam *tcam;
	struct delayed_work rehash_dw;
	struct mlxsw_sp *mlxsw_sp;
	bool failed_rollback; /* Indicates failed rollback during migration */
};

struct mlxsw_sp_acl_tcam_vchunk;

struct mlxsw_sp_acl_tcam_chunk {
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	struct mlxsw_sp_acl_tcam_region *region;
	unsigned long priv[0];
	/* priv has to be always the last item */
};

struct mlxsw_sp_acl_tcam_vchunk {
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	struct mlxsw_sp_acl_tcam_chunk *chunk2; /* Used during migration */
	struct list_head list; /* Member of a TCAM vregion */
	struct rhash_head ht_node; /* Member of a chunk HT */
	struct list_head ventry_list;
	unsigned int priority; /* Priority within the vregion and group */
	struct mlxsw_sp_acl_tcam_vgroup *vgroup;
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	unsigned int ref_count;
};

struct mlxsw_sp_acl_tcam_entry {
	struct mlxsw_sp_acl_tcam_ventry *ventry;
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	unsigned long priv[0];
	/* priv has to be always the last item */
};

struct mlxsw_sp_acl_tcam_ventry {
	struct mlxsw_sp_acl_tcam_entry *entry;
	struct list_head list; /* Member of a TCAM vchunk */
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	struct mlxsw_sp_acl_rule_info *rulei;
};

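/* vchunks are hashed by priority, which is unique within a vgroup. */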
static const struct rhashtable_params mlxsw_sp_acl_tcam_vchunk_ht_params = {
	.key_len = sizeof(unsigned int),
	.key_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, priority),
	.head_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, ht_node),
	.automatic_shrinking = true,
};

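/* Push the group's priority-ordered list of regions to HW via the PAGT
 * register. A region whose list successor belongs to the same vregion
 * is marked as "multi".
 */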
static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_acl_tcam_group *group)
{
	struct mlxsw_sp_acl_tcam_region *region;
	char pagt_pl[MLXSW_REG_PAGT_LEN];
	int acl_index = 0;

	mlxsw_reg_pagt_pack(pagt_pl, group->id);
	list_for_each_entry(region, &group->region_list, list) {
		bool multi = false;

		/* Check if the next entry in the list has the same vregion. */
		if (region->list.next != &group->region_list &&
		    list_next_entry(region, list)->vregion == region->vregion)
			multi = true;
		mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++,
					   region->id, multi);
	}
	mlxsw_reg_pagt_size_set(pagt_pl, acl_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl);
}

static int
mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp_acl_tcam *tcam,
			    struct mlxsw_sp_acl_tcam_group *group)
{
	int err;

	group->tcam = tcam;
	INIT_LIST_HEAD(&group->region_list);

	err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id);
	if (err)
		return err;

	return 0;
}

static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp_acl_tcam_group *group)
{
	struct mlxsw_sp_acl_tcam *tcam = group->tcam;

	mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
	WARN_ON(!list_empty(&group->region_list));
}

static int
mlxsw_sp_acl_tcam_vgroup_add(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam *tcam,
			     struct mlxsw_sp_acl_tcam_vgroup *vgroup,
			     const struct mlxsw_sp_acl_tcam_pattern *patterns,
			     unsigned int patterns_count,
			     struct mlxsw_afk_element_usage *tmplt_elusage)
{
	int err;

	vgroup->patterns = patterns;
	vgroup->patterns_count = patterns_count;
	if (tmplt_elusage) {
		vgroup->tmplt_elusage_set = true;
		memcpy(&vgroup->tmplt_elusage, tmplt_elusage,
		       sizeof(vgroup->tmplt_elusage));
	}
	INIT_LIST_HEAD(&vgroup->vregion_list);

	err = mlxsw_sp_acl_tcam_group_add(tcam, &vgroup->group);
	if (err)
		return err;

	err = rhashtable_init(&vgroup->vchunk_ht,
			      &mlxsw_sp_acl_tcam_vchunk_ht_params);
	if (err)
		goto err_rhashtable_init;

	return 0;

err_rhashtable_init:
	mlxsw_sp_acl_tcam_group_del(&vgroup->group);
	return err;
}

static void
mlxsw_sp_acl_tcam_vgroup_del(struct mlxsw_sp_acl_tcam_vgroup *vgroup)
{
	rhashtable_destroy(&vgroup->vchunk_ht);
	mlxsw_sp_acl_tcam_group_del(&vgroup->group);
	WARN_ON(!list_empty(&vgroup->vregion_list));
}

static int
mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_group *group,
			     struct mlxsw_sp_port *mlxsw_sp_port,
			     bool ingress)
{
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
					       MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_BIND, mlxsw_sp_port->local_port,
			    group->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}

static void
mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_group *group,
			       struct mlxsw_sp_port *mlxsw_sp_port,
			       bool ingress)
{
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
					       MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_UNBIND, mlxsw_sp_port->local_port,
			    group->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}

static u16
mlxsw_sp_acl_tcam_group_id(struct mlxsw_sp_acl_tcam_group *group)
{
	return group->id;
}

static unsigned int
mlxsw_sp_acl_tcam_vregion_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;

	if (list_empty(&vregion->vchunk_list))
		return 0;
	/* As the priority of a vregion, use the priority of its first vchunk */
	vchunk = list_first_entry(&vregion->vchunk_list,
				  typeof(*vchunk), list);
	return vchunk->priority;
}

static unsigned int
mlxsw_sp_acl_tcam_vregion_max_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;

	if (list_empty(&vregion->vchunk_list))
		return 0;
	vchunk = list_last_entry(&vregion->vchunk_list,
				 typeof(*vchunk), list);
	return vchunk->priority;
}

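/* Link a region into the group's region list, either right before a
 * given sibling or at the position dictated by its vregion priority,
 * and update the group in HW.
 */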
static int
mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_group *group,
				      struct mlxsw_sp_acl_tcam_region *region,
				      struct mlxsw_sp_acl_tcam_region *next_region)
{
	struct mlxsw_sp_acl_tcam_region *region2;
	struct list_head *pos;
	int err;

	if (group->region_count == group->tcam->max_group_size)
		return -ENOBUFS;

	if (next_region) {
		/* If the next region is defined, place the new one
		 * before it. The next one is a sibling.
		 */
		pos = &next_region->list;
	} else {
		/* Position the region inside the list according to priority */
		list_for_each(pos, &group->region_list) {
			region2 = list_entry(pos, typeof(*region2), list);
			if (mlxsw_sp_acl_tcam_vregion_prio(region2->vregion) >
			    mlxsw_sp_acl_tcam_vregion_prio(region->vregion))
				break;
		}
	}
	list_add_tail(&region->list, pos);
	region->group = group;

	err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	if (err)
		goto err_group_update;

	group->region_count++;
	return 0;

err_group_update:
	list_del(&region->list);
	return err;
}

static void
mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_sp_acl_tcam_group *group = region->group;

	list_del(&region->list);
	group->region_count--;
	mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
}

static int
mlxsw_sp_acl_tcam_vgroup_vregion_attach(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_vgroup *vgroup,
					struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion2;
	struct list_head *pos;
	int err;

	/* Position the vregion inside the list according to priority */
	list_for_each(pos, &vgroup->vregion_list) {
		vregion2 = list_entry(pos, typeof(*vregion2), list);
		if (mlxsw_sp_acl_tcam_vregion_prio(vregion2) >
		    mlxsw_sp_acl_tcam_vregion_prio(vregion))
			break;
	}
	list_add_tail(&vregion->list, pos);

	err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, &vgroup->group,
						    vregion->region, NULL);
	if (err)
		goto err_region_attach;

	return 0;

err_region_attach:
	list_del(&vregion->list);
	return err;
}

static void
mlxsw_sp_acl_tcam_vgroup_vregion_detach(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	list_del(&vregion->list);
	if (vregion->region2)
		mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp,
						      vregion->region2);
	mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, vregion->region);
}

static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vgroup_vregion_find(struct mlxsw_sp_acl_tcam_vgroup *vgroup,
				      unsigned int priority,
				      struct mlxsw_afk_element_usage *elusage,
				      bool *p_need_split)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion, *vregion2;
	struct list_head *pos;
	bool issubset;

	list_for_each(pos, &vgroup->vregion_list) {
		vregion = list_entry(pos, typeof(*vregion), list);

		/* First, check whether the requested priority actually
		 * belongs under one of the following vregions.
		 */
		if (pos->next != &vgroup->vregion_list) { /* not last */
			vregion2 = list_entry(pos->next, typeof(*vregion2),
					      list);
			if (priority >=
			    mlxsw_sp_acl_tcam_vregion_prio(vregion2))
				continue;
		}

		issubset = mlxsw_afk_key_info_subset(vregion->key_info,
						     elusage);

		/* If the requested element usage would not fit and the
		 * priority is lower than that of the currently inspected
		 * vregion, we cannot use this vregion, so return NULL to
		 * indicate a new vregion has to be created.
		 */
		if (!issubset &&
		    priority < mlxsw_sp_acl_tcam_vregion_prio(vregion))
			return NULL;

		/* If the requested element usage would not fit and the
		 * priority is higher than that of the currently inspected
		 * vregion, we cannot use this vregion either. There is still
		 * some hope that the next vregion might fit, so let it be
		 * processed and eventually break at the check right above
		 * this one.
		 */
		if (!issubset &&
		    priority > mlxsw_sp_acl_tcam_vregion_max_prio(vregion))
			continue;

		/* Indicate if the vregion needs to be split in order to add
		 * the requested priority. A split is needed when the
		 * requested element usage won't fit into the found vregion.
		 */
		*p_need_split = !issubset;
		return vregion;
	}
	return NULL; /* New vregion has to be created. */
}

static void
mlxsw_sp_acl_tcam_vgroup_use_patterns(struct mlxsw_sp_acl_tcam_vgroup *vgroup,
				      struct mlxsw_afk_element_usage *elusage,
				      struct mlxsw_afk_element_usage *out)
{
	const struct mlxsw_sp_acl_tcam_pattern *pattern;
	int i;

	/* In case a template is set, skip the pattern lookup and use
	 * the template directly.
	 */
	if (vgroup->tmplt_elusage_set) {
		memcpy(out, &vgroup->tmplt_elusage, sizeof(*out));
		WARN_ON(!mlxsw_afk_element_usage_subset(elusage, out));
		return;
	}

	for (i = 0; i < vgroup->patterns_count; i++) {
		pattern = &vgroup->patterns[i];
		mlxsw_afk_element_usage_fill(out, pattern->elements,
					     pattern->elements_count);
		if (mlxsw_afk_element_usage_subset(elusage, out))
			return;
	}
	memcpy(out, elusage, sizeof(*out));
}

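/* Allocate the region in HW via the PTAR register, which also carries
 * the flex key block encodings the region's key consists of.
 */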
static int
mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_afk_key_info *key_info = region->key_info;
	char ptar_pl[MLXSW_REG_PTAR_LEN];
	unsigned int encodings_count;
	int i;
	int err;

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
			    region->key_type,
			    MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
			    region->id, region->tcam_region_info);
	encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info);
	for (i = 0; i < encodings_count; i++) {
		u16 encoding;

		encoding = mlxsw_afk_key_info_block_encoding_get(key_info, i);
		mlxsw_reg_ptar_key_id_pack(ptar_pl, i, encoding);
	}
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
	if (err)
		return err;
	mlxsw_reg_ptar_unpack(ptar_pl, region->tcam_region_info);
	return 0;
}

static void
mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_region *region)
{
	char ptar_pl[MLXSW_REG_PTAR_LEN];

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE,
			    region->key_type, 0, region->id,
			    region->tcam_region_info);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
}

static int
mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_region *region)
{
	char pacl_pl[MLXSW_REG_PACL_LEN];

	mlxsw_reg_pacl_pack(pacl_pl, region->id, true,
			    region->tcam_region_info);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}

static void
mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	char pacl_pl[MLXSW_REG_PACL_LEN];

	mlxsw_reg_pacl_pack(pacl_pl, region->id, false,
			    region->tcam_region_info);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}

static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam *tcam,
				struct mlxsw_sp_acl_tcam_vregion *vregion,
				void *hints_priv)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_region *region;
	int err;

	region = kzalloc(sizeof(*region) + ops->region_priv_size, GFP_KERNEL);
	if (!region)
		return ERR_PTR(-ENOMEM);
	region->mlxsw_sp = mlxsw_sp;
	region->vregion = vregion;
	region->key_info = vregion->key_info;

	err = mlxsw_sp_acl_tcam_region_id_get(tcam, &region->id);
	if (err)
		goto err_region_id_get;

	err = ops->region_associate(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_associate;

	region->key_type = ops->key_type;
	err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_alloc;

	err = mlxsw_sp_acl_tcam_region_enable(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_enable;

	err = ops->region_init(mlxsw_sp, region->priv, tcam->priv,
			       region, hints_priv);
	if (err)
		goto err_tcam_region_init;

	return region;

err_tcam_region_init:
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
err_tcam_region_enable:
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
err_tcam_region_alloc:
err_tcam_region_associate:
	mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
err_region_id_get:
	kfree(region);
	return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->region_fini(mlxsw_sp, region->priv);
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_id_put(region->group->tcam,
					region->id);
	kfree(region);
}

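/* Periodic rehash: each interval, try to migrate the vregion into a
 * newly created region laid out according to hints obtained from the
 * ops implementation.
 */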
static void
mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	unsigned long interval = vregion->tcam->vregion_rehash_intrvl;

	if (!interval)
		return;
	mlxsw_core_schedule_dw(&vregion->rehash_dw,
			       msecs_to_jiffies(interval));
}

static int
mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vregion *vregion);

static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion =
		container_of(work, struct mlxsw_sp_acl_tcam_vregion,
			     rehash_dw.work);

	/* TODO: Take rtnl lock here as the rest of the code counts on it
	 * now. Later, this should be replaced by per-vregion lock.
	 */
	rtnl_lock();
	mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion);
	rtnl_unlock();
	mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
}

static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vregion_create(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam *tcam,
				 struct mlxsw_afk_element_usage *elusage)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	int err;

	vregion = kzalloc(sizeof(*vregion), GFP_KERNEL);
	if (!vregion)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&vregion->vchunk_list);
	vregion->tcam = tcam;
	vregion->mlxsw_sp = mlxsw_sp;

	vregion->key_info = mlxsw_afk_key_info_get(afk, elusage);
	if (IS_ERR(vregion->key_info)) {
		err = PTR_ERR(vregion->key_info);
		goto err_key_info_get;
	}

	vregion->region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, tcam,
							  vregion, NULL);
	if (IS_ERR(vregion->region)) {
		err = PTR_ERR(vregion->region);
		goto err_region_create;
	}

	list_add_tail(&vregion->tlist, &tcam->vregion_list);

	if (ops->region_rehash_hints_get) {
		/* Create the delayed work for vregion periodic rehash */
		INIT_DELAYED_WORK(&vregion->rehash_dw,
				  mlxsw_sp_acl_tcam_vregion_rehash_work);
		mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
	}

	return vregion;

err_region_create:
	mlxsw_afk_key_info_put(vregion->key_info);
err_key_info_get:
	kfree(vregion);
	return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	if (ops->region_rehash_hints_get)
		cancel_delayed_work_sync(&vregion->rehash_dw);
	list_del(&vregion->tlist);
	if (vregion->region2)
		mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region2);
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region);
	mlxsw_afk_key_info_put(vregion->key_info);
	kfree(vregion);
}

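/* Runtime control of the rehash interval. A non-zero value below the
 * minimum is rejected; zero disables rehash altogether.
 */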
u32 mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_acl_tcam *tcam)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	u32 vregion_rehash_intrvl;

	if (WARN_ON(!ops->region_rehash_hints_get))
		return 0;
	vregion_rehash_intrvl = tcam->vregion_rehash_intrvl;
	return vregion_rehash_intrvl;
}

int mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_acl_tcam *tcam,
						u32 val)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_vregion *vregion;

	if (val < MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN && val)
		return -EINVAL;
	if (WARN_ON(!ops->region_rehash_hints_get))
		return -EOPNOTSUPP;
	tcam->vregion_rehash_intrvl = val;
	rtnl_lock();
	list_for_each_entry(vregion, &tcam->vregion_list, tlist) {
		if (val)
			mlxsw_core_schedule_dw(&vregion->rehash_dw, 0);
		else
			cancel_delayed_work_sync(&vregion->rehash_dw);
	}
	rtnl_unlock();
	return 0;
}

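/* Find a vregion this vchunk can live in according to its priority and
 * element usage, creating a new vregion if none fits, and link the
 * vchunk to it.
 */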
static int
mlxsw_sp_acl_tcam_vchunk_assoc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_vgroup *vgroup,
			       unsigned int priority,
			       struct mlxsw_afk_element_usage *elusage,
			       struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	bool vregion_created = false;
	bool need_split;
	int err;

	vregion = mlxsw_sp_acl_tcam_vgroup_vregion_find(vgroup, priority,
							elusage, &need_split);
	if (vregion && need_split) {
		/* According to priority, the vchunk should belong to an
		 * existing vregion. However, this vchunk needs elements
		 * that the vregion does not contain. We would need to split
		 * the existing vregion into two and create a new vregion
		 * for this vchunk in between. This is not currently
		 * supported.
		 */
		return -EOPNOTSUPP;
	}
	if (!vregion) {
		struct mlxsw_afk_element_usage vregion_elusage;

		mlxsw_sp_acl_tcam_vgroup_use_patterns(vgroup, elusage,
						      &vregion_elusage);
		vregion = mlxsw_sp_acl_tcam_vregion_create(mlxsw_sp,
							   vgroup->group.tcam,
							   &vregion_elusage);
		if (IS_ERR(vregion))
			return PTR_ERR(vregion);
		vregion_created = true;
	}

	vchunk->vregion = vregion;
	list_add_tail(&vchunk->list, &vregion->vchunk_list);

	if (!vregion_created)
		return 0;

	err = mlxsw_sp_acl_tcam_vgroup_vregion_attach(mlxsw_sp, vgroup,
						      vregion);
	if (err)
		goto err_vgroup_vregion_attach;

	return 0;

err_vgroup_vregion_attach:
	mlxsw_sp_acl_tcam_vregion_destroy(mlxsw_sp, vregion);
	return err;
}

static void
mlxsw_sp_acl_tcam_vchunk_deassoc(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;

	list_del(&vchunk->list);
	if (list_empty(&vregion->vchunk_list)) {
		mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion);
		mlxsw_sp_acl_tcam_vregion_destroy(mlxsw_sp, vregion);
	}
}

static struct mlxsw_sp_acl_tcam_chunk *
mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_vchunk *vchunk,
			       struct mlxsw_sp_acl_tcam_region *region)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_chunk *chunk;

	chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL);
	if (!chunk)
		return ERR_PTR(-ENOMEM);
	chunk->vchunk = vchunk;
	chunk->region = region;

	ops->chunk_init(region->priv, chunk->priv, vchunk->priority);
	return chunk;
}

static void
mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->chunk_fini(chunk->priv);
	kfree(chunk);
}

static struct mlxsw_sp_acl_tcam_vchunk *
mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_vgroup *vgroup,
				unsigned int priority,
				struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	int err;

	if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
		return ERR_PTR(-EINVAL);

	vchunk = kzalloc(sizeof(*vchunk), GFP_KERNEL);
	if (!vchunk)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&vchunk->ventry_list);
	vchunk->priority = priority;
	vchunk->vgroup = vgroup;
	vchunk->ref_count = 1;

	err = mlxsw_sp_acl_tcam_vchunk_assoc(mlxsw_sp, vgroup, priority,
					     elusage, vchunk);
	if (err)
		goto err_vchunk_assoc;

	err = rhashtable_insert_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
				     mlxsw_sp_acl_tcam_vchunk_ht_params);
	if (err)
		goto err_rhashtable_insert;

	vchunk->chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk,
						       vchunk->vregion->region);
	if (IS_ERR(vchunk->chunk)) {
		err = PTR_ERR(vchunk->chunk);
		goto err_chunk_create;
	}

	return vchunk;

err_chunk_create:
	rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
			       mlxsw_sp_acl_tcam_vchunk_ht_params);
err_rhashtable_insert:
	mlxsw_sp_acl_tcam_vchunk_deassoc(mlxsw_sp, vchunk);
err_vchunk_assoc:
	kfree(vchunk);
	return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_vchunk_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
	struct mlxsw_sp_acl_tcam_vgroup *vgroup = vchunk->vgroup;

	if (vchunk->chunk2)
		mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk);
	rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
			       mlxsw_sp_acl_tcam_vchunk_ht_params);
	mlxsw_sp_acl_tcam_vchunk_deassoc(mlxsw_sp, vchunk);
	kfree(vchunk);
}

static struct mlxsw_sp_acl_tcam_vchunk *
mlxsw_sp_acl_tcam_vchunk_get(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_vgroup *vgroup,
			     unsigned int priority,
			     struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;

	vchunk = rhashtable_lookup_fast(&vgroup->vchunk_ht, &priority,
					mlxsw_sp_acl_tcam_vchunk_ht_params);
	if (vchunk) {
		if (WARN_ON(!mlxsw_afk_key_info_subset(vchunk->vregion->key_info,
						       elusage)))
			return ERR_PTR(-EINVAL);
		vchunk->ref_count++;
		return vchunk;
	}
	return mlxsw_sp_acl_tcam_vchunk_create(mlxsw_sp, vgroup,
					       priority, elusage);
}

static void
mlxsw_sp_acl_tcam_vchunk_put(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
	if (--vchunk->ref_count)
		return;
	mlxsw_sp_acl_tcam_vchunk_destroy(mlxsw_sp, vchunk);
}

static struct mlxsw_sp_acl_tcam_entry *
mlxsw_sp_acl_tcam_entry_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_ventry *ventry,
			       struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_entry *entry;
	int err;

	entry = kzalloc(sizeof(*entry) + ops->entry_priv_size, GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);
	entry->ventry = ventry;
	entry->chunk = chunk;

	err = ops->entry_add(mlxsw_sp, chunk->region->priv, chunk->priv,
			     entry->priv, ventry->rulei);
	if (err)
		goto err_entry_add;

	return entry;

err_entry_add:
	kfree(entry);
	return ERR_PTR(err);
}

static void mlxsw_sp_acl_tcam_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					    struct mlxsw_sp_acl_tcam_entry *entry)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->entry_del(mlxsw_sp, entry->chunk->region->priv,
		       entry->chunk->priv, entry->priv);
	kfree(entry);
}

static int
mlxsw_sp_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_tcam_region *region,
				       struct mlxsw_sp_acl_tcam_entry *entry,
				       struct mlxsw_sp_acl_rule_info *rulei)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	return ops->entry_action_replace(mlxsw_sp, region->priv,
					 entry->priv, rulei);
}

static int
mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_entry *entry,
				     bool *activity)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	return ops->entry_activity_get(mlxsw_sp, entry->chunk->region->priv,
				       entry->priv, activity);
}

static int mlxsw_sp_acl_tcam_ventry_add(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_vgroup *vgroup,
					struct mlxsw_sp_acl_tcam_ventry *ventry,
					struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	int err;

	vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp, vgroup, rulei->priority,
					      &rulei->values.elusage);
	if (IS_ERR(vchunk))
		return PTR_ERR(vchunk);

	ventry->vchunk = vchunk;
	ventry->rulei = rulei;
	ventry->entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry,
						       vchunk->chunk);
	if (IS_ERR(ventry->entry)) {
		err = PTR_ERR(ventry->entry);
		goto err_entry_create;
	}

	list_add_tail(&ventry->list, &vchunk->ventry_list);

	return 0;

err_entry_create:
	mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk);
	return err;
}

static void mlxsw_sp_acl_tcam_ventry_del(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_tcam_ventry *ventry)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk;

	list_del(&ventry->list);
	mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
	mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk);
}

static int
mlxsw_sp_acl_tcam_ventry_action_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_ventry *ventry,
					struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk;

	return mlxsw_sp_acl_tcam_entry_action_replace(mlxsw_sp,
						      vchunk->vregion->region,
						      ventry->entry, rulei);
}

static int
mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_ventry *ventry,
				      bool *activity)
{
	return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp,
						    ventry->entry, activity);
}

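/* Migration helpers used by rehash: each ventry gets a new entry created
 * in the destination chunk before its old entry is destroyed, so the
 * rule is never absent from HW during the move.
 */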
static int
mlxsw_sp_acl_tcam_ventry_migrate(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_ventry *ventry,
				 struct mlxsw_sp_acl_tcam_chunk *chunk2)
{
	struct mlxsw_sp_acl_tcam_entry *entry2;

	entry2 = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry, chunk2);
	if (IS_ERR(entry2))
		return PTR_ERR(entry2);
	mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
	ventry->entry = entry2;
	return 0;
}

static int
mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_vchunk *vchunk,
				     struct mlxsw_sp_acl_tcam_region *region,
				     bool this_is_rollback)
{
	struct mlxsw_sp_acl_tcam_ventry *ventry;
	struct mlxsw_sp_acl_tcam_chunk *chunk2;
	int err;
	int err2;

	chunk2 = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
	if (IS_ERR(chunk2)) {
		if (this_is_rollback)
			vchunk->vregion->failed_rollback = true;
		return PTR_ERR(chunk2);
	}
	vchunk->chunk2 = chunk2;
	list_for_each_entry(ventry, &vchunk->ventry_list, list) {
		err = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry,
						       vchunk->chunk2);
		if (err) {
			if (this_is_rollback) {
				vchunk->vregion->failed_rollback = true;
				return err;
			}
			goto rollback;
		}
	}
	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk);
	vchunk->chunk = chunk2;
	vchunk->chunk2 = NULL;
	return 0;

rollback:
	/* Migrate the entries back to the original chunk. If some entry
	 * migration fails, there's no good way to proceed. Set the
	 * vregion's "failed_rollback" flag.
	 */
	list_for_each_entry_continue_reverse(ventry, &vchunk->ventry_list,
					     list) {
		err2 = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry,
							vchunk->chunk);
		if (err2) {
			vchunk->vregion->failed_rollback = true;
			goto err_rollback;
		}
	}

	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
	vchunk->chunk2 = NULL;

err_rollback:
	return err;
}

static int
mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	int err;

	list_for_each_entry(vchunk, &vregion->vchunk_list, list) {
		err = mlxsw_sp_acl_tcam_vchunk_migrate_one(mlxsw_sp, vchunk,
							   vregion->region2,
							   false);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	list_for_each_entry_continue_reverse(vchunk, &vregion->vchunk_list,
					     list) {
		mlxsw_sp_acl_tcam_vchunk_migrate_one(mlxsw_sp, vchunk,
						     vregion->region, true);
	}
	return err;
}

static int
mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_tcam_vregion *vregion,
				  void *hints_priv)
{
	struct mlxsw_sp_acl_tcam_region *region2, *unused_region;
	int err;

	trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);

	region2 = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, vregion->tcam,
						  vregion, hints_priv);
	if (IS_ERR(region2))
		return PTR_ERR(region2);

	vregion->region2 = region2;
	err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp,
						    vregion->region->group,
						    region2, vregion->region);
	if (err)
		goto err_group_region_attach;

	err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion);
	if (!vregion->failed_rollback) {
		if (!err) {
			/* In case of successful migration, region2 is used and
			 * the original is unused.
			 */
			unused_region = vregion->region;
			vregion->region = vregion->region2;
		} else {
			/* In case of failure during migration, the original
			 * region is still used.
			 */
			unused_region = vregion->region2;
		}
		vregion->region2 = NULL;
		mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, unused_region);
		mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, unused_region);
	}
	return err;

err_group_region_attach:
	vregion->region2 = NULL;
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region2);
	return err;
}

static int
mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	void *hints_priv;
	int err;

	trace_mlxsw_sp_acl_tcam_vregion_rehash(mlxsw_sp, vregion);
	if (vregion->failed_rollback)
		return -EBUSY;

	hints_priv = ops->region_rehash_hints_get(vregion->region->priv);
	if (IS_ERR(hints_priv)) {
		err = PTR_ERR(hints_priv);
		if (err != -EAGAIN)
			dev_err(mlxsw_sp->bus_info->dev, "Failed get rehash hints\n");
		return err;
	}

	err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion, hints_priv);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
		if (vregion->failed_rollback) {
			trace_mlxsw_sp_acl_tcam_vregion_rehash_dis(mlxsw_sp,
								   vregion);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to rollback after failed vregion migration\n");
		}
	}

	ops->region_rehash_hints_put(hints_priv);
	return err;
}

static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
	MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
	MLXSW_AFK_ELEMENT_DMAC_32_47,
	MLXSW_AFK_ELEMENT_DMAC_0_31,
	MLXSW_AFK_ELEMENT_SMAC_32_47,
	MLXSW_AFK_ELEMENT_SMAC_0_31,
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
	MLXSW_AFK_ELEMENT_SRC_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
	MLXSW_AFK_ELEMENT_VID,
	MLXSW_AFK_ELEMENT_PCP,
	MLXSW_AFK_ELEMENT_TCP_FLAGS,
	MLXSW_AFK_ELEMENT_IP_TTL_,
	MLXSW_AFK_ELEMENT_IP_ECN,
	MLXSW_AFK_ELEMENT_IP_DSCP,
};

static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
	MLXSW_AFK_ELEMENT_SRC_IP_96_127,
	MLXSW_AFK_ELEMENT_SRC_IP_64_95,
	MLXSW_AFK_ELEMENT_SRC_IP_32_63,
	MLXSW_AFK_ELEMENT_SRC_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_IP_96_127,
	MLXSW_AFK_ELEMENT_DST_IP_64_95,
	MLXSW_AFK_ELEMENT_DST_IP_32_63,
	MLXSW_AFK_ELEMENT_DST_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
};

static const struct mlxsw_sp_acl_tcam_pattern mlxsw_sp_acl_tcam_patterns[] = {
	{
		.elements = mlxsw_sp_acl_tcam_pattern_ipv4,
		.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv4),
	},
	{
		.elements = mlxsw_sp_acl_tcam_pattern_ipv6,
		.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv6),
	},
};

#define MLXSW_SP_ACL_TCAM_PATTERNS_COUNT \
	ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns)

struct mlxsw_sp_acl_tcam_flower_ruleset {
	struct mlxsw_sp_acl_tcam_vgroup vgroup;
};

struct mlxsw_sp_acl_tcam_flower_rule {
	struct mlxsw_sp_acl_tcam_ventry ventry;
};

static int
mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam *tcam,
				     void *ruleset_priv,
				     struct mlxsw_afk_element_usage *tmplt_elusage)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
					    mlxsw_sp_acl_tcam_patterns,
					    MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
					    tmplt_elusage);
}

static void
mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
				     void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
}

static int
mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
				      void *ruleset_priv,
				      struct mlxsw_sp_port *mlxsw_sp_port,
				      bool ingress)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->vgroup.group,
					    mlxsw_sp_port, ingress);
}

static void
mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
					void *ruleset_priv,
					struct mlxsw_sp_port *mlxsw_sp_port,
					bool ingress)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->vgroup.group,
				       mlxsw_sp_port, ingress);
}

static u16
mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group);
}

static int
mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
				  void *ruleset_priv, void *rule_priv,
				  struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup,
					    &rule->ventry, rulei);
}

static void
mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
{
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry);
}

static int
mlxsw_sp_acl_tcam_flower_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
					     void *rule_priv,
					     struct mlxsw_sp_acl_rule_info *rulei)
{
	return -EOPNOTSUPP;
}

static int
mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
					   void *rule_priv, bool *activity)
{
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_activity_get(mlxsw_sp, &rule->ventry,
						     activity);
}

static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
	.ruleset_priv_size	= sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset),
	.ruleset_add		= mlxsw_sp_acl_tcam_flower_ruleset_add,
	.ruleset_del		= mlxsw_sp_acl_tcam_flower_ruleset_del,
	.ruleset_bind		= mlxsw_sp_acl_tcam_flower_ruleset_bind,
	.ruleset_unbind		= mlxsw_sp_acl_tcam_flower_ruleset_unbind,
	.ruleset_group_id	= mlxsw_sp_acl_tcam_flower_ruleset_group_id,
	.rule_priv_size		= sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
	.rule_add		= mlxsw_sp_acl_tcam_flower_rule_add,
	.rule_del		= mlxsw_sp_acl_tcam_flower_rule_del,
	.rule_action_replace	= mlxsw_sp_acl_tcam_flower_rule_action_replace,
	.rule_activity_get	= mlxsw_sp_acl_tcam_flower_rule_activity_get,
};

struct mlxsw_sp_acl_tcam_mr_ruleset {
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	struct mlxsw_sp_acl_tcam_vgroup vgroup;
};

struct mlxsw_sp_acl_tcam_mr_rule {
	struct mlxsw_sp_acl_tcam_ventry ventry;
};

static int
mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam *tcam,
				 void *ruleset_priv,
				 struct mlxsw_afk_element_usage *tmplt_elusage)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
	int err;

	err = mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
					   mlxsw_sp_acl_tcam_patterns,
					   MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
					   tmplt_elusage);
	if (err)
		return err;

	/* For most TCAM clients it would make sense to take a TCAM chunk
	 * only when the first rule is written. This is not the case for
	 * the multicast router, which must be bound to a specific ACL
	 * group ID that has to exist in HW before the multicast router
	 * is initialized.
	 */
	ruleset->vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp,
						       &ruleset->vgroup, 1,
						       tmplt_elusage);
	if (IS_ERR(ruleset->vchunk)) {
		err = PTR_ERR(ruleset->vchunk);
		goto err_chunk_get;
	}

	return 0;

err_chunk_get:
	mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
	return err;
}

static void
mlxsw_sp_acl_tcam_mr_ruleset_del(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, ruleset->vchunk);
	mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
}

static int
mlxsw_sp_acl_tcam_mr_ruleset_bind(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
				  struct mlxsw_sp_port *mlxsw_sp_port,
				  bool ingress)
{
	/* Binding is done when initializing multicast router */
	return 0;
}

static void
mlxsw_sp_acl_tcam_mr_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
				    void *ruleset_priv,
				    struct mlxsw_sp_port *mlxsw_sp_port,
				    bool ingress)
{
}

static u16
mlxsw_sp_acl_tcam_mr_ruleset_group_id(void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group);
}

static int
mlxsw_sp_acl_tcam_mr_rule_add(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
			      void *rule_priv,
			      struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup,
					    &rule->ventry, rulei);
}

static void
mlxsw_sp_acl_tcam_mr_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
{
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

	mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry);
}

static int
mlxsw_sp_acl_tcam_mr_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
					 void *rule_priv,
					 struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_action_replace(mlxsw_sp, &rule->ventry,
						       rulei);
}

static int
mlxsw_sp_acl_tcam_mr_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
				       void *rule_priv, bool *activity)
{
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_activity_get(mlxsw_sp, &rule->ventry,
						     activity);
}

static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_mr_ops = {
	.ruleset_priv_size	= sizeof(struct mlxsw_sp_acl_tcam_mr_ruleset),
	.ruleset_add		= mlxsw_sp_acl_tcam_mr_ruleset_add,
	.ruleset_del		= mlxsw_sp_acl_tcam_mr_ruleset_del,
	.ruleset_bind		= mlxsw_sp_acl_tcam_mr_ruleset_bind,
	.ruleset_unbind		= mlxsw_sp_acl_tcam_mr_ruleset_unbind,
	.ruleset_group_id	= mlxsw_sp_acl_tcam_mr_ruleset_group_id,
	.rule_priv_size		= sizeof(struct mlxsw_sp_acl_tcam_mr_rule),
	.rule_add		= mlxsw_sp_acl_tcam_mr_rule_add,
	.rule_del		= mlxsw_sp_acl_tcam_mr_rule_del,
	.rule_action_replace	= mlxsw_sp_acl_tcam_mr_rule_action_replace,
	.rule_activity_get	= mlxsw_sp_acl_tcam_mr_rule_activity_get,
};

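/* Map ACL profiles to the flower and multicast router flavors of the
 * TCAM API; used by the generic ACL code to dispatch operations.
 */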
static const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops_arr[] = {
	[MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
	[MLXSW_SP_ACL_PROFILE_MR] = &mlxsw_sp_acl_tcam_mr_ops,
};

const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
			      enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;

	if (WARN_ON(profile >= ARRAY_SIZE(mlxsw_sp_acl_tcam_profile_ops_arr)))
		return NULL;
	ops = mlxsw_sp_acl_tcam_profile_ops_arr[profile];
	if (WARN_ON(!ops))
		return NULL;
	return ops;
}