// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <trace/events/mlxsw.h>

#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "spectrum_acl_tcam.h"
#include "core_acl_flex_keys.h"

21 22 23 24 25 26
size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	return ops->priv_size;
}

/* Default delay between two periodic vregion rehash invocations. */
#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT 5000 /* ms */
/* Lowest non-zero interval accepted by the rehash interval setter. */
#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN 3000 /* ms */

31 32
int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_acl_tcam *tcam)
33
{
34
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
35 36 37 38 39 40
	u64 max_tcam_regions;
	u64 max_regions;
	u64 max_groups;
	size_t alloc_size;
	int err;

41
	mutex_init(&tcam->lock);
42 43 44 45
	tcam->vregion_rehash_intrvl =
			MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT;
	INIT_LIST_HEAD(&tcam->vregion_list);

46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69
	max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					      ACL_MAX_TCAM_REGIONS);
	max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);

	/* Use 1:1 mapping between ACL region and TCAM region */
	if (max_tcam_regions < max_regions)
		max_regions = max_tcam_regions;

	alloc_size = sizeof(tcam->used_regions[0]) * BITS_TO_LONGS(max_regions);
	tcam->used_regions = kzalloc(alloc_size, GFP_KERNEL);
	if (!tcam->used_regions)
		return -ENOMEM;
	tcam->max_regions = max_regions;

	max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
	alloc_size = sizeof(tcam->used_groups[0]) * BITS_TO_LONGS(max_groups);
	tcam->used_groups = kzalloc(alloc_size, GFP_KERNEL);
	if (!tcam->used_groups) {
		err = -ENOMEM;
		goto err_alloc_used_groups;
	}
	tcam->max_groups = max_groups;
	tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
						 ACL_MAX_GROUP_SIZE);
70 71 72 73 74

	err = ops->init(mlxsw_sp, tcam->priv, tcam);
	if (err)
		goto err_tcam_init;

75 76
	return 0;

77 78
err_tcam_init:
	kfree(tcam->used_groups);
79 80 81 82 83
err_alloc_used_groups:
	kfree(tcam->used_regions);
	return err;
}

84 85
void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_tcam *tcam)
86
{
87
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
88

89
	mutex_destroy(&tcam->lock);
90
	ops->fini(mlxsw_sp, tcam->priv);
91 92 93 94
	kfree(tcam->used_groups);
	kfree(tcam->used_regions);
}

95 96 97 98 99 100 101 102 103 104 105 106 107 108
int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_rule_info *rulei,
				   u32 *priority, bool fillup_priority)
{
	u64 max_priority;

	if (!fillup_priority) {
		*priority = 0;
		return 0;
	}

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE))
		return -EIO;

109 110 111
	/* Priority range is 1..cap_kvd_size-1. */
	max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE) - 1;
	if (rulei->priority >= max_priority)
112 113 114 115 116 117 118
		return -EINVAL;

	/* Unlike in TC, in HW, higher number means higher priority. */
	*priority = max_priority - rulei->priority;
	return 0;
}

119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166
static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
					   u16 *p_id)
{
	u16 id;

	id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
	if (id < tcam->max_regions) {
		__set_bit(id, tcam->used_regions);
		*p_id = id;
		return 0;
	}
	return -ENOBUFS;
}

/* Return a region ID allocated by mlxsw_sp_acl_tcam_region_id_get(). */
static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
					    u16 id)
{
	__clear_bit(id, tcam->used_regions);
}

/* Allocate the lowest free group ID from the bitmap.
 * Returns 0 and stores the ID in *p_id, or -ENOBUFS when exhausted.
 */
static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
					  u16 *p_id)
{
	u16 free_id;

	free_id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
	if (free_id >= tcam->max_groups)
		return -ENOBUFS;
	__set_bit(free_id, tcam->used_groups);
	*p_id = free_id;
	return 0;
}

/* Return a group ID allocated by mlxsw_sp_acl_tcam_group_id_get(). */
static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
					   u16 id)
{
	__clear_bit(id, tcam->used_groups);
}

/* A fixed set of flex-key elements. A vgroup selects the first of its
 * patterns that covers a rule's element usage (see
 * mlxsw_sp_acl_tcam_vgroup_use_patterns()).
 */
struct mlxsw_sp_acl_tcam_pattern {
	const enum mlxsw_afk_element *elements;
	unsigned int elements_count;
};

struct mlxsw_sp_acl_tcam_group {
	struct mlxsw_sp_acl_tcam *tcam;
	u16 id;
167
	struct mutex lock; /* guards region list updates */
168
	struct list_head region_list;
169
	unsigned int region_count;
170 171 172 173 174
};

struct mlxsw_sp_acl_tcam_vgroup {
	struct mlxsw_sp_acl_tcam_group group;
	struct list_head vregion_list;
175
	struct rhashtable vchunk_ht;
176 177
	const struct mlxsw_sp_acl_tcam_pattern *patterns;
	unsigned int patterns_count;
178 179
	bool tmplt_elusage_set;
	struct mlxsw_afk_element_usage tmplt_elusage;
180
	bool vregion_rehash_enabled;
181 182
};

183
struct mlxsw_sp_acl_tcam_vregion {
184 185 186
	struct mutex lock; /* Protects consistency of region, region2 pointers
			    * and vchunk_list.
			    */
187
	struct mlxsw_sp_acl_tcam_region *region;
188
	struct mlxsw_sp_acl_tcam_region *region2; /* Used during migration */
189
	struct list_head list; /* Member of a TCAM group */
190
	struct list_head tlist; /* Member of a TCAM */
191
	struct list_head vchunk_list; /* List of vchunks under this vregion */
192
	struct mlxsw_afk_key_info *key_info;
193
	struct mlxsw_sp_acl_tcam *tcam;
194
	struct mlxsw_sp_acl_tcam_vgroup *vgroup;
195 196 197
	struct delayed_work rehash_dw;
	struct mlxsw_sp *mlxsw_sp;
	bool failed_rollback; /* Indicates failed rollback during migration */
198
	unsigned int ref_count;
199 200
};

struct mlxsw_sp_acl_tcam_vchunk;

/* HW instantiation of a vchunk inside one particular HW region.
 * Flavour-specific private data follows the structure.
 */
struct mlxsw_sp_acl_tcam_chunk {
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	struct mlxsw_sp_acl_tcam_region *region;
	unsigned long priv[];
	/* priv has to be always the last item */
};

struct mlxsw_sp_acl_tcam_vchunk {
	struct mlxsw_sp_acl_tcam_chunk *chunk;
212
	struct mlxsw_sp_acl_tcam_chunk *chunk2; /* Used during migration */
213
	struct list_head list; /* Member of a TCAM vregion */
214
	struct rhash_head ht_node; /* Member of a chunk HT */
215
	struct list_head ventry_list;
216
	unsigned int priority; /* Priority within the vregion and group */
217
	struct mlxsw_sp_acl_tcam_vgroup *vgroup;
218
	struct mlxsw_sp_acl_tcam_vregion *vregion;
219 220 221 222
	unsigned int ref_count;
};

/* HW instantiation of a ventry inside one particular HW chunk.
 * Flavour-specific private data follows the structure.
 */
struct mlxsw_sp_acl_tcam_entry {
	struct mlxsw_sp_acl_tcam_ventry *ventry;
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	unsigned long priv[];
	/* priv has to be always the last item */
};

229 230
struct mlxsw_sp_acl_tcam_ventry {
	struct mlxsw_sp_acl_tcam_entry *entry;
231
	struct list_head list; /* Member of a TCAM vchunk */
232
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
233
	struct mlxsw_sp_acl_rule_info *rulei;
234 235
};

236
static const struct rhashtable_params mlxsw_sp_acl_tcam_vchunk_ht_params = {
237
	.key_len = sizeof(unsigned int),
238 239
	.key_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, priority),
	.head_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, ht_node),
240 241 242 243 244 245
	.automatic_shrinking = true,
};

static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_acl_tcam_group *group)
{
246
	struct mlxsw_sp_acl_tcam_region *region;
247 248 249 250
	char pagt_pl[MLXSW_REG_PAGT_LEN];
	int acl_index = 0;

	mlxsw_reg_pagt_pack(pagt_pl, group->id);
251 252 253 254 255 256 257
	list_for_each_entry(region, &group->region_list, list) {
		bool multi = false;

		/* Check if the next entry in the list has the same vregion. */
		if (region->list.next != &group->region_list &&
		    list_next_entry(region, list)->vregion == region->vregion)
			multi = true;
258
		mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++,
259
					   region->id, multi);
260
	}
261 262 263 264 265
	mlxsw_reg_pagt_size_set(pagt_pl, acl_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl);
}

static int
266 267
mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp_acl_tcam *tcam,
			    struct mlxsw_sp_acl_tcam_group *group)
268 269 270 271
{
	int err;

	group->tcam = tcam;
272
	mutex_init(&group->lock);
273 274 275 276 277 278 279 280 281 282 283 284 285
	INIT_LIST_HEAD(&group->region_list);

	err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id);
	if (err)
		return err;

	return 0;
}

/* Reverse of mlxsw_sp_acl_tcam_group_add(). The region list is expected
 * to be empty by now.
 */
static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp_acl_tcam_group *group)
{
	struct mlxsw_sp_acl_tcam *tcam = group->tcam;

	mutex_destroy(&group->lock);
	mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
	WARN_ON(!list_empty(&group->region_list));
}

/* Initialize a vgroup: record the pattern set and optional element-usage
 * template, create the underlying HW group and the vchunk hash table.
 */
static int
mlxsw_sp_acl_tcam_vgroup_add(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam *tcam,
			     struct mlxsw_sp_acl_tcam_vgroup *vgroup,
			     const struct mlxsw_sp_acl_tcam_pattern *patterns,
			     unsigned int patterns_count,
			     struct mlxsw_afk_element_usage *tmplt_elusage,
			     bool vregion_rehash_enabled)
{
	int err;

	vgroup->patterns = patterns;
	vgroup->patterns_count = patterns_count;
	vgroup->vregion_rehash_enabled = vregion_rehash_enabled;

	if (tmplt_elusage) {
		vgroup->tmplt_elusage_set = true;
		memcpy(&vgroup->tmplt_elusage, tmplt_elusage,
		       sizeof(vgroup->tmplt_elusage));
	}
	INIT_LIST_HEAD(&vgroup->vregion_list);

	err = mlxsw_sp_acl_tcam_group_add(tcam, &vgroup->group);
	if (err)
		return err;

	err = rhashtable_init(&vgroup->vchunk_ht,
			      &mlxsw_sp_acl_tcam_vchunk_ht_params);
	if (err)
		goto err_rhashtable_init;

	return 0;

err_rhashtable_init:
	mlxsw_sp_acl_tcam_group_del(&vgroup->group);
	return err;
}

329 330
static void
mlxsw_sp_acl_tcam_vgroup_del(struct mlxsw_sp_acl_tcam_vgroup *vgroup)
331
{
332 333 334
	rhashtable_destroy(&vgroup->vchunk_ht);
	mlxsw_sp_acl_tcam_group_del(&vgroup->group);
	WARN_ON(!list_empty(&vgroup->vregion_list));
335 336 337 338 339
}

/* Bind the group to a port's ingress or egress ACL via the PPBT register. */
static int
mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_group *group,
			     struct mlxsw_sp_port *mlxsw_sp_port,
			     bool ingress)
{
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
					       MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_BIND, mlxsw_sp_port->local_port,
			    group->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}

/* Reverse of mlxsw_sp_acl_tcam_group_bind(); the register write result is
 * intentionally ignored on this teardown path.
 */
static void
mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_group *group,
			       struct mlxsw_sp_port *mlxsw_sp_port,
			       bool ingress)
{
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
					       MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_UNBIND, mlxsw_sp_port->local_port,
			    group->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}

367 368 369 370 371 372
static u16
mlxsw_sp_acl_tcam_group_id(struct mlxsw_sp_acl_tcam_group *group)
{
	return group->id;
}

373
static unsigned int
374
mlxsw_sp_acl_tcam_vregion_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
375
{
376
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
377

378
	if (list_empty(&vregion->vchunk_list))
379
		return 0;
380 381 382 383
	/* As a priority of a vregion, return priority of the first vchunk */
	vchunk = list_first_entry(&vregion->vchunk_list,
				  typeof(*vchunk), list);
	return vchunk->priority;
384 385 386
}

static unsigned int
387
mlxsw_sp_acl_tcam_vregion_max_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
388
{
389
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
390

391
	if (list_empty(&vregion->vchunk_list))
392
		return 0;
393 394 395
	vchunk = list_last_entry(&vregion->vchunk_list,
				 typeof(*vchunk), list);
	return vchunk->priority;
396 397
}

398 399
static int
mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
400 401
				      struct mlxsw_sp_acl_tcam_group *group,
				      struct mlxsw_sp_acl_tcam_region *region,
402
				      unsigned int priority,
403
				      struct mlxsw_sp_acl_tcam_region *next_region)
404
{
405 406
	struct mlxsw_sp_acl_tcam_region *region2;
	struct list_head *pos;
407 408
	int err;

409 410 411 412 413
	mutex_lock(&group->lock);
	if (group->region_count == group->tcam->max_group_size) {
		err = -ENOBUFS;
		goto err_region_count_check;
	}
414

415 416 417 418 419 420 421 422 423 424
	if (next_region) {
		/* If the next region is defined, place the new one
		 * before it. The next one is a sibling.
		 */
		pos = &next_region->list;
	} else {
		/* Position the region inside the list according to priority */
		list_for_each(pos, &group->region_list) {
			region2 = list_entry(pos, typeof(*region2), list);
			if (mlxsw_sp_acl_tcam_vregion_prio(region2->vregion) >
425
			    priority)
426 427 428 429 430 431
				break;
		}
	}
	list_add_tail(&region->list, pos);
	region->group = group;

432 433
	err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	if (err)
434
		goto err_group_update;
435 436

	group->region_count++;
437
	mutex_unlock(&group->lock);
438
	return 0;
439 440 441

err_group_update:
	list_del(&region->list);
442 443
err_region_count_check:
	mutex_unlock(&group->lock);
444
	return err;
445 446 447
}

static void
448 449
mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region)
450
{
451
	struct mlxsw_sp_acl_tcam_group *group = region->group;
452

453
	mutex_lock(&group->lock);
454
	list_del(&region->list);
455
	group->region_count--;
456
	mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
457
	mutex_unlock(&group->lock);
458 459 460
}

static int
461 462
mlxsw_sp_acl_tcam_vgroup_vregion_attach(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_vgroup *vgroup,
463 464
					struct mlxsw_sp_acl_tcam_vregion *vregion,
					unsigned int priority)
465
{
466 467
	struct mlxsw_sp_acl_tcam_vregion *vregion2;
	struct list_head *pos;
468 469
	int err;

470
	/* Position the vregion inside the list according to priority */
471
	list_for_each(pos, &vgroup->vregion_list) {
472
		vregion2 = list_entry(pos, typeof(*vregion2), list);
473
		if (mlxsw_sp_acl_tcam_vregion_prio(vregion2) > priority)
474 475 476
			break;
	}
	list_add_tail(&vregion->list, pos);
477

478
	err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, &vgroup->group,
479 480
						    vregion->region,
						    priority, NULL);
481
	if (err)
482
		goto err_region_attach;
483 484 485

	return 0;

486 487
err_region_attach:
	list_del(&vregion->list);
488 489 490 491
	return err;
}

static void
492 493
mlxsw_sp_acl_tcam_vgroup_vregion_detach(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_vregion *vregion)
494
{
495
	list_del(&vregion->list);
496 497 498
	if (vregion->region2)
		mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp,
						      vregion->region2);
499
	mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, vregion->region);
500 501
}

502
static struct mlxsw_sp_acl_tcam_vregion *
503 504 505 506
mlxsw_sp_acl_tcam_vgroup_vregion_find(struct mlxsw_sp_acl_tcam_vgroup *vgroup,
				      unsigned int priority,
				      struct mlxsw_afk_element_usage *elusage,
				      bool *p_need_split)
507
{
508
	struct mlxsw_sp_acl_tcam_vregion *vregion, *vregion2;
509 510 511
	struct list_head *pos;
	bool issubset;

512
	list_for_each(pos, &vgroup->vregion_list) {
513
		vregion = list_entry(pos, typeof(*vregion), list);
514 515

		/* First, check if the requested priority does not rather belong
516
		 * under some of the next vregions.
517
		 */
518
		if (pos->next != &vgroup->vregion_list) { /* not last */
519 520 521 522
			vregion2 = list_entry(pos->next, typeof(*vregion2),
					      list);
			if (priority >=
			    mlxsw_sp_acl_tcam_vregion_prio(vregion2))
523 524 525
				continue;
		}

526 527
		issubset = mlxsw_afk_key_info_subset(vregion->key_info,
						     elusage);
528 529

		/* If requested element usage would not fit and the priority
530 531
		 * is lower than the currently inspected vregion we cannot
		 * use this region, so return NULL to indicate new vregion has
532 533 534
		 * to be created.
		 */
		if (!issubset &&
535
		    priority < mlxsw_sp_acl_tcam_vregion_prio(vregion))
536 537 538
			return NULL;

		/* If requested element usage would not fit and the priority
539 540 541
		 * is higher than the currently inspected vregion we cannot
		 * use this vregion. There is still some hope that the next
		 * vregion would be the fit. So let it be processed and
542 543 544
		 * eventually break at the check right above this.
		 */
		if (!issubset &&
545
		    priority > mlxsw_sp_acl_tcam_vregion_max_prio(vregion))
546 547
			continue;

548
		/* Indicate if the vregion needs to be split in order to add
549
		 * the requested priority. Split is needed when requested
550
		 * element usage won't fit into the found vregion.
551 552
		 */
		*p_need_split = !issubset;
553
		return vregion;
554
	}
555
	return NULL; /* New vregion has to be created. */
556 557 558
}

static void
559 560 561
mlxsw_sp_acl_tcam_vgroup_use_patterns(struct mlxsw_sp_acl_tcam_vgroup *vgroup,
				      struct mlxsw_afk_element_usage *elusage,
				      struct mlxsw_afk_element_usage *out)
562 563 564 565
{
	const struct mlxsw_sp_acl_tcam_pattern *pattern;
	int i;

566 567 568
	/* In case the template is set, we don't have to look up the pattern
	 * and just use the template.
	 */
569 570
	if (vgroup->tmplt_elusage_set) {
		memcpy(out, &vgroup->tmplt_elusage, sizeof(*out));
571 572 573 574
		WARN_ON(!mlxsw_afk_element_usage_subset(elusage, out));
		return;
	}

575 576
	for (i = 0; i < vgroup->patterns_count; i++) {
		pattern = &vgroup->patterns[i];
577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595
		mlxsw_afk_element_usage_fill(out, pattern->elements,
					     pattern->elements_count);
		if (mlxsw_afk_element_usage_subset(elusage, out))
			return;
	}
	memcpy(out, elusage, sizeof(*out));
}

static int
mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_afk_key_info *key_info = region->key_info;
	char ptar_pl[MLXSW_REG_PTAR_LEN];
	unsigned int encodings_count;
	int i;
	int err;

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
596
			    region->key_type,
597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618
			    MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
			    region->id, region->tcam_region_info);
	encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info);
	for (i = 0; i < encodings_count; i++) {
		u16 encoding;

		encoding = mlxsw_afk_key_info_block_encoding_get(key_info, i);
		mlxsw_reg_ptar_key_id_pack(ptar_pl, i, encoding);
	}
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
	if (err)
		return err;
	mlxsw_reg_ptar_unpack(ptar_pl, region->tcam_region_info);
	return 0;
}

static void
mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_region *region)
{
	char ptar_pl[MLXSW_REG_PTAR_LEN];

619 620
	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE,
			    region->key_type, 0, region->id,
621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649
			    region->tcam_region_info);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
}

static int
mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_region *region)
{
	char pacl_pl[MLXSW_REG_PACL_LEN];

	mlxsw_reg_pacl_pack(pacl_pl, region->id, true,
			    region->tcam_region_info);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}

static void
mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	char pacl_pl[MLXSW_REG_PACL_LEN];

	mlxsw_reg_pacl_pack(pacl_pl, region->id, false,
			    region->tcam_region_info);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}

static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam *tcam,
650 651
				struct mlxsw_sp_acl_tcam_vregion *vregion,
				void *hints_priv)
652
{
653
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
654 655 656
	struct mlxsw_sp_acl_tcam_region *region;
	int err;

657
	region = kzalloc(sizeof(*region) + ops->region_priv_size, GFP_KERNEL);
658 659 660
	if (!region)
		return ERR_PTR(-ENOMEM);
	region->mlxsw_sp = mlxsw_sp;
661 662
	region->vregion = vregion;
	region->key_info = vregion->key_info;
663 664 665 666 667

	err = mlxsw_sp_acl_tcam_region_id_get(tcam, &region->id);
	if (err)
		goto err_region_id_get;

668 669 670 671
	err = ops->region_associate(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_associate;

672
	region->key_type = ops->key_type;
673 674 675 676 677 678 679 680
	err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_alloc;

	err = mlxsw_sp_acl_tcam_region_enable(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_enable;

681
	err = ops->region_init(mlxsw_sp, region->priv, tcam->priv,
682
			       region, hints_priv);
683
	if (err)
684
		goto err_tcam_region_init;
685 686 687

	return region;

688
err_tcam_region_init:
689 690 691 692
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
err_tcam_region_enable:
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
err_tcam_region_alloc:
693
err_tcam_region_associate:
694 695 696 697 698 699 700 701 702 703
	mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
err_region_id_get:
	kfree(region);
	return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
{
704 705 706
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->region_fini(mlxsw_sp, region->priv);
707 708
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
709
	mlxsw_sp_acl_tcam_region_id_put(region->group->tcam,
710
					region->id);
711 712 713
	kfree(region);
}

/* Arm the vregion's delayed rehash work using the TCAM-wide interval.
 * A zero interval means periodic rehash is disabled, so nothing is armed.
 */
static void
mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	unsigned long delay_ms = vregion->tcam->vregion_rehash_intrvl;

	if (delay_ms)
		mlxsw_core_schedule_dw(&vregion->rehash_dw,
				       msecs_to_jiffies(delay_ms));
}

/* Forward declaration; needed by the work callback below. */
static int
mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vregion *vregion);

/* Delayed-work callback: rehash the vregion once, then re-arm the work. */
static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion =
		container_of(work, struct mlxsw_sp_acl_tcam_vregion,
			     rehash_dw.work);

	mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion);
	mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
}

739 740
static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vregion_create(struct mlxsw_sp *mlxsw_sp,
741 742
				 struct mlxsw_sp_acl_tcam_vgroup *vgroup,
				 unsigned int priority,
743 744
				 struct mlxsw_afk_element_usage *elusage)
{
745
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
746
	struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
747
	struct mlxsw_sp_acl_tcam *tcam = vgroup->group.tcam;
748 749 750 751 752 753
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	int err;

	vregion = kzalloc(sizeof(*vregion), GFP_KERNEL);
	if (!vregion)
		return ERR_PTR(-ENOMEM);
754
	INIT_LIST_HEAD(&vregion->vchunk_list);
755
	mutex_init(&vregion->lock);
756 757
	vregion->tcam = tcam;
	vregion->mlxsw_sp = mlxsw_sp;
758
	vregion->vgroup = vgroup;
759
	vregion->ref_count = 1;
760 761 762 763 764 765 766 767

	vregion->key_info = mlxsw_afk_key_info_get(afk, elusage);
	if (IS_ERR(vregion->key_info)) {
		err = PTR_ERR(vregion->key_info);
		goto err_key_info_get;
	}

	vregion->region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, tcam,
768
							  vregion, NULL);
769 770 771 772 773
	if (IS_ERR(vregion->region)) {
		err = PTR_ERR(vregion->region);
		goto err_region_create;
	}

774 775 776 777 778
	err = mlxsw_sp_acl_tcam_vgroup_vregion_attach(mlxsw_sp, vgroup, vregion,
						      priority);
	if (err)
		goto err_vgroup_vregion_attach;

779
	if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
780 781 782 783
		/* Create the delayed work for vregion periodic rehash */
		INIT_DELAYED_WORK(&vregion->rehash_dw,
				  mlxsw_sp_acl_tcam_vregion_rehash_work);
		mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
784
		mutex_lock(&tcam->lock);
785
		list_add_tail(&vregion->tlist, &tcam->vregion_list);
786
		mutex_unlock(&tcam->lock);
787 788
	}

789 790
	return vregion;

791 792
err_vgroup_vregion_attach:
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region);
793 794 795 796 797 798 799 800 801 802 803
err_region_create:
	mlxsw_afk_key_info_put(vregion->key_info);
err_key_info_get:
	kfree(vregion);
	return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_tcam_vregion *vregion)
{
804
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
805
	struct mlxsw_sp_acl_tcam_vgroup *vgroup = vregion->vgroup;
806
	struct mlxsw_sp_acl_tcam *tcam = vregion->tcam;
807

808
	if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
809
		mutex_lock(&tcam->lock);
810
		list_del(&vregion->tlist);
811
		mutex_unlock(&tcam->lock);
812
		cancel_delayed_work_sync(&vregion->rehash_dw);
813
	}
814
	mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion);
815 816
	if (vregion->region2)
		mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region2);
817 818
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region);
	mlxsw_afk_key_info_put(vregion->key_info);
819
	mutex_destroy(&vregion->lock);
820 821 822
	kfree(vregion);
}

823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846
u32 mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_acl_tcam *tcam)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	u32 vregion_rehash_intrvl;

	if (WARN_ON(!ops->region_rehash_hints_get))
		return 0;
	vregion_rehash_intrvl = tcam->vregion_rehash_intrvl;
	return vregion_rehash_intrvl;
}

/* Set the vregion rehash interval (ms). Zero disables rehash; non-zero
 * values below the minimum are rejected with -EINVAL. Every existing
 * vregion's delayed work is rescheduled or cancelled accordingly.
 */
int mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_acl_tcam *tcam,
						u32 val)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_vregion *vregion;

	if (val < MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN && val)
		return -EINVAL;
	if (WARN_ON(!ops->region_rehash_hints_get))
		return -EOPNOTSUPP;
	tcam->vregion_rehash_intrvl = val;
	mutex_lock(&tcam->lock);
	list_for_each_entry(vregion, &tcam->vregion_list, tlist) {
		if (val)
			mlxsw_core_schedule_dw(&vregion->rehash_dw, 0);
		else
			cancel_delayed_work_sync(&vregion->rehash_dw);
	}
	mutex_unlock(&tcam->lock);
	return 0;
}

858 859 860 861 862
static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vregion_get(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_vgroup *vgroup,
			      unsigned int priority,
			      struct mlxsw_afk_element_usage *elusage)
863
{
864
	struct mlxsw_afk_element_usage vregion_elusage;
865
	struct mlxsw_sp_acl_tcam_vregion *vregion;
866 867
	bool need_split;

868 869
	vregion = mlxsw_sp_acl_tcam_vgroup_vregion_find(vgroup, priority,
							elusage, &need_split);
870 871 872 873 874 875 876 877 878 879 880 881 882
	if (vregion) {
		if (need_split) {
			/* According to priority, new vchunk should belong to
			 * an existing vregion. However, this vchunk needs
			 * elements that vregion does not contain. We need
			 * to split the existing vregion into two and create
			 * a new vregion for the new vchunk in between.
			 * This is not supported now.
			 */
			return ERR_PTR(-EOPNOTSUPP);
		}
		vregion->ref_count++;
		return vregion;
883 884
	}

885 886
	mlxsw_sp_acl_tcam_vgroup_use_patterns(vgroup, elusage,
					      &vregion_elusage);
887

888 889
	return mlxsw_sp_acl_tcam_vregion_create(mlxsw_sp, vgroup, priority,
						&vregion_elusage);
890 891 892
}

static void
893 894
mlxsw_sp_acl_tcam_vregion_put(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_vregion *vregion)
895
{
896 897 898
	if (--vregion->ref_count)
		return;
	mlxsw_sp_acl_tcam_vregion_destroy(mlxsw_sp, vregion);
899 900 901 902
}

static struct mlxsw_sp_acl_tcam_chunk *
mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
903 904
			       struct mlxsw_sp_acl_tcam_vchunk *vchunk,
			       struct mlxsw_sp_acl_tcam_region *region)
905
{
906
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
907
	struct mlxsw_sp_acl_tcam_chunk *chunk;
908 909 910 911 912

	chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL);
	if (!chunk)
		return ERR_PTR(-ENOMEM);
	chunk->vchunk = vchunk;
913
	chunk->region = region;
914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930

	ops->chunk_init(region->priv, chunk->priv, vchunk->priority);
	return chunk;
}

/* Reverse of mlxsw_sp_acl_tcam_chunk_create(): flavour-specific fini,
 * then free the chunk (including its trailing private area).
 */
static void
mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->chunk_fini(chunk->priv);
	kfree(chunk);
}

static struct mlxsw_sp_acl_tcam_vchunk *
mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
931
				struct mlxsw_sp_acl_tcam_vgroup *vgroup,
932 933 934
				unsigned int priority,
				struct mlxsw_afk_element_usage *elusage)
{
935
	struct mlxsw_sp_acl_tcam_vregion *vregion;
936
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
937 938 939 940 941
	int err;

	if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
		return ERR_PTR(-EINVAL);

942 943
	vchunk = kzalloc(sizeof(*vchunk), GFP_KERNEL);
	if (!vchunk)
944
		return ERR_PTR(-ENOMEM);
945
	INIT_LIST_HEAD(&vchunk->ventry_list);
946
	vchunk->priority = priority;
947
	vchunk->vgroup = vgroup;
948
	vchunk->ref_count = 1;
949

950 951 952 953 954 955 956 957
	vregion = mlxsw_sp_acl_tcam_vregion_get(mlxsw_sp, vgroup,
						priority, elusage);
	if (IS_ERR(vregion)) {
		err = PTR_ERR(vregion);
		goto err_vregion_get;
	}

	vchunk->vregion = vregion;
958

959
	err = rhashtable_insert_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
960
				     mlxsw_sp_acl_tcam_vchunk_ht_params);
961 962 963
	if (err)
		goto err_rhashtable_insert;

964
	mutex_lock(&vregion->lock);
965 966 967
	vchunk->chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk,
						       vchunk->vregion->region);
	if (IS_ERR(vchunk->chunk)) {
968
		mutex_unlock(&vregion->lock);
969 970 971 972
		err = PTR_ERR(vchunk->chunk);
		goto err_chunk_create;
	}

973
	list_add_tail(&vchunk->list, &vregion->vchunk_list);
974
	mutex_unlock(&vregion->lock);
975

976
	return vchunk;
977

978
err_chunk_create:
979
	rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
980
			       mlxsw_sp_acl_tcam_vchunk_ht_params);
981
err_rhashtable_insert:
982 983
	mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vregion);
err_vregion_get:
984
	kfree(vchunk);
985 986 987 988
	return ERR_PTR(err);
}

static void
989 990
mlxsw_sp_acl_tcam_vchunk_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vchunk *vchunk)
991
{
992
	struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
993
	struct mlxsw_sp_acl_tcam_vgroup *vgroup = vchunk->vgroup;
994

995
	mutex_lock(&vregion->lock);
996
	list_del(&vchunk->list);
997 998
	if (vchunk->chunk2)
		mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
999
	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk);
1000
	mutex_unlock(&vregion->lock);
1001
	rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
1002
			       mlxsw_sp_acl_tcam_vchunk_ht_params);
1003
	mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vchunk->vregion);
1004
	kfree(vchunk);
1005 1006
}

1007 1008
static struct mlxsw_sp_acl_tcam_vchunk *
mlxsw_sp_acl_tcam_vchunk_get(struct mlxsw_sp *mlxsw_sp,
1009
			     struct mlxsw_sp_acl_tcam_vgroup *vgroup,
1010 1011
			     unsigned int priority,
			     struct mlxsw_afk_element_usage *elusage)
1012
{
1013
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
1014

1015
	vchunk = rhashtable_lookup_fast(&vgroup->vchunk_ht, &priority,
1016 1017 1018
					mlxsw_sp_acl_tcam_vchunk_ht_params);
	if (vchunk) {
		if (WARN_ON(!mlxsw_afk_key_info_subset(vchunk->vregion->key_info,
1019 1020
						       elusage)))
			return ERR_PTR(-EINVAL);
1021 1022
		vchunk->ref_count++;
		return vchunk;
1023
	}
1024
	return mlxsw_sp_acl_tcam_vchunk_create(mlxsw_sp, vgroup,
1025
					       priority, elusage);
1026 1027
}

1028 1029 1030
static void
mlxsw_sp_acl_tcam_vchunk_put(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_vchunk *vchunk)
1031
{
1032
	if (--vchunk->ref_count)
1033
		return;
1034
	mlxsw_sp_acl_tcam_vchunk_destroy(mlxsw_sp, vchunk);
1035 1036
}

1037 1038 1039
static struct mlxsw_sp_acl_tcam_entry *
mlxsw_sp_acl_tcam_entry_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_ventry *ventry,
1040
			       struct mlxsw_sp_acl_tcam_chunk *chunk)
1041
{
1042
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
1043
	struct mlxsw_sp_acl_tcam_entry *entry;
1044 1045
	int err;

1046 1047 1048 1049
	entry = kzalloc(sizeof(*entry) + ops->entry_priv_size, GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);
	entry->ventry = ventry;
1050
	entry->chunk = chunk;
1051

1052 1053
	err = ops->entry_add(mlxsw_sp, chunk->region->priv, chunk->priv,
			     entry->priv, ventry->rulei);
1054
	if (err)
1055
		goto err_entry_add;
1056

1057
	return entry;
1058

1059
err_entry_add:
1060 1061
	kfree(entry);
	return ERR_PTR(err);
1062 1063
}

1064 1065
static void mlxsw_sp_acl_tcam_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					    struct mlxsw_sp_acl_tcam_entry *entry)
1066
{
1067
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
1068

1069 1070
	ops->entry_del(mlxsw_sp, entry->chunk->region->priv,
		       entry->chunk->priv, entry->priv);
1071
	kfree(entry);
1072 1073
}

1074 1075
static int
mlxsw_sp_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
1076
				       struct mlxsw_sp_acl_tcam_region *region,
1077 1078 1079 1080 1081
				       struct mlxsw_sp_acl_tcam_entry *entry,
				       struct mlxsw_sp_acl_rule_info *rulei)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

1082
	return ops->entry_action_replace(mlxsw_sp, region->priv,
1083 1084 1085
					 entry->priv, rulei);
}

1086 1087 1088 1089 1090
static int
mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_entry *entry,
				     bool *activity)
{
1091
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
1092

1093
	return ops->entry_activity_get(mlxsw_sp, entry->chunk->region->priv,
1094
				       entry->priv, activity);
1095 1096
}

1097
static int mlxsw_sp_acl_tcam_ventry_add(struct mlxsw_sp *mlxsw_sp,
1098
					struct mlxsw_sp_acl_tcam_vgroup *vgroup,
1099 1100 1101
					struct mlxsw_sp_acl_tcam_ventry *ventry,
					struct mlxsw_sp_acl_rule_info *rulei)
{
1102
	struct mlxsw_sp_acl_tcam_vregion *vregion;
1103 1104 1105
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	int err;

1106
	vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp, vgroup, rulei->priority,
1107 1108 1109 1110 1111
					      &rulei->values.elusage);
	if (IS_ERR(vchunk))
		return PTR_ERR(vchunk);

	ventry->vchunk = vchunk;
1112
	ventry->rulei = rulei;
1113 1114 1115
	vregion = vchunk->vregion;

	mutex_lock(&vregion->lock);
1116
	ventry->entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry,
1117
						       vchunk->chunk);
1118
	if (IS_ERR(ventry->entry)) {
1119
		mutex_unlock(&vregion->lock);
1120 1121 1122 1123
		err = PTR_ERR(ventry->entry);
		goto err_entry_create;
	}

1124
	list_add_tail(&ventry->list, &vchunk->ventry_list);
1125
	mutex_unlock(&vregion->lock);
1126

1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137
	return 0;

err_entry_create:
	mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk);
	return err;
}

static void mlxsw_sp_acl_tcam_ventry_del(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_tcam_ventry *ventry)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk;
1138
	struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
1139

1140
	mutex_lock(&vregion->lock);
1141 1142
	list_del(&ventry->list);
	mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
1143
	mutex_unlock(&vregion->lock);
1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167
	mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk);
}

static int
mlxsw_sp_acl_tcam_ventry_action_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_ventry *ventry,
					struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk;

	return mlxsw_sp_acl_tcam_entry_action_replace(mlxsw_sp,
						      vchunk->vregion->region,
						      ventry->entry, rulei);
}

/* Fetch the activity flag of the hardware entry backing @ventry. */
static int
mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_ventry *ventry,
				      bool *activity)
{
	struct mlxsw_sp_acl_tcam_entry *entry = ventry->entry;

	return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, entry, activity);
}

1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268
static int
mlxsw_sp_acl_tcam_ventry_migrate(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_ventry *ventry,
				 struct mlxsw_sp_acl_tcam_chunk *chunk2)
{
	struct mlxsw_sp_acl_tcam_entry *entry2;

	entry2 = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry, chunk2);
	if (IS_ERR(entry2))
		return PTR_ERR(entry2);
	mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
	ventry->entry = entry2;
	return 0;
}

/* Migrate all entries of @vchunk into a new chunk created in @region.
 * On success the new chunk becomes vchunk->chunk. On mid-migration failure
 * (when this call is not itself a rollback), already-moved entries are
 * migrated back; if that back-migration fails too, the vregion is marked
 * with failed_rollback since there is no safe way to recover.
 */
static int
mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_vchunk *vchunk,
				     struct mlxsw_sp_acl_tcam_region *region,
				     bool this_is_rollback)
{
	struct mlxsw_sp_acl_tcam_ventry *ventry;
	struct mlxsw_sp_acl_tcam_chunk *chunk2;
	int err;
	int err2;

	chunk2 = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
	if (IS_ERR(chunk2)) {
		/* A failure while rolling back leaves the vregion in an
		 * inconsistent state; flag it.
		 */
		if (this_is_rollback)
			vchunk->vregion->failed_rollback = true;
		return PTR_ERR(chunk2);
	}
	vchunk->chunk2 = chunk2;
	list_for_each_entry(ventry, &vchunk->ventry_list, list) {
		err = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry,
						       vchunk->chunk2);
		if (err) {
			if (this_is_rollback) {
				vchunk->vregion->failed_rollback = true;
				return err;
			}
			goto rollback;
		}
	}
	/* All entries moved; retire the old chunk and promote the new one. */
	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk);
	vchunk->chunk = chunk2;
	vchunk->chunk2 = NULL;
	return 0;

rollback:
	/* Migrate the entries back to the original chunk. If some entry
	 * migration fails, there's no good way how to proceed. Set the
	 * vregion with "failed_rollback" flag.
	 */
	list_for_each_entry_continue_reverse(ventry, &vchunk->ventry_list,
					     list) {
		err2 = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry,
							vchunk->chunk);
		if (err2) {
			vchunk->vregion->failed_rollback = true;
			goto err_rollback;
		}
	}

	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
	vchunk->chunk2 = NULL;

err_rollback:
	return err;
}

/* Migrate every vchunk of @vregion into vregion->region2. On failure,
 * migrate the already-moved vchunks back into the original region in
 * reverse order (best effort — per-vchunk rollback failures are recorded
 * in vregion->failed_rollback by the callee).
 */
static int
mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	int err;

	list_for_each_entry(vchunk, &vregion->vchunk_list, list) {
		err = mlxsw_sp_acl_tcam_vchunk_migrate_one(mlxsw_sp, vchunk,
							   vregion->region2,
							   false);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	list_for_each_entry_continue_reverse(vchunk, &vregion->vchunk_list,
					     list) {
		mlxsw_sp_acl_tcam_vchunk_migrate_one(mlxsw_sp, vchunk,
						     vregion->region, true);
	}
	return err;
}

static int
mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_tcam_vregion *vregion,
				  void *hints_priv)
{
1269
	unsigned int priority = mlxsw_sp_acl_tcam_vregion_prio(vregion);
1270 1271 1272
	struct mlxsw_sp_acl_tcam_region *region2, *unused_region;
	int err;

1273 1274
	trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);

1275 1276
	region2 = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, vregion->tcam,
						  vregion, hints_priv);
1277 1278 1279 1280
	if (IS_ERR(region2)) {
		err = PTR_ERR(region2);
		goto out;
	}
1281 1282

	vregion->region2 = region2;
1283 1284
	err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp,
						    vregion->region->group,
1285 1286
						    region2, priority,
						    vregion->region);
1287 1288 1289
	if (err)
		goto err_group_region_attach;

1290 1291
	mutex_lock(&vregion->lock);

1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305
	err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion);
	if (!vregion->failed_rollback) {
		if (!err) {
			/* In case of successful migration, region2 is used and
			 * the original is unused.
			 */
			unused_region = vregion->region;
			vregion->region = vregion->region2;
		} else {
			/* In case of failure during migration, the original
			 * region is still used.
			 */
			unused_region = vregion->region2;
		}
1306
		mutex_unlock(&vregion->lock);
1307 1308 1309
		vregion->region2 = NULL;
		mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, unused_region);
		mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, unused_region);
1310 1311
	} else {
		mutex_unlock(&vregion->lock);
1312
	}
1313

1314
	goto out;
1315 1316 1317 1318

err_group_region_attach:
	vregion->region2 = NULL;
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region2);
1319 1320 1321
out:
	trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);

1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332
	return err;
}

static int
mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	void *hints_priv;
	int err;

1333
	trace_mlxsw_sp_acl_tcam_vregion_rehash(mlxsw_sp, vregion);
1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347
	if (vregion->failed_rollback)
		return -EBUSY;

	hints_priv = ops->region_rehash_hints_get(vregion->region->priv);
	if (IS_ERR(hints_priv)) {
		err = PTR_ERR(hints_priv);
		if (err != -EAGAIN)
			dev_err(mlxsw_sp->bus_info->dev, "Failed get rehash hints\n");
		return err;
	}

	err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion, hints_priv);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
1348 1349 1350
		if (vregion->failed_rollback) {
			trace_mlxsw_sp_acl_tcam_vregion_rehash_dis(mlxsw_sp,
								   vregion);
1351
			dev_err(mlxsw_sp->bus_info->dev, "Failed to rollback during vregion migration fail\n");
1352
		}
1353 1354 1355 1356 1357 1358
	}

	ops->region_rehash_hints_put(hints_priv);
	return err;
}

1359 1360
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
	MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
1361 1362 1363 1364
	MLXSW_AFK_ELEMENT_DMAC_32_47,
	MLXSW_AFK_ELEMENT_DMAC_0_31,
	MLXSW_AFK_ELEMENT_SMAC_32_47,
	MLXSW_AFK_ELEMENT_SMAC_0_31,
1365 1366
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
1367 1368
	MLXSW_AFK_ELEMENT_SRC_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_IP_0_31,
1369 1370
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
1371 1372
	MLXSW_AFK_ELEMENT_VID,
	MLXSW_AFK_ELEMENT_PCP,
1373
	MLXSW_AFK_ELEMENT_TCP_FLAGS,
1374
	MLXSW_AFK_ELEMENT_IP_TTL_,
1375 1376
	MLXSW_AFK_ELEMENT_IP_ECN,
	MLXSW_AFK_ELEMENT_IP_DSCP,
1377 1378 1379 1380 1381
};

static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
1382 1383 1384 1385 1386 1387 1388 1389
	MLXSW_AFK_ELEMENT_SRC_IP_96_127,
	MLXSW_AFK_ELEMENT_SRC_IP_64_95,
	MLXSW_AFK_ELEMENT_SRC_IP_32_63,
	MLXSW_AFK_ELEMENT_SRC_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_IP_96_127,
	MLXSW_AFK_ELEMENT_DST_IP_64_95,
	MLXSW_AFK_ELEMENT_DST_IP_32_63,
	MLXSW_AFK_ELEMENT_DST_IP_0_31,
1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
};

/* Key patterns offered to vgroups; a vregion picks the first pattern whose
 * elements cover a rule's element usage.
 */
static const struct mlxsw_sp_acl_tcam_pattern mlxsw_sp_acl_tcam_patterns[] = {
	{
		.elements = mlxsw_sp_acl_tcam_pattern_ipv4,
		.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv4),
	},
	{
		.elements = mlxsw_sp_acl_tcam_pattern_ipv6,
		.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv6),
	},
};

#define MLXSW_SP_ACL_TCAM_PATTERNS_COUNT \
	ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns)

struct mlxsw_sp_acl_tcam_flower_ruleset {
1409
	struct mlxsw_sp_acl_tcam_vgroup vgroup;
1410 1411 1412
};

struct mlxsw_sp_acl_tcam_flower_rule {
1413
	struct mlxsw_sp_acl_tcam_ventry ventry;
1414 1415 1416 1417
};

static int
mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
1418
				     struct mlxsw_sp_acl_tcam *tcam,
1419 1420
				     void *ruleset_priv,
				     struct mlxsw_afk_element_usage *tmplt_elusage)
1421 1422 1423
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

1424 1425 1426
	return mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
					    mlxsw_sp_acl_tcam_patterns,
					    MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
1427
					    tmplt_elusage, true);
1428 1429 1430 1431 1432 1433 1434 1435
}

static void
mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
				     void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

1436
	mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
1437 1438 1439 1440 1441
}

/* Bind the ruleset's group to a port in the given direction. */
static int
mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
				      void *ruleset_priv,
				      struct mlxsw_sp_port *mlxsw_sp_port,
				      bool ingress)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->vgroup.group,
					    mlxsw_sp_port, ingress);
}

/* Unbind the ruleset's group from a port in the given direction. */
static void
mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
					void *ruleset_priv,
					struct mlxsw_sp_port *mlxsw_sp_port,
					bool ingress)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->vgroup.group,
				       mlxsw_sp_port, ingress);
}

1463 1464 1465 1466 1467
static u16
mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

1468
	return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group);
1469 1470
}

1471 1472 1473 1474 1475 1476 1477 1478
static int
mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
				  void *ruleset_priv, void *rule_priv,
				  struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

1479
	return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup,
1480
					    &rule->ventry, rulei);
1481 1482 1483 1484 1485 1486 1487
}

static void
mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
{
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

1488
	mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry);
1489 1490
}

1491 1492 1493 1494 1495 1496 1497 1498
static int
mlxsw_sp_acl_tcam_flower_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
					     void *rule_priv,
					     struct mlxsw_sp_acl_rule_info *rulei)
{
	return -EOPNOTSUPP;
}

1499 1500 1501 1502 1503 1504
static int
mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
					   void *rule_priv, bool *activity)
{
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

1505 1506
	return mlxsw_sp_acl_tcam_ventry_activity_get(mlxsw_sp, &rule->ventry,
						     activity);
1507 1508
}

1509 1510 1511 1512 1513 1514
static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
	.ruleset_priv_size	= sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset),
	.ruleset_add		= mlxsw_sp_acl_tcam_flower_ruleset_add,
	.ruleset_del		= mlxsw_sp_acl_tcam_flower_ruleset_del,
	.ruleset_bind		= mlxsw_sp_acl_tcam_flower_ruleset_bind,
	.ruleset_unbind		= mlxsw_sp_acl_tcam_flower_ruleset_unbind,
1515
	.ruleset_group_id	= mlxsw_sp_acl_tcam_flower_ruleset_group_id,
1516
	.rule_priv_size		= sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
1517 1518
	.rule_add		= mlxsw_sp_acl_tcam_flower_rule_add,
	.rule_del		= mlxsw_sp_acl_tcam_flower_rule_del,
1519
	.rule_action_replace	= mlxsw_sp_acl_tcam_flower_rule_action_replace,
1520
	.rule_activity_get	= mlxsw_sp_acl_tcam_flower_rule_activity_get,
1521 1522
};

1523
struct mlxsw_sp_acl_tcam_mr_ruleset {
1524
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
1525
	struct mlxsw_sp_acl_tcam_vgroup vgroup;
1526 1527 1528
};

struct mlxsw_sp_acl_tcam_mr_rule {
1529
	struct mlxsw_sp_acl_tcam_ventry ventry;
1530 1531
};

1532 1533 1534 1535 1536 1537 1538 1539 1540
static int
mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam *tcam,
				 void *ruleset_priv,
				 struct mlxsw_afk_element_usage *tmplt_elusage)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
	int err;

1541 1542 1543
	err = mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
					   mlxsw_sp_acl_tcam_patterns,
					   MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
1544
					   tmplt_elusage, false);
1545 1546 1547 1548 1549 1550 1551 1552 1553
	if (err)
		return err;

	/* For most of the TCAM clients it would make sense to take a tcam chunk
	 * only when the first rule is written. This is not the case for
	 * multicast router as it is required to bind the multicast router to a
	 * specific ACL Group ID which must exist in HW before multicast router
	 * is initialized.
	 */
1554
	ruleset->vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp,
1555
						       &ruleset->vgroup, 1,
1556 1557 1558
						       tmplt_elusage);
	if (IS_ERR(ruleset->vchunk)) {
		err = PTR_ERR(ruleset->vchunk);
1559 1560 1561 1562 1563 1564
		goto err_chunk_get;
	}

	return 0;

err_chunk_get:
1565
	mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
1566 1567 1568 1569 1570 1571 1572 1573
	return err;
}

static void
mlxsw_sp_acl_tcam_mr_ruleset_del(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;

1574
	mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, ruleset->vchunk);
1575
	mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599
}

/* No-op: the multicast router binds itself to the ACL group ID directly. */
static int
mlxsw_sp_acl_tcam_mr_ruleset_bind(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
				  struct mlxsw_sp_port *mlxsw_sp_port,
				  bool ingress)
{
	/* Binding is done when initializing multicast router */
	return 0;
}

/* No-op counterpart of mr_ruleset_bind. */
static void
mlxsw_sp_acl_tcam_mr_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
				    void *ruleset_priv,
				    struct mlxsw_sp_port *mlxsw_sp_port,
				    bool ingress)
{
}

static u16
mlxsw_sp_acl_tcam_mr_ruleset_group_id(void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;

1600
	return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group);
1601 1602 1603 1604 1605 1606 1607 1608 1609 1610
}

static int
mlxsw_sp_acl_tcam_mr_rule_add(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
			      void *rule_priv,
			      struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

1611
	return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup,
1612
					   &rule->ventry, rulei);
1613 1614 1615 1616 1617 1618 1619
}

static void
mlxsw_sp_acl_tcam_mr_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
{
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

1620
	mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry);
1621 1622
}

1623 1624
static int
mlxsw_sp_acl_tcam_mr_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
1625
					 void *rule_priv,
1626 1627 1628 1629
					 struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

1630 1631
	return mlxsw_sp_acl_tcam_ventry_action_replace(mlxsw_sp, &rule->ventry,
						       rulei);
1632 1633
}

1634 1635 1636 1637 1638 1639
static int
mlxsw_sp_acl_tcam_mr_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
				       void *rule_priv, bool *activity)
{
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

1640 1641
	return mlxsw_sp_acl_tcam_ventry_activity_get(mlxsw_sp, &rule->ventry,
						     activity);
1642 1643 1644 1645 1646 1647 1648 1649 1650
}

static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_mr_ops = {
	.ruleset_priv_size	= sizeof(struct mlxsw_sp_acl_tcam_mr_ruleset),
	.ruleset_add		= mlxsw_sp_acl_tcam_mr_ruleset_add,
	.ruleset_del		= mlxsw_sp_acl_tcam_mr_ruleset_del,
	.ruleset_bind		= mlxsw_sp_acl_tcam_mr_ruleset_bind,
	.ruleset_unbind		= mlxsw_sp_acl_tcam_mr_ruleset_unbind,
	.ruleset_group_id	= mlxsw_sp_acl_tcam_mr_ruleset_group_id,
1651
	.rule_priv_size		= sizeof(struct mlxsw_sp_acl_tcam_mr_rule),
1652 1653
	.rule_add		= mlxsw_sp_acl_tcam_mr_rule_add,
	.rule_del		= mlxsw_sp_acl_tcam_mr_rule_del,
1654
	.rule_action_replace	= mlxsw_sp_acl_tcam_mr_rule_action_replace,
1655 1656 1657
	.rule_activity_get	= mlxsw_sp_acl_tcam_mr_rule_activity_get,
};

1658 1659 1660
static const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops_arr[] = {
	[MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
1661
	[MLXSW_SP_ACL_PROFILE_MR] = &mlxsw_sp_acl_tcam_mr_ops,
1662 1663
};

1664
const struct mlxsw_sp_acl_profile_ops *
1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676
mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
			      enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;

	if (WARN_ON(profile >= ARRAY_SIZE(mlxsw_sp_acl_tcam_profile_ops_arr)))
		return NULL;
	ops = mlxsw_sp_acl_tcam_profile_ops_arr[profile];
	if (WARN_ON(!ops))
		return NULL;
	return ops;
}