/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.h
 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MLXSW_SPECTRUM_H
#define _MLXSW_SPECTRUM_H

#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
#include <linux/list.h>
#include <linux/dcbnl.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <net/psample.h>
#include <net/pkt_cls.h>

#include "port.h"
#include "core.h"
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"

#define MLXSW_SP_VFID_BASE VLAN_N_VID
#define MLXSW_SP_VFID_MAX 1024	/* Bridged VLAN interfaces */

#define MLXSW_SP_DUMMY_FID 15359

#define MLXSW_SP_RFID_BASE 15360

#define MLXSW_SP_MID_MAX 7000

#define MLXSW_SP_PORTS_PER_CLUSTER_MAX 4

#define MLXSW_SP_PORT_BASE_SPEED 25000	/* Mb/s */

#define MLXSW_SP_KVD_LINEAR_SIZE 65536 /* entries */
#define MLXSW_SP_KVD_GRANULARITY 128

struct mlxsw_sp_port;
struct mlxsw_sp_rif;

struct mlxsw_sp_upper {
	struct net_device *dev;
	unsigned int ref_count;
};

struct mlxsw_sp_fid {
	void (*leave)(struct mlxsw_sp_port *mlxsw_sp_vport);
	struct list_head list;
	unsigned int ref_count;
	struct net_device *dev;
	struct mlxsw_sp_rif *rif;
	u16 fid;
};

struct mlxsw_sp_mid {
	struct list_head list;
	unsigned char addr[ETH_ALEN];
	u16 fid;
	u16 mid;
	unsigned int ref_count;
};

static inline u16 mlxsw_sp_vfid_to_fid(u16 vfid)
{
	return MLXSW_SP_VFID_BASE + vfid;
}

static inline u16 mlxsw_sp_fid_to_vfid(u16 fid)
{
	return fid - MLXSW_SP_VFID_BASE;
}

static inline bool mlxsw_sp_fid_is_vfid(u16 fid)
{
	return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_DUMMY_FID;
}
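
/*
 * A rough, non-authoritative sketch of the FID space as implied by the
 * constants above: FIDs of the VLAN-aware bridge sit below
 * MLXSW_SP_VFID_BASE, vFIDs used for bridged VLAN interfaces start at
 * MLXSW_SP_VFID_BASE (up to MLXSW_SP_VFID_MAX of them), MLXSW_SP_DUMMY_FID
 * sits just below MLXSW_SP_RFID_BASE, and router interface FIDs start at
 * MLXSW_SP_RFID_BASE.
 */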

enum mlxsw_sp_span_type {
	MLXSW_SP_SPAN_EGRESS,
	MLXSW_SP_SPAN_INGRESS
};

struct mlxsw_sp_span_inspected_port {
	struct list_head list;
	enum mlxsw_sp_span_type type;
	u8 local_port;
};

struct mlxsw_sp_span_entry {
	u8 local_port;
	bool used;
	struct list_head bound_ports_list;
	int ref_count;
	int id;
};

enum mlxsw_sp_port_mall_action_type {
	MLXSW_SP_PORT_MALL_MIRROR,
	MLXSW_SP_PORT_MALL_SAMPLE,
};

struct mlxsw_sp_port_mall_mirror_tc_entry {
	u8 to_local_port;
	bool ingress;
};

struct mlxsw_sp_port_mall_tc_entry {
	struct list_head list;
	unsigned long cookie;
	enum mlxsw_sp_port_mall_action_type type;
	union {
		struct mlxsw_sp_port_mall_mirror_tc_entry mirror;
	};
};

struct mlxsw_sp_sb;
struct mlxsw_sp_bridge;
struct mlxsw_sp_router;
struct mlxsw_sp_acl;
struct mlxsw_sp_counter_pool;

struct mlxsw_sp {
	struct {
		struct list_head list;
		DECLARE_BITMAP(mapped, MLXSW_SP_VFID_MAX);
	} vfids;
	struct list_head fids;	/* VLAN-aware bridge FIDs */
	struct mlxsw_sp_port **ports;
	struct mlxsw_core *core;
	const struct mlxsw_bus_info *bus_info;
	unsigned char base_mac[ETH_ALEN];
	struct mlxsw_sp_upper *lags;
	u8 *port_to_module;
	struct mlxsw_sp_sb *sb;
	struct mlxsw_sp_bridge *bridge;
	struct mlxsw_sp_router *router;
	struct mlxsw_sp_acl *acl;
	struct {
		DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE);
	} kvdl;

	struct mlxsw_sp_counter_pool *counter_pool;
	struct {
		struct mlxsw_sp_span_entry *entries;
		int entries_count;
	} span;
};

static inline struct mlxsw_sp_upper *
mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	return &mlxsw_sp->lags[lag_id];
}

struct mlxsw_sp_port_pcpu_stats {
	u64			rx_packets;
	u64			rx_bytes;
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
	u32			tx_dropped;
};

struct mlxsw_sp_port_sample {
	struct psample_group __rcu *psample_group;
	u32 trunc_size;
	u32 rate;
	bool truncate;
};

struct mlxsw_sp_port {
	struct net_device *dev;
	struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
	struct mlxsw_sp *mlxsw_sp;
	u8 local_port;
	u8 stp_state;
	u16 learning:1,
	   learning_sync:1,
	   uc_flood:1,
	   mc_flood:1,
	   mc_router:1,
	   mc_disabled:1,
	   bridged:1,
	   lagged:1,
	   split:1;
	u16 pvid;
	u16 lag_id;
	struct {
		struct list_head list;
		struct mlxsw_sp_fid *f;
		u16 vid;
	} vport;
	struct {
		u8 tx_pause:1,
		   rx_pause:1,
		   autoneg:1;
	} link;
	struct {
		struct ieee_ets *ets;
		struct ieee_maxrate *maxrate;
		struct ieee_pfc *pfc;
	} dcb;
	struct {
		u8 module;
		u8 width;
		u8 lane;
	} mapping;
	/* 802.1Q bridge VLANs */
	unsigned long *active_vlans;
	unsigned long *untagged_vlans;
	/* VLAN interfaces */
	struct list_head vports_list;
	/* TC handles */
	struct list_head mall_tc_list;
	struct {
		#define MLXSW_HW_STATS_UPDATE_TIME HZ
		struct rtnl_link_stats64 *cache;
		struct delayed_work update_dw;
	} hw_stats;
	struct mlxsw_sp_port_sample *sample;
};

bool mlxsw_sp_port_dev_check(const struct net_device *dev);
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev);
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);

static inline bool
mlxsw_sp_port_is_pause_en(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	return mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause;
}

static inline struct mlxsw_sp_port *
mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 local_port;

	local_port = mlxsw_core_lag_mapping_get(mlxsw_sp->core,
						lag_id, port_index);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	return mlxsw_sp_port && mlxsw_sp_port->lagged ? mlxsw_sp_port : NULL;
}

static inline u16
mlxsw_sp_vport_vid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
	return mlxsw_sp_vport->vport.vid;
}

static inline bool
mlxsw_sp_port_is_vport(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);

	return vid != 0;
}

static inline void mlxsw_sp_vport_fid_set(struct mlxsw_sp_port *mlxsw_sp_vport,
					  struct mlxsw_sp_fid *f)
{
	mlxsw_sp_vport->vport.f = f;
}

static inline struct mlxsw_sp_fid *
mlxsw_sp_vport_fid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
	return mlxsw_sp_vport->vport.f;
}

static inline struct net_device *
mlxsw_sp_vport_dev_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	return f ? f->dev : NULL;
}

static inline struct mlxsw_sp_port *
mlxsw_sp_port_vport_find(const struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		if (mlxsw_sp_vport_vid_get(mlxsw_sp_vport) == vid)
			return mlxsw_sp_vport;
	}

	return NULL;
}
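
/*
 * Example (hypothetical caller, not from the original driver code): find the
 * vPort backing VID 10 on a port and fetch its FID, if it has one.
 *
 *	struct mlxsw_sp_port *mlxsw_sp_vport;
 *	struct mlxsw_sp_fid *f;
 *
 *	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 10);
 *	f = mlxsw_sp_vport ? mlxsw_sp_vport_fid_get(mlxsw_sp_vport) : NULL;
 */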

static inline struct mlxsw_sp_port *
mlxsw_sp_port_vport_find_by_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				u16 fid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

		if (f && f->fid == fid)
			return mlxsw_sp_vport;
	}

	return NULL;
}

static inline struct mlxsw_sp_fid *mlxsw_sp_fid_find(struct mlxsw_sp *mlxsw_sp,
						     u16 fid)
{
	struct mlxsw_sp_fid *f;

	list_for_each_entry(f, &mlxsw_sp->fids, list)
		if (f->fid == fid)
			return f;

	return NULL;
}

static inline struct mlxsw_sp_fid *
mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp,
		   const struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f;

	list_for_each_entry(f, &mlxsw_sp->vfids.list, list)
		if (f->dev == br_dev)
			return f;

	return NULL;
}

enum mlxsw_sp_flood_table {
	MLXSW_SP_FLOOD_TABLE_UC,
	MLXSW_SP_FLOOD_TABLE_BC,
	MLXSW_SP_FLOOD_TABLE_MC,
};

int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index,
			 struct devlink_sb_pool_info *pool_info);
int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index, u32 size,
			 enum devlink_sb_threshold_type threshold_type);
int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 *p_threshold);
int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 threshold);
int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 *p_pool_index, u32 *p_threshold);
int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 pool_index, u32 threshold);
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
			     unsigned int sb_index);
int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
			      unsigned int sb_index);
int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
				  unsigned int sb_index, u16 pool_index,
				  u32 *p_cur, u32 *p_max);
int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u32 *p_cur, u32 *p_max);
u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells);
u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes);

struct mlxsw_sp_upper *mlxsw_sp_master_bridge(const struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid);
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged);
int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
			     bool set);
void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid);
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
			bool adding);
struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid);
void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f);
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight);
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass);
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc);
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate);
int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state);
int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable);

#ifdef CONFIG_MLXSW_SPECTRUM_DCB

int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port);

#else

static inline int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	return 0;
}

static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{}

#endif

int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
				   unsigned long event, void *ptr);
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev);
int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
			    unsigned long event, void *ptr);
void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_rif *rif);
int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
				 struct netdev_notifier_changeupper_info *info);

int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count,
			u32 *p_entry_index);
void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
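
/*
 * Example (hypothetical caller): allocate a run of linear KVD entries and
 * free it again; the entry count of 1 is only illustrative.
 *
 *	u32 kvdl_index;
 *	int err;
 *
 *	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &kvdl_index);
 *	if (err)
 *		return err;
 *	...
 *	mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
 */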

struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);

struct mlxsw_sp_acl_rule_info {
	unsigned int priority;
	struct mlxsw_afk_element_values values;
	struct mlxsw_afa_block *act_block;
	unsigned int counter_index;
	bool counter_valid;
};

enum mlxsw_sp_acl_profile {
	MLXSW_SP_ACL_PROFILE_FLOWER,
};

struct mlxsw_sp_acl_profile_ops {
	size_t ruleset_priv_size;
	int (*ruleset_add)(struct mlxsw_sp *mlxsw_sp,
			   void *priv, void *ruleset_priv);
	void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
	int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
			    struct net_device *dev, bool ingress);
	void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
	size_t rule_priv_size;
	int (*rule_add)(struct mlxsw_sp *mlxsw_sp,
			void *ruleset_priv, void *rule_priv,
			struct mlxsw_sp_acl_rule_info *rulei);
	void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv);
	int (*rule_activity_get)(struct mlxsw_sp *mlxsw_sp, void *rule_priv,
				 bool *activity);
};

struct mlxsw_sp_acl_ops {
	size_t priv_size;
	int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv);
	void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
	const struct mlxsw_sp_acl_profile_ops *
			(*profile_ops)(struct mlxsw_sp *mlxsw_sp,
				       enum mlxsw_sp_acl_profile profile);
};

struct mlxsw_sp_acl_ruleset;

struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
			 struct net_device *dev, bool ingress,
			 enum mlxsw_sp_acl_profile profile);
void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_ruleset *ruleset);

struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl);
void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei);
void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
				 unsigned int priority);
void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    u32 key_value, u32 mask_value);
void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    const char *key_value,
				    const char *mask_value, unsigned int len);
void mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei);
void mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
				 u16 group_id);
int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule_info *rulei,
			       struct net_device *out_dev);
int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule_info *rulei,
				u32 action, u16 vid, u16 proto, u8 prio);
int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_rule_info *rulei,
				   u16 fid);

struct mlxsw_sp_acl_rule;

struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie);
void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule *rule);
int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_rule *rule);
void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_acl_rule *rule);
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie);
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule);
int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule *rule,
				u64 *packets, u64 *bytes, u64 *last_use);
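
/*
 * Example (hypothetical, condensed flow with error handling and reference
 * lifetime omitted): how a classifier offload might drive the ACL API above.
 *
 *	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, dev, ingress,
 *					   MLXSW_SP_ACL_PROFILE_FLOWER);
 *	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, cookie);
 *	rulei = mlxsw_sp_acl_rule_rulei(rule);
 *	mlxsw_sp_acl_rulei_priority(rulei, prio);
 *	mlxsw_sp_acl_rulei_act_drop(rulei);
 *	mlxsw_sp_acl_rulei_commit(rulei);
 *	mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
 */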

int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);

extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops;

int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
			    __be16 protocol, struct tc_cls_flower_offload *f);
void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
			     struct tc_cls_flower_offload *f);
int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
			  struct tc_cls_flower_offload *f);
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes);
int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index);
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index);

#endif