/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.h
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MLXSW_SPECTRUM_H
#define _MLXSW_SPECTRUM_H

#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
#include <linux/list.h>
#include <linux/dcbnl.h>
#include <linux/in6.h>
#include <net/switchdev.h>

#include "port.h"
#include "core.h"

#define MLXSW_SP_VFID_BASE VLAN_N_VID
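
/* FID space layout (derived from the definitions below): FIDs below
 * MLXSW_SP_VFID_BASE map 1:1 to VLANs of the VLAN-aware bridge, vFIDs used
 * for bridged VLAN interfaces start at MLXSW_SP_VFID_BASE (VLAN_N_VID, i.e.
 * 4096), and rFIDs used by router interfaces start at MLXSW_SP_RFID_BASE.
 */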
#define MLXSW_SP_VFID_MAX 6656	/* Bridged VLAN interfaces */

#define MLXSW_SP_RFID_BASE 15360
#define MLXSW_SP_RIF_MAX 800

#define MLXSW_SP_LAG_MAX 64
#define MLXSW_SP_PORT_PER_LAG_MAX 16

#define MLXSW_SP_MID_MAX 7000

#define MLXSW_SP_PORTS_PER_CLUSTER_MAX 4

#define MLXSW_SP_LPM_TREE_MIN 2 /* trees 0 and 1 are reserved */
#define MLXSW_SP_LPM_TREE_MAX 22
#define MLXSW_SP_LPM_TREE_COUNT (MLXSW_SP_LPM_TREE_MAX - MLXSW_SP_LPM_TREE_MIN)

#define MLXSW_SP_VIRTUAL_ROUTER_MAX 256

#define MLXSW_SP_PORT_BASE_SPEED 25000	/* Mb/s */

#define MLXSW_SP_BYTES_PER_CELL 96

#define MLXSW_SP_BYTES_TO_CELLS(b) DIV_ROUND_UP(b, MLXSW_SP_BYTES_PER_CELL)
#define MLXSW_SP_CELLS_TO_BYTES(c) ((c) * MLXSW_SP_BYTES_PER_CELL)

#define MLXSW_SP_KVD_LINEAR_SIZE 65536 /* entries */
#define MLXSW_SP_KVD_HASH_SINGLE_SIZE 163840 /* entries */
#define MLXSW_SP_KVD_HASH_DOUBLE_SIZE 32768 /* entries */

/* Maximum delay buffer needed in case of PAUSE frames, in cells.
 * Assumes 100m cable and maximum MTU.
 */
#define MLXSW_SP_PAUSE_DELAY 612

#define MLXSW_SP_CELL_FACTOR 2	/* 2 * cell_size / (IPG + cell_size + 1) */

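/* Convert a PFC delay, expressed in bits, to the number of buffer cells that
 * must be reserved for it. Worked example: delay = 32768 bits and mtu = 1500
 * give DIV_ROUND_UP(32768, 8) = 4096 bytes, i.e. 43 cells, so the result is
 * 2 * 43 + 16 = 102 cells.
 */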
static inline u16 mlxsw_sp_pfc_delay_get(int mtu, u16 delay)
{
	delay = MLXSW_SP_BYTES_TO_CELLS(DIV_ROUND_UP(delay, BITS_PER_BYTE));
	return MLXSW_SP_CELL_FACTOR * delay + MLXSW_SP_BYTES_TO_CELLS(mtu);
}

struct mlxsw_sp_port;

struct mlxsw_sp_upper {
	struct net_device *dev;
	unsigned int ref_count;
};

struct mlxsw_sp_fid {
	void (*leave)(struct mlxsw_sp_port *mlxsw_sp_vport);
	struct list_head list;
	unsigned int ref_count;
	struct net_device *dev;
	struct mlxsw_sp_rif *r;
	u16 fid;
};

struct mlxsw_sp_rif {
	struct net_device *dev;
	unsigned int ref_count;
	struct mlxsw_sp_fid *f;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif;
};

struct mlxsw_sp_mid {
	struct list_head list;
	unsigned char addr[ETH_ALEN];
	u16 vid;
	u16 mid;
	unsigned int ref_count;
};

static inline u16 mlxsw_sp_vfid_to_fid(u16 vfid)
{
	return MLXSW_SP_VFID_BASE + vfid;
}

static inline u16 mlxsw_sp_fid_to_vfid(u16 fid)
{
	return fid - MLXSW_SP_VFID_BASE;
}

static inline bool mlxsw_sp_fid_is_vfid(u16 fid)
{
	return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_RFID_BASE;
}

static inline bool mlxsw_sp_fid_is_rfid(u16 fid)
{
	return fid >= MLXSW_SP_RFID_BASE;
}

static inline u16 mlxsw_sp_rif_sp_to_fid(u16 rif)
{
	return MLXSW_SP_RFID_BASE + rif;
}

struct mlxsw_sp_sb_pr {
	enum mlxsw_reg_sbpr_mode mode;
	u32 size;
};

struct mlxsw_cp_sb_occ {
	u32 cur;
	u32 max;
};

struct mlxsw_sp_sb_cm {
	u32 min_buff;
	u32 max_buff;
	u8 pool;
	struct mlxsw_cp_sb_occ occ;
};

struct mlxsw_sp_sb_pm {
	u32 min_buff;
	u32 max_buff;
	struct mlxsw_cp_sb_occ occ;
};

#define MLXSW_SP_SB_POOL_COUNT	4
#define MLXSW_SP_SB_TC_COUNT	8
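
/* The leading [2] dimension of the arrays below selects the buffer direction
 * the entry describes: ingress or egress.
 */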

struct mlxsw_sp_sb {
	struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT];
	struct {
		struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT];
		struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT];
	} ports[MLXSW_PORT_MAX_PORTS];
};

#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

enum mlxsw_sp_l3proto {
	MLXSW_SP_L3_PROTO_IPV4,
	MLXSW_SP_L3_PROTO_IPV6,
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib;

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	bool used;
	enum mlxsw_sp_l3proto proto;
	u32 tb_id; /* kernel fib table id */
	struct mlxsw_sp_lpm_tree *lpm_tree;
	struct mlxsw_sp_fib *fib;
};

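/* SPAN (mirroring) state: a SPAN entry is allocated per mirror-to (analyzer)
 * port, and bound_ports_list tracks the ports whose ingress and/or egress
 * traffic is mirrored to it.
 */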
enum mlxsw_sp_span_type {
	MLXSW_SP_SPAN_EGRESS,
	MLXSW_SP_SPAN_INGRESS
};

struct mlxsw_sp_span_inspected_port {
	struct list_head list;
	enum mlxsw_sp_span_type type;
	u8 local_port;
};

struct mlxsw_sp_span_entry {
	u8 local_port;
	bool used;
	struct list_head bound_ports_list;
	int ref_count;
	int id;
};

enum mlxsw_sp_port_mall_action_type {
	MLXSW_SP_PORT_MALL_MIRROR,
};

struct mlxsw_sp_port_mall_mirror_tc_entry {
	u8 to_local_port;
	bool ingress;
};

struct mlxsw_sp_port_mall_tc_entry {
	struct list_head list;
	unsigned long cookie;
	enum mlxsw_sp_port_mall_action_type type;
	union {
		struct mlxsw_sp_port_mall_mirror_tc_entry mirror;
	};
};

struct mlxsw_sp_router {
	struct mlxsw_sp_lpm_tree lpm_trees[MLXSW_SP_LPM_TREE_COUNT];
	struct mlxsw_sp_vr vrs[MLXSW_SP_VIRTUAL_ROUTER_MAX];
	struct rhashtable neigh_ht;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
	} neighs_update;
	struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
	struct list_head nexthop_group_list;
	struct list_head nexthop_neighs_list;
};

struct mlxsw_sp {
	struct {
		struct list_head list;
		DECLARE_BITMAP(mapped, MLXSW_SP_VFID_MAX);
	} vfids;
	struct {
		struct list_head list;
		DECLARE_BITMAP(mapped, MLXSW_SP_MID_MAX);
	} br_mids;
	struct list_head fids;	/* VLAN-aware bridge FIDs */
	struct mlxsw_sp_rif *rifs[MLXSW_SP_RIF_MAX];
	struct mlxsw_sp_port **ports;
	struct mlxsw_core *core;
	const struct mlxsw_bus_info *bus_info;
	unsigned char base_mac[ETH_ALEN];
	struct {
		struct delayed_work dw;
#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
		unsigned int interval; /* ms */
	} fdb_notify;
#define MLXSW_SP_MIN_AGEING_TIME 10
#define MLXSW_SP_MAX_AGEING_TIME 1000000
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
	u32 ageing_time;
	struct mlxsw_sp_upper master_bridge;
	struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX];
	u8 port_to_module[MLXSW_PORT_MAX_PORTS];
	struct mlxsw_sp_sb sb;
	struct mlxsw_sp_router router;
	struct {
		DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE);
	} kvdl;

	struct {
		struct mlxsw_sp_span_entry *entries;
		int entries_count;
	} span;
};

static inline struct mlxsw_sp_upper *
mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	return &mlxsw_sp->lags[lag_id];
}

struct mlxsw_sp_port_pcpu_stats {
	u64			rx_packets;
	u64			rx_bytes;
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
	u32			tx_dropped;
};

struct mlxsw_sp_port {
	struct mlxsw_core_port core_port; /* must be first */
	struct net_device *dev;
	struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
	struct mlxsw_sp *mlxsw_sp;
	u8 local_port;
	u8 stp_state;
	u8 learning:1,
	   learning_sync:1,
	   uc_flood:1,
	   bridged:1,
	   lagged:1,
	   split:1;
	u16 pvid;
	u16 lag_id;
	struct {
		struct list_head list;
		struct mlxsw_sp_fid *f;
		u16 vid;
	} vport;
	struct {
		u8 tx_pause:1,
		   rx_pause:1,
		   autoneg:1;
	} link;
	struct {
		struct ieee_ets *ets;
		struct ieee_maxrate *maxrate;
		struct ieee_pfc *pfc;
	} dcb;
	struct {
		u8 module;
		u8 width;
		u8 lane;
	} mapping;
	/* 802.1Q bridge VLANs */
	unsigned long *active_vlans;
	unsigned long *untagged_vlans;
	/* VLAN interfaces */
	struct list_head vports_list;
	/* TC handles */
	struct list_head mall_tc_list;
};

struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);

static inline bool
mlxsw_sp_port_is_pause_en(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	return mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause;
}

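/* Resolve the port occupying @port_index in LAG @lag_id, or NULL if the slot
 * is unused or the port is no longer a LAG member.
 */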
static inline struct mlxsw_sp_port *
mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 local_port;

	local_port = mlxsw_core_lag_mapping_get(mlxsw_sp->core,
						lag_id, port_index);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	return mlxsw_sp_port && mlxsw_sp_port->lagged ? mlxsw_sp_port : NULL;
}
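
/* A vPort is a shadow mlxsw_sp_port created for each VLAN device configured
 * on top of a port. It is distinguished from the parent port by a non-zero
 * vport.vid, is linked on the parent's vports_list and tracks the FID it is
 * currently mapped to (if any) in vport.f.
 */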

static inline u16
mlxsw_sp_vport_vid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
	return mlxsw_sp_vport->vport.vid;
}

static inline bool
mlxsw_sp_port_is_vport(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);

	return vid != 0;
}

static inline void mlxsw_sp_vport_fid_set(struct mlxsw_sp_port *mlxsw_sp_vport,
					  struct mlxsw_sp_fid *f)
{
	mlxsw_sp_vport->vport.f = f;
}

static inline struct mlxsw_sp_fid *
mlxsw_sp_vport_fid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
	return mlxsw_sp_vport->vport.f;
}

static inline struct net_device *
mlxsw_sp_vport_dev_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	return f ? f->dev : NULL;
}

static inline struct mlxsw_sp_port *
mlxsw_sp_port_vport_find(const struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		if (mlxsw_sp_vport_vid_get(mlxsw_sp_vport) == vid)
			return mlxsw_sp_vport;
	}

	return NULL;
}

static inline struct mlxsw_sp_port *
mlxsw_sp_port_vport_find_by_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				u16 fid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

		if (f && f->fid == fid)
			return mlxsw_sp_vport;
	}

	return NULL;
}

static inline struct mlxsw_sp_fid *mlxsw_sp_fid_find(struct mlxsw_sp *mlxsw_sp,
						     u16 fid)
{
	struct mlxsw_sp_fid *f;

	list_for_each_entry(f, &mlxsw_sp->fids, list)
		if (f->fid == fid)
			return f;

	return NULL;
}

static inline struct mlxsw_sp_fid *
mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp,
		   const struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f;

	list_for_each_entry(f, &mlxsw_sp->vfids.list, list)
		if (f->dev == br_dev)
			return f;

	return NULL;
}

static inline struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev)
{
	int i;

	for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
		if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
			return mlxsw_sp->rifs[i];

	return NULL;
}

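/* Per-FID flood tables: UC handles unknown unicast traffic, BM handles
 * broadcast and multicast traffic.
 */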
enum mlxsw_sp_flood_table {
	MLXSW_SP_FLOOD_TABLE_UC,
	MLXSW_SP_FLOOD_TABLE_BM,
};

int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index,
			 struct devlink_sb_pool_info *pool_info);
int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index, u32 size,
			 enum devlink_sb_threshold_type threshold_type);
int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 *p_threshold);
int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 threshold);
int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 *p_pool_index, u32 *p_threshold);
int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 pool_index, u32 threshold);
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
			     unsigned int sb_index);
int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
			      unsigned int sb_index);
int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
				  unsigned int sb_index, u16 pool_index,
				  u32 *p_cur, u32 *p_max);
int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u32 *p_cur, u32 *p_max);

int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid);
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged);
int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
			     bool set);
void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid);
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
			bool adding);
struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid);
void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f);
void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_rif *r);
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight);
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass);
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc);
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate);
int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool learn_enable);

#ifdef CONFIG_MLXSW_SPECTRUM_DCB

int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port);

#else

static inline int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	return 0;
}

static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{}

#endif

int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_router_fib4_add(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_ipv4_fib *fib4,
			     struct switchdev_trans *trans);
int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_ipv4_fib *fib4);
int mlxsw_sp_router_neigh_construct(struct net_device *dev,
				    struct neighbour *n);
void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
				   struct neighbour *n);
int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
				   unsigned long event, void *ptr);

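/* Allocator for entries in the KVD linear area, e.g. nexthop adjacency
 * entries; allocations are tracked in the kvdl.usage bitmap of mlxsw_sp.
 */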
int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count);
void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);

#endif