/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h>
#include <net/tc_act/tc_sample.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"

static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
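
/* Putting the items above together: the TX header is MLXSW_TXHDR_LEN
 * (16 bytes, four 32-bit words) long. An illustrative summary of the
 * field placement encoded by the MLXSW_ITEM32() declarations above,
 * for orientation only; the declarations themselves are authoritative:
 *
 *   word 0 (0x00): version[31:28] ctl[27:26] proto[23:21]
 *                  rx_is_router[19] fid_valid[16] swid[14:12]
 *                  control_tclass[6] etclass[3:0]
 *   word 1 (0x04): port_mid[31:16]
 *   word 2 (0x08): fid[15:0]
 *   word 3 (0x0C): type[3:0]
 */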

int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}
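
/* Illustrative lifecycle of the flow counter helpers above (sketch
 * only; error handling abbreviated and the caller context is
 * hypothetical). Note that mlxsw_sp_flow_counter_alloc() clears the
 * counter, so the first read starts from zero:
 *
 *	unsigned int counter_index;
 *	u64 packets, bytes;
 *	int err;
 *
 *	err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &counter_index);
 *	if (err)
 *		return err;
 *	// ... attach counter_index to e.g. an ACL action ...
 *	err = mlxsw_sp_flow_counter_get(mlxsw_sp, counter_index,
 *					&packets, &bytes);
 *	// ... consume packets/bytes ...
 *	mlxsw_sp_flow_counter_free(mlxsw_sp, counter_index);
 */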

static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
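
/* Example of the header built above (illustrative): for a packet bound
 * for local port 7, the fields end up as version=1, ctl=Ethernet
 * control, proto=Ethernet, swid=0, control_tclass=1, port_mid=7 and
 * type=control, i.e. a control packet steered directly to egress port
 * 7, bypassing the forwarding pipeline (see the tx_hdr_port_mid
 * comment above).
 */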

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;

	mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
							  MAX_SPAN);
	mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
					 sizeof(struct mlxsw_sp_span_entry),
					 GFP_KERNEL);
	if (!mlxsw_sp->span.entries)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++)
		INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);

	return 0;
}

static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
	}
	kfree(mlxsw_sp->span.entries);
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	u8 local_port = port->local_port;
	int index;
	int i;
	int err;

	/* find a free entry to use */
	index = -1;
	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		if (!mlxsw_sp->span.entries[i].used) {
			index = i;
			span_entry = &mlxsw_sp->span.entries[i];
			break;
		}
	}
	if (index < 0)
		return NULL;

	/* create a new port analyzer entry for local_port */
	mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	if (err)
		return NULL;

	span_entry->used = true;
	span_entry->id = index;
	span_entry->ref_count = 1;
	span_entry->local_port = local_port;
	return span_entry;
}

static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_span_entry *span_entry)
{
	u8 local_port = span_entry->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	span_entry->used = false;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		if (curr->used && curr->local_port == port->local_port)
			return curr;
	}
	return NULL;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(port);
	if (span_entry) {
		/* Already exists, just take a reference */
		span_entry->ref_count++;
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(port);
}

static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	WARN_ON(!span_entry->ref_count);
	if (--span_entry->ref_count == 0)
		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
	return 0;
}
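
/* Reference counting of the SPAN entries above, in short:
 * mlxsw_sp_span_entry_get() either creates an entry (ref_count = 1) or
 * bumps ref_count when one already exists for the same analyzed port,
 * and mlxsw_sp_span_entry_put() destroys the entry once the last
 * reference is dropped. Each bound inspected port therefore holds
 * exactly one reference.
 */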

static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_inspected_port *p;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		list_for_each_entry(p, &curr->bound_ports_list, list)
			if (p->local_port == port->local_port &&
			    p->type == MLXSW_SP_SPAN_EGRESS)
				return true;
	}

	return false;
}

static int mlxsw_sp_span_mtu_to_buffsize(int mtu)
{
	return MLXSW_SP_BYTES_TO_CELLS(mtu * 5 / 2) + 1;
}
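
/* The 5/2 factor above reserves roughly 2.5 MTUs worth of shared
 * buffer for egress mirroring, rounded up by one extra cell. A worked
 * example, assuming a 1500-byte MTU (the cell size is hidden behind
 * MLXSW_SP_BYTES_TO_CELLS()):
 *
 *	1500 * 5 / 2 = 3750 bytes
 *	buffsize = MLXSW_SP_BYTES_TO_CELLS(3750) + 1 cells
 */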

static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int err;

	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the mtu value
	 */
	if (mlxsw_sp_span_is_egress_mirror(port)) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
				    mlxsw_sp_span_mtu_to_buffsize(mtu));
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
			return err;
		}
	}

	return 0;
}

static struct mlxsw_sp_span_inspected_port *
mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry)
{
	struct mlxsw_sp_span_inspected_port *p;

	list_for_each_entry(p, &span_entry->bound_ports_list, list)
		if (port->local_port == p->local_port)
			return p;
	return NULL;
}

static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
				  struct mlxsw_sp_span_entry *span_entry,
				  enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;
	int err;

	/* if it is an egress SPAN, bind a shared buffer to it */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
				    mlxsw_sp_span_mtu_to_buffsize(port->dev->mtu));
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
			return err;
		}
	}

	/* bind the port to the SPAN entry */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
	if (err)
		goto err_mpar_reg_write;

	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
	if (!inspected_port) {
		err = -ENOMEM;
		goto err_inspected_port_alloc;
	}
	inspected_port->local_port = port->local_port;
	inspected_port->type = type;
	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);

	return 0;

err_mpar_reg_write:
err_inspected_port_alloc:
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}
	return err;
}

static void
mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;

	inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
	if (!inspected_port)
		return;

	/* remove the inspected port */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);

	/* remove the SBIB buffer if it was egress SPAN */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

	list_del(&inspected_port->list);
	kfree(inspected_port);
}

static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
				    struct mlxsw_sp_port *to,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	int err;

	span_entry = mlxsw_sp_span_entry_get(to);
	if (!span_entry)
		return -ENOENT;

	netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
		   span_entry->id);

	err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
	if (err)
		goto err_port_bind;

	return 0;

err_port_bind:
	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
	return err;
}

static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
					struct mlxsw_sp_port *to,
					enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(to);
	if (!span_entry) {
		netdev_err(from->dev, "no span entry found\n");
		return;
	}

	netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
		   span_entry->id);
	mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
}

static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool enable, u32 rate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mpsc_pl[MLXSW_REG_MPSC_LEN];

	mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}

static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
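
/* Illustrative example of the derivation above: with a base MAC of
 * e4:1d:2d:a0:00:00 (value invented for the example), local port 5 is
 * assigned e4:1d:2d:a0:00:05. Only the last byte is adjusted, so the
 * scheme assumes the base MAC leaves room for all local ports.
 */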

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
					swid);
}

static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
			    fid, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}

int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid_begin,
			      vid_end, learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  u16 vid, bool learn_enable)
{
	return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
						learn_enable);
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 module, u8 width, u8 lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
				 bool pause_en, bool pfc_en, u16 delay)
{
	u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);

	delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
			 MLXSW_SP_PAUSE_DELAY;

	if (pause_en || pfc_en)
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
						    pg_size + delay, pg_size);
	else
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
}

int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;

		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}
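
/* Buffer sizing done by the two helpers above, in short: every ingress
 * priority group (PG) that at least one priority maps to gets a quota
 * of 2 * MTU worth of cells. A lossless PG (PAUSE or PFC enabled) is
 * additionally grown by the delay allowance, so traffic already in
 * flight when flow control is asserted still has room; a lossy PG
 * keeps just the 2 * MTU quota.
 */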

static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped	+= p->tx_dropped;
	}
	stats->tx_dropped	= tx_dropped;
	return 0;
}
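
/* The u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() loop
 * above re-reads a CPU's counters until it observes a consistent
 * snapshot, which is what keeps the 64-bit counters coherent on 32-bit
 * hosts without taking a lock in the TX path.
 */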

static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev,
					    int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int mlxsw_sp_port_get_offload_stats(int attr_id,
					   const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				       int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   mlxsw_sp_port->hw_stats.cache);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}

static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	if (err)
		return err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
						   vid, vid);
		if (err)
			return err;
	}

	return 0;
}

static struct mlxsw_sp_port *
mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
	if (!mlxsw_sp_vport)
		return NULL;

	/* dev will be set correctly after the VLAN device is linked
	 * with the real device. In case of bridge SELF invocation, dev
	 * will remain as is.
	 */
	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
	mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
	mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
	mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
	mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
	mlxsw_sp_vport->vport.vid = vid;

	list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);

	return mlxsw_sp_vport;
}

static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	list_del(&mlxsw_sp_vport->vport.list);
	kfree(mlxsw_sp_vport);
}

static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	bool untagged = vid == 1;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid))
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport)
		return -ENOMEM;

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err)
			goto err_port_vp_mode_trans;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
	if (err)
		goto err_port_add_vid;

	return 0;

err_port_add_vid:
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
	return err;
}

static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return 0;

	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);

	/* Drop FID reference. If this was the last reference the
	 * resources will be freed.
	 */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);

	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

	return 0;
}

static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 module = mlxsw_sp_port->mapping.module;
	u8 width = mlxsw_sp_port->mapping.width;
	u8 lane = mlxsw_sp_port->mapping.lane;
	int err;

	if (!mlxsw_sp_port->split)
		err = snprintf(name, len, "p%d", module + 1);
	else
		err = snprintf(name, len, "p%ds%d", module + 1,
			       lane / width);

	if (err >= len)
		return -EINVAL;

	return 0;
}
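
/* Illustrative names produced above: a non-split port on module 0 is
 * "p1"; a split port on module 0 that owns lanes 2-3 at width 2 is
 * "p1s1" (subport index = lane / width = 2 / 2).
 */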

static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
				 unsigned long cookie)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
				      const struct tc_action *a,
				      bool ingress)
{
	struct net *net = dev_net(mlxsw_sp_port->dev);
	enum mlxsw_sp_span_type span_type;
	struct mlxsw_sp_port *to_port;
	struct net_device *to_dev;
	int ifindex;

	ifindex = tcf_mirred_ifindex(a);
	to_dev = __dev_get_by_index(net, ifindex);
	if (!to_dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(to_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port");
		return -EOPNOTSUPP;
	}
	to_port = netdev_priv(to_dev);

	mirror->to_local_port = to_port->local_port;
	mirror->ingress = ingress;
	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
}

static void
mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_sp_span_type span_type;
	struct mlxsw_sp_port *to_port;

	to_port = mlxsw_sp->ports[mirror->to_local_port];
	span_type = mirror->ingress ?
			MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
}

static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct tc_action *a,
				      bool ingress)
{
	int err;

	if (!mlxsw_sp_port->sample)
		return -EOPNOTSUPP;
	if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
	if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
		netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
		return -EOPNOTSUPP;
	}

	rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
			   tcf_sample_psample_group(a));
	mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
	mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
	mlxsw_sp_port->sample->rate = tcf_sample_rate(a);

	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
	return err;
}

static void
mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!mlxsw_sp_port->sample)
		return;

	mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
}

static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  __be16 protocol,
					  struct tc_cls_matchall_offload *cls,
					  bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (!tc_single_action(cls->exts)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -EOPNOTSUPP;
	}

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;
	mall_tc_entry->cookie = cls->cookie;

	tcf_exts_to_list(cls->exts, &actions);
	a = list_first_entry(&actions, struct tc_action, list);

	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;
		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
							    mirror, a, ingress);
	} else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
		mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
		err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, cls,
							    a, ingress);
	} else {
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_add_action;

	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
	return 0;

err_add_action:
	kfree(mall_tc_entry);
	return err;
}

static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *cls)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
							 cls->cookie);
	if (!mall_tc_entry) {
		netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
		return;
	}
	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case MLXSW_SP_PORT_MALL_MIRROR:
		mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
						      &mall_tc_entry->mirror);
		break;
	case MLXSW_SP_PORT_MALL_SAMPLE:
		mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
			     __be16 proto, struct tc_to_netdev *tc)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS);

	switch (tc->type) {
	case TC_SETUP_MATCHALL:
		switch (tc->cls_mall->command) {
		case TC_CLSMATCHALL_REPLACE:
			return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port,
							      proto,
							      tc->cls_mall,
							      ingress);
		case TC_CLSMATCHALL_DESTROY:
			mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port,
						       tc->cls_mall);
			return 0;
		default:
			return -EINVAL;
		}
	case TC_SETUP_CLSFLOWER:
		switch (tc->cls_flower->command) {
		case TC_CLSFLOWER_REPLACE:
			return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress,
						       proto, tc->cls_flower);
		case TC_CLSFLOWER_DESTROY:
			mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress,
						tc->cls_flower);
			return 0;
		case TC_CLSFLOWER_STATS:
			return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress,
						     tc->cls_flower);
		default:
			return -EOPNOTSUPP;
		}
	}

	return -EOPNOTSUPP;
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc           = mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
};

static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}

static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}

static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(const char *payload);
};

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
	{
		.str = "rx_octets_prio",
		.getter = mlxsw_reg_ppcnt_rx_octets_get,
	},
	{
		.str = "rx_frames_prio",
		.getter = mlxsw_reg_ppcnt_rx_frames_get,
	},
	{
		.str = "tx_octets_prio",
		.getter = mlxsw_reg_ppcnt_tx_octets_get,
	},
	{
		.str = "tx_frames_prio",
		.getter = mlxsw_reg_ppcnt_tx_frames_get,
	},
	{
		.str = "rx_pause_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_get,
	},
	{
		.str = "rx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
	},
	{
		.str = "tx_pause_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_get,
	},
	{
		.str = "tx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
	},
};

#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)

static u64 mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get(const char *ppcnt_pl)
{
	u64 transmit_queue = mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);

	return MLXSW_SP_CELLS_TO_BYTES(transmit_queue);
}

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
	{
		.str = "tc_transmit_queue_tc",
		.getter = mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get,
	},
	{
		.str = "tc_no_buffer_discard_uc_tc",
		.getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
	},
};

#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)

#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
					 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
					  MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
					 IEEE_8021QAZ_MAX_TCS)
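
/* Illustrative total with the tables above: 19 IEEE 802.3 counters
 * plus (8 per-priority + 2 per-TC) counters * 8 traffic classes
 * = 19 + 10 * 8 = 99 ethtool stats per port.
 */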

static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
			 mlxsw_sp_port_hw_prio_stats[i].str, prio);
		*p += ETH_GSTRING_LEN;
	}
}

static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
			 mlxsw_sp_port_hw_tc_stats[i].str, tc);
		*p += ETH_GSTRING_LEN;
	}
}

static void mlxsw_sp_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_prio_strings(&p, i);

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_tc_strings(&p, i);

		break;
	}
}

static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
				     enum ethtool_phys_id_state state)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mlcr_pl[MLXSW_REG_MLCR_LEN];
	bool active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		active = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
}

static int
mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
			       int *p_len, enum mlxsw_reg_ppcnt_grp grp)
{
	switch (grp) {
	case MLXSW_REG_PPCNT_IEEE_8023_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_stats;
		*p_len = MLXSW_SP_PORT_HW_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_PRIO_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_prio_stats;
		*p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_TC_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_tc_stats;
		*p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
		break;
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}
	return 0;
}

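/* Fetch a single PPCNT counter group from the device and scatter its
 * fields into the ethtool data array, starting at data_index.
 */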
static void __mlxsw_sp_port_get_stats(struct net_device *dev,
				      enum mlxsw_reg_ppcnt_grp grp, int prio,
				      u64 *data, int data_index)
{
	struct mlxsw_sp_port_hw_stats *hw_stats;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i, len;
	int err;

	err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
	if (err)
		return;
	mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
	for (i = 0; i < len; i++)
		data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
}

static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	int i, data_index = 0;

	/* IEEE 802.3 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
				  data, data_index);
	data_index = MLXSW_SP_PORT_HW_STATS_LEN;

	/* Per-Priority Counters */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
	}

	/* Per-TC Counters */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
	}
}

static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

struct mlxsw_sp_port_link_mode {
	enum ethtool_link_mode_bit_indices mask_ethtool;
	u32 mask;
	u32 speed;
};

static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.mask_ethtool	= ETHTOOL_LINK_MODE_100baseT_Full_BIT,
		.speed		= SPEED_100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.mask_ethtool	= ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
		.speed		= SPEED_1000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.mask_ethtool	= ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
		.speed		= SPEED_10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
		.speed		= SPEED_10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.mask_ethtool	= ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
		.speed		= SPEED_10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.mask_ethtool	= ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
		.speed		= SPEED_20000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
		.speed		= SPEED_40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
		.speed		= SPEED_40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
		.speed		= SPEED_40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
		.speed		= SPEED_40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
		.mask_ethtool	= ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
		.speed		= SPEED_25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
		.mask_ethtool	= ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		.speed		= SPEED_25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.mask_ethtool	= ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
		.speed		= SPEED_25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
		.mask_ethtool	= ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
		.speed		= SPEED_50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.mask_ethtool	= ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
		.speed		= SPEED_50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
		.mask_ethtool	= ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
		.speed		= SPEED_50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
		.speed		= SPEED_56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
		.speed		= SPEED_56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
		.speed		= SPEED_56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
		.speed		= SPEED_56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
		.speed		= SPEED_100000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
		.speed		= SPEED_100000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
		.speed		= SPEED_100000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.mask_ethtool	= ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
		.speed		= SPEED_100000,
	},
};

#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)

static void
mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,
				  struct ethtool_link_ksettings *cmd)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
}

static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			__set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
				  mode);
	}
}

static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
					    struct ethtool_link_ksettings *cmd)
{
	u32 speed = SPEED_UNKNOWN;
	u8 duplex = DUPLEX_UNKNOWN;
	int i;

	if (!carrier_ok)
		goto out;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
			speed = mlxsw_sp_port_link_mode[i].speed;
			duplex = DUPLEX_FULL;
			break;
		}
	}
out:
	cmd->base.speed = speed;
	cmd->base.duplex = duplex;
}

static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return PORT_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
		return PORT_DA;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
		return PORT_NONE;

	return PORT_OTHER;
}

static u32
mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
			     cmd->link_modes.advertising))
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sp_to_ptys_speed(u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sp_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

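/* Build a PTYS mask of every link mode whose speed does not exceed
 * upper_speed; used to enable all speeds a given port width can carry.
 */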
static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap,
					     struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);

	mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd);
	mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported);
}

static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg,
					     struct ethtool_link_ksettings *cmd)
{
	if (!autoneg)
		return;

	ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
	mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising);
}

static void
mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status,
				    struct ethtool_link_ksettings *cmd)
{
	if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp)
		return;

	ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg);
	mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising);
}

static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
					    struct ethtool_link_ksettings *cmd)
{
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u8 autoneg_status;
	bool autoneg;
	int err;

	autoneg = mlxsw_sp_port->link.autoneg;
	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
				  &eth_proto_oper);

	mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);

	mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);

	eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
	autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
	mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);

	cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
	cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
					cmd);

	return 0;
}

static int
mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
				 const struct ethtool_link_ksettings *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap, eth_proto_new;
	bool autoneg;
	int err;

	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);

	autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
	eth_proto_new = autoneg ?
		mlxsw_sp_to_ptys_advert_link(cmd) :
		mlxsw_sp_to_ptys_speed(cmd->base.speed);

	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "No supported speed requested\n");
		return -EINVAL;
	}

	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
				eth_proto_new);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	if (!netif_running(dev))
		return 0;

	mlxsw_sp_port->link.autoneg = autoneg;

	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return 0;
}

static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= mlxsw_sp_port_get_pauseparam,
	.set_pauseparam		= mlxsw_sp_port_set_pauseparam,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_link_ksettings	= mlxsw_sp_port_get_link_ksettings,
	.set_link_ksettings	= mlxsw_sp_port_set_link_ksettings,
};

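/* The maximum speed a port supports is its per-lane base speed
 * multiplied by the number of lanes (width) assigned to it.
 */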
static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_admin;

	eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
				eth_proto_admin);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}

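/* Configure a single element (port, group, subgroup or TC) of the
 * egress scheduling hierarchy through the QEEC register.
 */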
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}

static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all members of the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
				    0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
					    false, 0);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that
	 * support it.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->pvid = 1;

	return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1);
}

static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
{
	return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
}

static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				  bool split, u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	size_t bytes;
	int err;

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping.module = module;
	mlxsw_sp_port->mapping.width = width;
	mlxsw_sp_port->mapping.lane = lane;
	mlxsw_sp_port->link.autoneg = 1;
	bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
	mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->active_vlans) {
		err = -ENOMEM;
		goto err_port_active_vlans_alloc;
	}
	mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->untagged_vlans) {
		err = -ENOMEM;
		goto err_port_untagged_vlans_alloc;
	}
	INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
	INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
					GFP_KERNEL);
	if (!mlxsw_sp_port->sample) {
		err = -ENOMEM;
		goto err_alloc_sample;
	}

	mlxsw_sp_port->hw_stats.cache =
		kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL);

	if (!mlxsw_sp_port->hw_stats.cache) {
		err = -ENOMEM;
		goto err_alloc_hw_stats;
	}
	INIT_DELAYED_WORK(&mlxsw_sp_port->hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top of all
	 * other headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_vport_create;
	}

	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
				mlxsw_sp_port, dev, mlxsw_sp_port->split,
				module);
	mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0);
	return 0;

err_register_netdev:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
err_port_pvid_vport_create:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	kfree(mlxsw_sp_port->hw_stats.cache);
err_alloc_hw_stats:
	kfree(mlxsw_sp_port->sample);
err_alloc_sample:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	kfree(mlxsw_sp_port->untagged_vlans);
err_port_untagged_vlans_alloc:
	kfree(mlxsw_sp_port->active_vlans);
err_port_active_vlans_alloc:
	free_netdev(dev);
	return err;
}

static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				bool split, u8 module, u8 width, u8 lane)
{
	int err;

	err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		return err;
	}
	err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split,
				     module, width, lane);
	if (err)
		goto err_port_create;
	return 0;

err_port_create:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	return err;
}

static void __mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
	kfree(mlxsw_sp_port->hw_stats.cache);
	kfree(mlxsw_sp_port->sample);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	kfree(mlxsw_sp_port->untagged_vlans);
	kfree(mlxsw_sp_port->active_vlans);
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
	free_netdev(mlxsw_sp_port->dev);
}

static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	__mlxsw_sp_port_remove(mlxsw_sp, local_port);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}

static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	return mlxsw_sp->ports[local_port] != NULL;
}

static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->ports);
}

static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	u8 module, width, lane;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
						    &width, &lane);
		if (err)
			goto err_port_module_info_get;
		if (!width)
			continue;
		mlxsw_sp->port_to_module[i] = module;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false,
					   module, width, lane);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
err_port_module_info_get:
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->ports);
	return err;
}

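/* Ports are grouped in clusters of MLXSW_SP_PORTS_PER_CLUSTER_MAX local
 * ports; a split is always performed within a single cluster, so find
 * the cluster's first port by rounding down.
 */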
static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
{
	u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;

	return local_port - offset;
}

static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
				      u8 module, unsigned int count)
{
	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
	int err, i;

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
					       width, i * width);
		if (err)
			goto err_port_module_map;
	}

	for (i = 0; i < count; i++) {
		err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
		if (err)
			goto err_port_swid_set;
	}

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
					   module, width, i * width);
		if (err)
			goto err_port_create;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
	i = count;
err_port_swid_set:
	for (i--; i >= 0; i--)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
					 MLXSW_PORT_SWID_DISABLED_PORT);
	i = count;
err_port_module_map:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
	return err;
}

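/* Re-create the original unsplit ports after a split has been undone or
 * has failed: re-map the modules, restore the SWIDs and then instantiate
 * the ports at full width.
 */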
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port, unsigned int count)
{
	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
	int i;

	/* Split by four means we need to re-create two ports, otherwise
	 * only one.
	 */
	count = count / 2;

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
					 0);
	}

	for (i = 0; i < count; i++)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
				     width, 0);
	}
}

static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	module = mlxsw_sp_port->mapping.module;
	cur_width = mlxsw_sp_port->mapping.width;

	if (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		return -EINVAL;
	}

	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		return -EINVAL;
	}

	/* Make sure we have enough slave (even) ports for the split. */
	if (count == 2) {
		base_port = local_port;
		if (mlxsw_sp->ports[base_port + 1]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	} else {
		base_port = mlxsw_sp_cluster_base_port_get(local_port);
		if (mlxsw_sp->ports[base_port + 1] ||
		    mlxsw_sp->ports[base_port + 3]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	}

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
	return err;
}

static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 cur_width, base_port;
	unsigned int count;
	int i;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
		return -EINVAL;
	}

	cur_width = mlxsw_sp_port->mapping.width;
	count = cur_width == 1 ? 4 : 2;

	base_port = mlxsw_sp_cluster_base_port_get(local_port);

	/* Determine which ports to remove. */
	if (count == 2 && local_port >= base_port + 2)
		base_port = base_port + 2;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);

	return 0;
}

static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port)
		return;

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
	}
}

static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
					      u8 local_port, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}

static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}

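/* Packets trapped for sampling are handed to the psample module for
 * delivery to user space; the skb is consumed here and never enters the
 * regular Rx path.
 */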
static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
					     void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct psample_group *psample_group;
	u32 size;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
				     local_port);
		goto out;
	}
	if (unlikely(!mlxsw_sp_port->sample)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
				     local_port);
		goto out;
	}

	size = mlxsw_sp_port->sample->truncate ?
		  mlxsw_sp_port->sample->trunc_size : skb->len;

	rcu_read_lock();
	psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
	if (!psample_group)
		goto out_unlock;
	psample_sample_packet(psample_group, skb, size,
			      mlxsw_sp_port->dev->ifindex, 0,
			      mlxsw_sp_port->sample->rate);
out_unlock:
	rcu_read_unlock();
out:
	consume_skb(skb);
}

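/* Listener helpers: the MARK variant sets skb->offload_fwd_mark so the
 * bridge does not re-forward packets the hardware has already flooded;
 * the NO_MARK variant leaves the mark clear.
 */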
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
		_is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_EVENTL(_func, _trap_id)		\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)

static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
	MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
	MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true),
	MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
	/* L3 traps */
	MLXSW_SP_RXL_NO_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_NO_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_NO_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
	MLXSW_SP_RXL_NO_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, ARP_MISS, false),
	MLXSW_SP_RXL_NO_MARK(BGP_IPV4, TRAP_TO_CPU, BGP_IPV4, false),
	/* PKT Sample trap */
	MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
		  false, SP_IP2ME, DISCARD)
};

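/* Bind each trap group to a CPU policer. The rates and burst sizes below
 * reflect how critical and how high-volume the trapped protocol is
 * expected to be.
 */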
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
			rate = 128;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
			rate = 16 * 1024;
			burst_size = 10;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
			rate = 1024;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
			is_bytes = true;
			rate = 4 * 1024;
			burst_size = 4;
			break;
		default:
			continue;
		}

		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
			priority = 5;
			tc = 5;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
			priority = 4;
			tc = 4;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
			priority = 3;
			tc = 3;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
			priority = 2;
			tc = 2;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	int i;
	int err;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		return err;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
		err = mlxsw_core_trap_register(mlxsw_sp->core,
					       &mlxsw_sp_listener[i],
					       mlxsw_sp);
		if (err)
			goto err_listener_register;

	}
	return 0;

err_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &mlxsw_sp_listener[i],
					   mlxsw_sp);
	}
	return err;
}

static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &mlxsw_sp_listener[i],
					   mlxsw_sp);
	}
}

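/* Select the flood table used for a given traffic type within a given
 * bridge type: unknown unicast and unregistered IPv4 multicast get
 * dedicated tables, all remaining traffic uses the broadcast table.
 */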
static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
				 enum mlxsw_reg_sfgc_type type,
				 enum mlxsw_reg_sfgc_bridge_type bridge_type)
{
	enum mlxsw_flood_table_type table_type;
	enum mlxsw_sp_flood_table flood_table;
	char sfgc_pl[MLXSW_REG_SFGC_LEN];

	if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
	else
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;

	switch (type) {
	case MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST:
		flood_table = MLXSW_SP_FLOOD_TABLE_UC;
		break;
	case MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4:
		flood_table = MLXSW_SP_FLOOD_TABLE_MC;
		break;
	default:
		flood_table = MLXSW_SP_FLOOD_TABLE_BC;
	}

	mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
			    flood_table);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
}

static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
{
	int type, err;

	for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
		if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
			continue;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
		if (err)
			return err;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
		if (err)
			return err;
	}

	return 0;
}

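/* Configure which packet fields feed the LAG hash and allocate the
 * per-LAG upper-device tracking array, sized by the device's MAX_LAG
 * resource.
 */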
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	int err;

	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
				 sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}

static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}

static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}

static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;
	INIT_LIST_HEAD(&mlxsw_sp->fids);
	INIT_LIST_HEAD(&mlxsw_sp->vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		return err;
	}

	err = mlxsw_sp_flood_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
err_flood_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
	return err;
}

static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
	WARN_ON(!list_empty(&mlxsw_sp->fids));
}

static struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels		= 1,
	.max_vepa_channels		= 0,
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_max_pgt			= 1,
	.max_pgt			= 0,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_offset_flood_tables	= 3,
	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_VFID_MAX,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_kvd_split_data		= 1,
	.kvd_hash_granularity		= MLXSW_SP_KVD_GRANULARITY,
	.kvd_hash_single_parts		= 2,
	.kvd_hash_double_parts		= 1,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.resource_query_enable		= 1,
};

static struct mlxsw_driver mlxsw_sp_driver = {
	.kind				= mlxsw_sp_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp_config_profile,
};

bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
{
	struct mlxsw_sp_port **p_mlxsw_sp_port = data;
	int ret = 0;

	if (mlxsw_sp_port_dev_check(lower_dev)) {
		*p_mlxsw_sp_port = netdev_priv(lower_dev);
		ret = 1;
	}

	return ret;
}

static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);

	return mlxsw_sp_port;
}

struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}

static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
				      &mlxsw_sp_port);

	return mlxsw_sp_port;
}

struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}

void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}

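/* An FDB flush on a LAG port must only be performed when no other port
 * in the same LAG is still a member of the FID; otherwise entries
 * learned through the LAG would be lost while still in use.
 */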
static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
					 u16 fid)
{
	if (mlxsw_sp_fid_is_vfid(fid))
		return mlxsw_sp_port_vport_find_by_fid(lag_port, fid);
	else
		return test_bit(fid, lag_port->active_vlans);
}

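/* FDB entries learned on a lagged port are recorded against the LAG ID,
 * so they may only be flushed once the leaving port is the last member
 * of the FID in that LAG; otherwise other members would lose entries.
 */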
static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
					   u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u16 lag_id = mlxsw_sp_port->lag_id;
	u64 max_lag_members;
	int i, count = 0;

	if (!mlxsw_sp_port->lagged)
		return true;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		struct mlxsw_sp_port *lag_port;

		lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
		if (!lag_port || lag_port->local_port == local_port)
			continue;
		if (mlxsw_sp_lag_port_fid_member(lag_port, fid))
			count++;
	}

	return !count;
}

static int
mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
						mlxsw_sp_port->local_port);

	netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n",
		   mlxsw_sp_port->local_port, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

static int
mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);

	netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n",
		   mlxsw_sp_port->lag_id, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

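/* Flush the port's FDB entries for the given FID, using the LAG flavor
 * of the flush when the port is a LAG member.
 */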
int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid))
		return 0;

	if (mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port,
							     fid);
	else
		return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
}

static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_fid *f, *tmp;

	list_for_each_entry_safe(f, tmp, &mlxsw_sp->fids, list)
		if (--f->ref_count == 0)
			mlxsw_sp_fid_destroy(mlxsw_sp, f);
		else
			WARN_ON_ONCE(1);
}

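/* The device supports a single VLAN-aware bridge. The following helpers
 * track which netdev currently acts as it and how many local ports are
 * enslaved to it.
 */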
static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
					 struct net_device *br_dev)
{
	return !mlxsw_sp->master_bridge.dev ||
	       mlxsw_sp->master_bridge.dev == br_dev;
}

static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	mlxsw_sp->master_bridge.dev = br_dev;
	mlxsw_sp->master_bridge.ref_count++;
}

static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
{
	if (--mlxsw_sp->master_bridge.ref_count == 0) {
		mlxsw_sp->master_bridge.dev = NULL;
		/* It's possible upper VLAN devices are still holding
		 * references to underlying FIDs. Drop the reference
		 * and release the resources if it was the last one.
		 * If it wasn't, then something bad happened.
		 */
		mlxsw_sp_master_bridge_gone_sync(mlxsw_sp);
	}
}

static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct net_device *br_dev)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* When the port is not bridged, untagged packets are tagged with
	 * PVID=VID=1, thereby creating an implicit VLAN interface in
	 * the device. Remove it and let the bridge code take care of its
	 * own VLANs.
	 */
	err = mlxsw_sp_port_kill_vid(dev, 0, 1);
	if (err)
		return err;

	mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev);

	mlxsw_sp_port->learning = 1;
	mlxsw_sp_port->learning_sync = 1;
	mlxsw_sp_port->uc_flood = 1;
	mlxsw_sp_port->mc_flood = 1;
	mlxsw_sp_port->mc_router = 0;
	mlxsw_sp_port->mc_disabled = 1;
	mlxsw_sp_port->bridged = 1;

	return 0;
}

static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;

	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);

	mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp);

	mlxsw_sp_port->learning = 0;
	mlxsw_sp_port->learning_sync = 0;
	mlxsw_sp_port->uc_flood = 0;
	mlxsw_sp_port->mc_flood = 0;
	mlxsw_sp_port->mc_router = 0;
	mlxsw_sp_port->bridged = 0;

	/* Add an implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	mlxsw_sp_port_add_vid(dev, 0, 1);
}

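/* LAG creation, destruction and distributor membership are programmed
 * through the SLDR register; per-port collector state through SLCOR.
 */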
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

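/* Return the LAG ID already mapped to 'lag_dev', or else the first free
 * one. -EBUSY means all LAG IDs are in use.
 */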
static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u64 max_lag;
	int i;

	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
		return false;
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
		return false;
	return true;
}

static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

static void
mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev, u16 lag_id)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	/* If the vPort is assigned a RIF, then leave it since it's no
	 * longer valid.
	 */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f)
		f->leave(mlxsw_sp_vport);

	mlxsw_sp_vport->lag_id = lag_id;
	mlxsw_sp_vport->lagged = 1;
	mlxsw_sp_vport->dev = lag_dev;
}

static void
mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f)
		f->leave(mlxsw_sp_vport);

	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
	mlxsw_sp_vport->lagged = 0;
}

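/* Enslave a port to a LAG: resolve a LAG ID and a free member index, add
 * the port to the collector and enable it, and move the PVID vPort over
 * to the LAG device.
 */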
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;
	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
	if (err)
		goto err_col_port_enable;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_dev, lag_id);

	return 0;

err_col_port_enable:
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	if (mlxsw_sp_port->bridged) {
		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
	}

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port);
}

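/* Distributor membership determines whether the LAG hashes egress
 * traffic to the port; it is kept in sync with the tx_enabled state
 * reported by the LAG (bonding/team) driver.
 */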
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       bool lag_tx_enabled)
{
	if (lag_tx_enabled)
		return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
						  mlxsw_sp_port->lag_id);
	else
		return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
						     mlxsw_sp_port->lag_id);
}

static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}

static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return -EINVAL;

	mlxsw_sp_vport->dev = vlan_dev;

	return 0;
}

static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
}

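/* NETDEV_PRECHANGEUPPER is used to veto topologies the device cannot
 * offload; NETDEV_CHANGEUPPER then mirrors the validated change into
 * the hardware.
 */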
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_l3_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* A hardware limitation forbids enslaving a port to
		 * multiple bridges.
		 */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
							      upper_dev);
			else
				mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
							  upper_dev);
		} else if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								upper_dev);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			else
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
		} else if (netif_is_l3_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_vrf_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_vrf_leave(mlxsw_sp_port);
		} else {
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
	}

	return 0;
}

static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp,
					    struct net_device *vlan_dev)
{
	u16 fid = vlan_dev_vlan_id(vlan_dev);
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp, fid);
	if (!f) {
		f = mlxsw_sp_fid_create(mlxsw_sp, fid);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	f->ref_count++;

	return 0;
}

static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
					       struct net_device *vlan_dev)
{
	u16 fid = vlan_dev_vlan_id(vlan_dev);
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp, fid);
	if (f && f->r)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
	if (f && --f->ref_count == 0)
		mlxsw_sp_fid_destroy(mlxsw_sp, f);
}

static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		return 0;

	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) && !netif_is_l3_master(upper_dev))
			return -EINVAL;
		if (is_vlan_dev(upper_dev) &&
		    br_dev != mlxsw_sp->master_bridge.dev)
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
								       upper_dev);
			else
				mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp,
								   upper_dev);
		} else if (netif_is_l3_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_bridge_vrf_join(mlxsw_sp,
							       br_dev);
			else
				mlxsw_sp_bridge_vrf_leave(mlxsw_sp, br_dev);
		} else {
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

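/* vFIDs back VLAN-unaware bridges. Unlike regular FIDs, traffic is
 * classified to them based on the {port, VID} pair of the ingressing
 * packet.
 */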
static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->vfids.mapped,
				   MLXSW_SP_VFID_MAX);
}

static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);

static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
						 struct net_device *br_dev)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_fid *f;
	u16 vfid, fid;
	int err;

	vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
	if (vfid == MLXSW_SP_VFID_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	fid = mlxsw_sp_vfid_to_fid(vfid);
	err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
	if (err) {
		dev_err(dev, "Failed to create FID=%d\n", fid);
		return ERR_PTR(err);
	}

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		goto err_allocate_vfid;

	f->leave = mlxsw_sp_vport_vfid_leave;
	f->fid = fid;
	f->dev = br_dev;

	list_add(&f->list, &mlxsw_sp->vfids.list);
	set_bit(vfid, mlxsw_sp->vfids.mapped);

	return f;

err_allocate_vfid:
	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
	return ERR_PTR(-ENOMEM);
}

static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fid *f)
{
	u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
	u16 fid = f->fid;

	clear_bit(vfid, mlxsw_sp->vfids.mapped);
	list_del(&f->list);

	if (f->r)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);

	kfree(f);

	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
}

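/* Map or unmap the vPort's {port, VID} pair to the given FID. */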
static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
				  bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
					    vid);
}

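/* Bind the vPort to the vFID backing 'br_dev', creating the vFID on
 * first use, and set up flooding and the {port, VID} to FID mapping.
 */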
static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				    struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f;
	int err;

	f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
	if (!f) {
		f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_flood_set;

	err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_fid_map;

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
	f->ref_count++;

	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);

	return 0;

err_vport_fid_map:
	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
err_vport_flood_set:
	if (!f->ref_count)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
	return err;
}

static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

	mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
	if (--f->ref_count == 0)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
}

static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	int err;

	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev);
	if (err) {
		netdev_err(dev, "Failed to join vFID\n");
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning\n");
		goto err_port_vid_learning_set;
	}

	mlxsw_sp_vport->learning = 1;
	mlxsw_sp_vport->learning_sync = 1;
	mlxsw_sp_vport->uc_flood = 1;
	mlxsw_sp_vport->mc_flood = 1;
	mlxsw_sp_vport->mc_router = 0;
	mlxsw_sp_vport->mc_disabled = 1;
	mlxsw_sp_vport->bridged = 1;

	return 0;

err_port_vid_learning_set:
	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
	return err;
}

static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);

	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);

	mlxsw_sp_vport->learning = 0;
	mlxsw_sp_vport->learning_sync = 0;
	mlxsw_sp_vport->uc_flood = 0;
	mlxsw_sp_vport->mc_flood = 0;
	mlxsw_sp_vport->mc_router = 0;
	mlxsw_sp_vport->bridged = 0;
}

static bool
mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
				  const struct net_device *br_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);

		if (dev && dev == br_dev)
			return false;
	}

	return true;
}

static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
					  unsigned long event, void *ptr,
					  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct net_device *upper_dev;
	int err = 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport)
		return 0;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_l3_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* We can't have multiple VLAN interfaces configured on
		 * the same port that are members of the same bridge.
		 */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
						       upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
								 upper_dev);
			else
				mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
		} else if (netif_is_l3_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_vport_vrf_join(mlxsw_sp_vport);
			else
				mlxsw_sp_vport_vrf_leave(mlxsw_sp_vport);
		} else {
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
							     vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
						unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	if (!mlxsw_sp)
		return 0;

	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		/* VLAN devices are only allowed on top of the
		 * VLAN-aware bridge.
		 */
		if (WARN_ON(vlan_dev_real_dev(vlan_dev) !=
			    mlxsw_sp->master_bridge.dev))
			return -EINVAL;
		if (!netif_is_l3_master(info->upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		if (netif_is_l3_master(info->upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_bridge_vrf_join(mlxsw_sp,
							       vlan_dev);
			else
				mlxsw_sp_bridge_vrf_leave(mlxsw_sp, vlan_dev);
		} else {
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

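/* Events on a VLAN device are dispatched according to its real device:
 * a physical port, a LAG or the VLAN-aware bridge itself.
 */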
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
						      vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
							  vid);
	else if (netif_is_bridge_master(real_dev))
		return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, event,
							    ptr);

	return 0;
}

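/* Top-level netdev notifier: route each event to the handler matching
 * the kind of device it concerns.
 */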
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err = 0;

	if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};

static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_event,
	.priority = 10,	/* Must be called before FIB notifier block */
};

static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
	.notifier_call = mlxsw_sp_router_netevent_event,
};

static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp_pci_driver = {
	.name = mlxsw_sp_driver_name,
	.id_table = mlxsw_sp_pci_id_table,
};

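/* The notifier blocks are registered before the core and PCI drivers,
 * and the error path unwinds the registrations in reverse order.
 */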
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	register_netevent_notifier(&mlxsw_sp_router_netevent_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
	if (err)
		goto err_pci_driver_register;

	return 0;

err_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
err_core_driver_register:
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);