/*
 * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support
 * Copyright (c) 2008 Marvell Semiconductor
 *
 * Copyright (c) 2015 CMC Electronics, Inc.
 *	Added support for VLAN Table Unit operations
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_bridge.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/gpio/consumer.h>
#include <linux/phy.h>
#include <net/dsa.h>
#include <net/switchdev.h>
#include "mv88e6xxx.h"

static void assert_smi_lock(struct mv88e6xxx_priv_state *ps)
{
	if (unlikely(!mutex_is_locked(&ps->smi_mutex))) {
		dev_err(ps->dev, "SMI lock not held!\n");
		dump_stack();
	}
}

/* If the switch's ADDR[4:0] strap pins are strapped to zero, it will
 * use all 32 SMI bus addresses on its SMI bus, and all switch registers
 * will be directly accessible on some {device address,register address}
 * pair.  If the ADDR[4:0] pins are not strapped to zero, the switch
 * will only respond to SMI transactions to that specific address, and
 * an indirect addressing mechanism needs to be used to access its
 * registers.
 */
static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr)
{
	int ret;
	int i;

	for (i = 0; i < 16; i++) {
		ret = mdiobus_read_nested(bus, sw_addr, SMI_CMD);
		if (ret < 0)
			return ret;

		if ((ret & SMI_CMD_BUSY) == 0)
			return 0;
	}

	return -ETIMEDOUT;
}

static int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr,
				int reg)
{
	int ret;

	if (sw_addr == 0)
		return mdiobus_read_nested(bus, addr, reg);

	/* Wait for the bus to become free. */
	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
	if (ret < 0)
		return ret;

	/* Transmit the read command. */
	ret = mdiobus_write_nested(bus, sw_addr, SMI_CMD,
				   SMI_CMD_OP_22_READ | (addr << 5) | reg);
	if (ret < 0)
		return ret;

	/* Wait for the read command to complete. */
	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
	if (ret < 0)
		return ret;

	/* Read the data. */
	ret = mdiobus_read_nested(bus, sw_addr, SMI_DATA);
	if (ret < 0)
		return ret;

	return ret & 0xffff;
}
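
/* For illustration: with indirect addressing, reading register 0x03 of
 * device address 0x10 means writing the command word
 *
 *	SMI_CMD_OP_22_READ | (0x10 << 5) | 0x03
 *
 * to SMI_CMD (device address in bits 9:5, register in bits 4:0), polling
 * until SMI_CMD_BUSY clears, and then fetching the result from SMI_DATA.
 */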

static int _mv88e6xxx_reg_read(struct mv88e6xxx_priv_state *ps,
			       int addr, int reg)
{
	int ret;

	assert_smi_lock(ps);

	ret = __mv88e6xxx_reg_read(ps->bus, ps->sw_addr, addr, reg);
	if (ret < 0)
		return ret;

	dev_dbg(ps->dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
		addr, reg, ret);

	return ret;
}

int mv88e6xxx_reg_read(struct mv88e6xxx_priv_state *ps, int addr, int reg)
{
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_reg_read(ps, addr, reg);
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

static int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
				 int reg, u16 val)
{
	int ret;

	if (sw_addr == 0)
		return mdiobus_write_nested(bus, addr, reg, val);

	/* Wait for the bus to become free. */
	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
	if (ret < 0)
		return ret;

	/* Transmit the data to write. */
	ret = mdiobus_write_nested(bus, sw_addr, SMI_DATA, val);
	if (ret < 0)
		return ret;

	/* Transmit the write command. */
	ret = mdiobus_write_nested(bus, sw_addr, SMI_CMD,
				   SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
	if (ret < 0)
		return ret;

	/* Wait for the write command to complete. */
	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
	if (ret < 0)
		return ret;

	return 0;
}

static int _mv88e6xxx_reg_write(struct mv88e6xxx_priv_state *ps, int addr,
				int reg, u16 val)
{
	assert_smi_lock(ps);

	dev_dbg(ps->dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
		addr, reg, val);

	return __mv88e6xxx_reg_write(ps->bus, ps->sw_addr, addr, reg, val);
}

int mv88e6xxx_reg_write(struct mv88e6xxx_priv_state *ps, int addr,
			int reg, u16 val)
{
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_reg_write(ps, addr, reg, val);
	mutex_unlock(&ps->smi_mutex);

	return ret;
}
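
/* Locking convention used throughout this file: the accessors prefixed
 * with an underscore (_mv88e6xxx_reg_read/_mv88e6xxx_reg_write) expect the
 * caller to hold smi_mutex, while the unprefixed wrappers above take and
 * drop it themselves.  A minimal sketch of a caller batching two accesses:
 *
 *	mutex_lock(&ps->smi_mutex);
 *	ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_PCS_CTRL);
 *	if (ret >= 0)
 *		ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
 *					   PORT_PCS_CTRL, ret);
 *	mutex_unlock(&ps->smi_mutex);
 */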

int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int err;

	err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_01,
				  (addr[0] << 8) | addr[1]);
	if (err)
		return err;

	err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_23,
				  (addr[2] << 8) | addr[3]);
	if (err)
		return err;

	return mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_45,
				   (addr[4] << 8) | addr[5]);
}

int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;
	int i;

	for (i = 0; i < 6; i++) {
		int j;

		/* Write the MAC address byte. */
		ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SWITCH_MAC,
					  GLOBAL2_SWITCH_MAC_BUSY |
					  (i << 8) | addr[i]);
		if (ret)
			return ret;

		/* Wait for the write to complete. */
		for (j = 0; j < 16; j++) {
			ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2,
						 GLOBAL2_SWITCH_MAC);
			if (ret < 0)
				return ret;

			if ((ret & GLOBAL2_SWITCH_MAC_BUSY) == 0)
				break;
		}
		if (j == 16)
			return -ETIMEDOUT;
	}

	return 0;
}

static int _mv88e6xxx_phy_read(struct mv88e6xxx_priv_state *ps, int addr,
			       int regnum)
{
	if (addr >= 0)
		return _mv88e6xxx_reg_read(ps, addr, regnum);
	return 0xffff;
}

static int _mv88e6xxx_phy_write(struct mv88e6xxx_priv_state *ps, int addr,
				int regnum, u16 val)
{
	if (addr >= 0)
		return _mv88e6xxx_reg_write(ps, addr, regnum, val);
	return 0;
}

static int mv88e6xxx_ppu_disable(struct mv88e6xxx_priv_state *ps)
{
	int ret;
	unsigned long timeout;

	ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_CONTROL);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL,
				   ret & ~GLOBAL_CONTROL_PPU_ENABLE);
	if (ret)
		return ret;

	timeout = jiffies + 1 * HZ;
	while (time_before(jiffies, timeout)) {
		ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATUS);
		if (ret < 0)
			return ret;

		usleep_range(1000, 2000);
		if ((ret & GLOBAL_STATUS_PPU_MASK) !=
		    GLOBAL_STATUS_PPU_POLLING)
			return 0;
	}

	return -ETIMEDOUT;
}

static int mv88e6xxx_ppu_enable(struct mv88e6xxx_priv_state *ps)
{
	int ret, err;
	unsigned long timeout;

	ret = mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_CONTROL);
	if (ret < 0)
		return ret;

	err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL,
				  ret | GLOBAL_CONTROL_PPU_ENABLE);
	if (err)
		return err;

	timeout = jiffies + 1 * HZ;
	while (time_before(jiffies, timeout)) {
		ret = mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATUS);
		if (ret < 0)
			return ret;

		usleep_range(1000, 2000);
		if ((ret & GLOBAL_STATUS_PPU_MASK) ==
		    GLOBAL_STATUS_PPU_POLLING)
			return 0;
	}

	return -ETIMEDOUT;
}

static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
{
	struct mv88e6xxx_priv_state *ps;

	ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work);
	if (mutex_trylock(&ps->ppu_mutex)) {
		if (mv88e6xxx_ppu_enable(ps) == 0)
			ps->ppu_disabled = 0;
		mutex_unlock(&ps->ppu_mutex);
	}
}

static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
{
	struct mv88e6xxx_priv_state *ps = (void *)_ps;

	schedule_work(&ps->ppu_work);
}

static int mv88e6xxx_ppu_access_get(struct mv88e6xxx_priv_state *ps)
{
	int ret;

	mutex_lock(&ps->ppu_mutex);

	/* If the PHY polling unit is enabled, disable it so that
	 * we can access the PHY registers.  If it was already
	 * disabled, cancel the timer that is going to re-enable
	 * it.
	 */
	if (!ps->ppu_disabled) {
		ret = mv88e6xxx_ppu_disable(ps);
		if (ret < 0) {
			mutex_unlock(&ps->ppu_mutex);
			return ret;
		}
		ps->ppu_disabled = 1;
	} else {
		del_timer(&ps->ppu_timer);
		ret = 0;
	}

	return ret;
}

static void mv88e6xxx_ppu_access_put(struct mv88e6xxx_priv_state *ps)
{
	/* Schedule a timer to re-enable the PHY polling unit. */
	mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
	mutex_unlock(&ps->ppu_mutex);
}

void mv88e6xxx_ppu_state_init(struct mv88e6xxx_priv_state *ps)
{
	mutex_init(&ps->ppu_mutex);
	INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
	init_timer(&ps->ppu_timer);
	ps->ppu_timer.data = (unsigned long)ps;
	ps->ppu_timer.function = mv88e6xxx_ppu_reenable_timer;
}

static int mv88e6xxx_phy_read_ppu(struct mv88e6xxx_priv_state *ps, int addr,
				  int regnum)
{
	int ret;

	ret = mv88e6xxx_ppu_access_get(ps);
	if (ret >= 0) {
		ret = _mv88e6xxx_reg_read(ps, addr, regnum);
		mv88e6xxx_ppu_access_put(ps);
	}

	return ret;
}

static int mv88e6xxx_phy_write_ppu(struct mv88e6xxx_priv_state *ps, int addr,
				   int regnum, u16 val)
{
	int ret;

	ret = mv88e6xxx_ppu_access_get(ps);
	if (ret >= 0) {
		ret = _mv88e6xxx_reg_write(ps, addr, regnum, val);
		mv88e6xxx_ppu_access_put(ps);
	}

	return ret;
}
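
/* Summary of the PPU dance above: mv88e6xxx_ppu_access_get() either
 * disables the PHY polling unit or, if it is already off, cancels the
 * pending re-enable timer; mv88e6xxx_ppu_access_put() arms ppu_timer for
 * 10 ms, whose handler schedules ppu_work, which re-enables the PPU under
 * ppu_mutex.  Back-to-back PHY accesses therefore keep the PPU off instead
 * of toggling it around every single register access.
 */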

static bool mv88e6xxx_6065_family(struct mv88e6xxx_priv_state *ps)
{
	return ps->info->family == MV88E6XXX_FAMILY_6065;
}

static bool mv88e6xxx_6095_family(struct mv88e6xxx_priv_state *ps)
{
	return ps->info->family == MV88E6XXX_FAMILY_6095;
}

static bool mv88e6xxx_6097_family(struct mv88e6xxx_priv_state *ps)
{
	return ps->info->family == MV88E6XXX_FAMILY_6097;
}

static bool mv88e6xxx_6165_family(struct mv88e6xxx_priv_state *ps)
{
	return ps->info->family == MV88E6XXX_FAMILY_6165;
}

static bool mv88e6xxx_6185_family(struct mv88e6xxx_priv_state *ps)
{
	return ps->info->family == MV88E6XXX_FAMILY_6185;
}

static bool mv88e6xxx_6320_family(struct mv88e6xxx_priv_state *ps)
{
	return ps->info->family == MV88E6XXX_FAMILY_6320;
}

static bool mv88e6xxx_6351_family(struct mv88e6xxx_priv_state *ps)
{
	return ps->info->family == MV88E6XXX_FAMILY_6351;
}

static bool mv88e6xxx_6352_family(struct mv88e6xxx_priv_state *ps)
{
	return ps->info->family == MV88E6XXX_FAMILY_6352;
}

static unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_priv_state *ps)
{
	return ps->info->num_databases;
}

static bool mv88e6xxx_has_fid_reg(struct mv88e6xxx_priv_state *ps)
{
	/* Does the device have dedicated FID registers for ATU and VTU ops? */
	if (mv88e6xxx_6097_family(ps) || mv88e6xxx_6165_family(ps) ||
	    mv88e6xxx_6351_family(ps) || mv88e6xxx_6352_family(ps))
		return true;

	return false;
}

static bool mv88e6xxx_has_stu(struct mv88e6xxx_priv_state *ps)
{
	/* Does the device have STU and dedicated SID registers for VTU ops? */
	if (mv88e6xxx_6097_family(ps) || mv88e6xxx_6165_family(ps) ||
	    mv88e6xxx_6351_family(ps) || mv88e6xxx_6352_family(ps))
		return true;

	return false;
}

/* We expect the switch to perform auto negotiation if there is a real
 * phy. However, in the case of a fixed link phy, we force the port
 * settings from the fixed link settings.
 */
void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
			   struct phy_device *phydev)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u32 reg;
	int ret;

	if (!phy_is_pseudo_fixed_link(phydev))
		return;

	mutex_lock(&ps->smi_mutex);

	ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_PCS_CTRL);
	if (ret < 0)
		goto out;

	reg = ret & ~(PORT_PCS_CTRL_LINK_UP |
		      PORT_PCS_CTRL_FORCE_LINK |
		      PORT_PCS_CTRL_DUPLEX_FULL |
		      PORT_PCS_CTRL_FORCE_DUPLEX |
		      PORT_PCS_CTRL_UNFORCED);

	reg |= PORT_PCS_CTRL_FORCE_LINK;
	if (phydev->link)
		reg |= PORT_PCS_CTRL_LINK_UP;

	if (mv88e6xxx_6065_family(ps) && phydev->speed > SPEED_100)
		goto out;

	switch (phydev->speed) {
	case SPEED_1000:
		reg |= PORT_PCS_CTRL_1000;
		break;
	case SPEED_100:
		reg |= PORT_PCS_CTRL_100;
		break;
	case SPEED_10:
		reg |= PORT_PCS_CTRL_10;
		break;
	default:
		pr_info("Unknown speed\n");
		goto out;
	}

	reg |= PORT_PCS_CTRL_FORCE_DUPLEX;
	if (phydev->duplex == DUPLEX_FULL)
		reg |= PORT_PCS_CTRL_DUPLEX_FULL;

	if ((mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps)) &&
	    (port >= ps->info->num_ports - 2)) {
		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
			reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK;
		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
			reg |= PORT_PCS_CTRL_RGMII_DELAY_TXCLK;
		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
			reg |= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK |
				PORT_PCS_CTRL_RGMII_DELAY_TXCLK);
	}
	_mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_PCS_CTRL, reg);

out:
	mutex_unlock(&ps->smi_mutex);
}

static int _mv88e6xxx_stats_wait(struct mv88e6xxx_priv_state *ps)
{
	int ret;
	int i;

	for (i = 0; i < 10; i++) {
		ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_OP);
		if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
			return 0;
	}

	return -ETIMEDOUT;
}

static int _mv88e6xxx_stats_snapshot(struct mv88e6xxx_priv_state *ps,
				     int port)
{
	int ret;

	if (mv88e6xxx_6320_family(ps) || mv88e6xxx_6352_family(ps))
		port = (port + 1) << 5;

	/* Snapshot the hardware statistics counters for this port. */
	ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP,
				   GLOBAL_STATS_OP_CAPTURE_PORT |
				   GLOBAL_STATS_OP_HIST_RX_TX | port);
	if (ret < 0)
		return ret;

	/* Wait for the snapshotting to complete. */
	ret = _mv88e6xxx_stats_wait(ps);
	if (ret < 0)
		return ret;

	return 0;
}

static void _mv88e6xxx_stats_read(struct mv88e6xxx_priv_state *ps,
				  int stat, u32 *val)
{
	u32 _val;
	int ret;

	*val = 0;

	ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP,
				   GLOBAL_STATS_OP_READ_CAPTURED |
				   GLOBAL_STATS_OP_HIST_RX_TX | stat);
	if (ret < 0)
		return;

	ret = _mv88e6xxx_stats_wait(ps);
	if (ret < 0)
		return;

	ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
	if (ret < 0)
		return;

	_val = ret << 16;

	ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
	if (ret < 0)
		return;

	*val = _val | ret;
}

static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
	{ "in_good_octets",	8, 0x00, BANK0, },
	{ "in_bad_octets",	4, 0x02, BANK0, },
	{ "in_unicast",		4, 0x04, BANK0, },
	{ "in_broadcasts",	4, 0x06, BANK0, },
	{ "in_multicasts",	4, 0x07, BANK0, },
	{ "in_pause",		4, 0x16, BANK0, },
	{ "in_undersize",	4, 0x18, BANK0, },
	{ "in_fragments",	4, 0x19, BANK0, },
	{ "in_oversize",	4, 0x1a, BANK0, },
	{ "in_jabber",		4, 0x1b, BANK0, },
	{ "in_rx_error",	4, 0x1c, BANK0, },
	{ "in_fcs_error",	4, 0x1d, BANK0, },
	{ "out_octets",		8, 0x0e, BANK0, },
	{ "out_unicast",	4, 0x10, BANK0, },
	{ "out_broadcasts",	4, 0x13, BANK0, },
	{ "out_multicasts",	4, 0x12, BANK0, },
	{ "out_pause",		4, 0x15, BANK0, },
	{ "excessive",		4, 0x11, BANK0, },
	{ "collisions",		4, 0x1e, BANK0, },
	{ "deferred",		4, 0x05, BANK0, },
	{ "single",		4, 0x14, BANK0, },
	{ "multiple",		4, 0x17, BANK0, },
	{ "out_fcs_error",	4, 0x03, BANK0, },
	{ "late",		4, 0x1f, BANK0, },
	{ "hist_64bytes",	4, 0x08, BANK0, },
	{ "hist_65_127bytes",	4, 0x09, BANK0, },
	{ "hist_128_255bytes",	4, 0x0a, BANK0, },
	{ "hist_256_511bytes",	4, 0x0b, BANK0, },
	{ "hist_512_1023bytes", 4, 0x0c, BANK0, },
	{ "hist_1024_max_bytes", 4, 0x0d, BANK0, },
	{ "sw_in_discards",	4, 0x10, PORT, },
	{ "sw_in_filtered",	2, 0x12, PORT, },
	{ "sw_out_filtered",	2, 0x13, PORT, },
	{ "in_discards",	4, 0x00 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "in_filtered",	4, 0x01 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "in_accepted",	4, 0x02 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "in_bad_accepted",	4, 0x03 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "in_good_avb_class_a", 4, 0x04 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "in_good_avb_class_b", 4, 0x05 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "in_bad_avb_class_a", 4, 0x06 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "in_bad_avb_class_b", 4, 0x07 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "tcam_counter_0",	4, 0x08 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "tcam_counter_1",	4, 0x09 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "tcam_counter_2",	4, 0x0a | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "tcam_counter_3",	4, 0x0b | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "in_da_unknown",	4, 0x0e | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "in_management",	4, 0x0f | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "out_queue_0",	4, 0x10 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "out_queue_1",	4, 0x11 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "out_queue_2",	4, 0x12 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "out_queue_3",	4, 0x13 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "out_queue_4",	4, 0x14 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "out_queue_5",	4, 0x15 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "out_queue_6",	4, 0x16 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "out_queue_7",	4, 0x17 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "out_cut_through",	4, 0x18 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "out_octets_a",	4, 0x1a | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "out_octets_b",	4, 0x1b | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "out_management",	4, 0x1f | GLOBAL_STATS_OP_BANK_1, BANK1, },
};
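
/* Each entry above is { name, size in bytes, register offset, bank }.
 * BANK0/BANK1 counters go through the global stats unit via
 * _mv88e6xxx_stats_read(), PORT counters are read directly from the
 * per-port registers.  For example, "in_good_octets" is an 8-byte bank 0
 * counter, so stats registers 0x00 (low 32 bits) and 0x01 (high 32 bits)
 * are read and combined, while "sw_in_discards" is a 4-byte port counter
 * assembled from port registers 0x10 and 0x11.
 */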

static bool mv88e6xxx_has_stat(struct mv88e6xxx_priv_state *ps,
			       struct mv88e6xxx_hw_stat *stat)
{
	switch (stat->type) {
	case BANK0:
		return true;
	case BANK1:
		return mv88e6xxx_6320_family(ps);
	case PORT:
		return mv88e6xxx_6095_family(ps) ||
			mv88e6xxx_6185_family(ps) ||
			mv88e6xxx_6097_family(ps) ||
			mv88e6xxx_6165_family(ps) ||
			mv88e6xxx_6351_family(ps) ||
			mv88e6xxx_6352_family(ps);
	}
	return false;
}

static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_priv_state *ps,
					    struct mv88e6xxx_hw_stat *s,
					    int port)
{
	u32 low;
	u32 high = 0;
	int ret;
	u64 value;

	switch (s->type) {
	case PORT:
		ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), s->reg);
		if (ret < 0)
			return UINT64_MAX;

		low = ret;
		if (s->sizeof_stat == 4) {
			ret = _mv88e6xxx_reg_read(ps, REG_PORT(port),
						  s->reg + 1);
			if (ret < 0)
				return UINT64_MAX;
			low |= ((u32)ret) << 16;
		}
		break;
	case BANK0:
	case BANK1:
		_mv88e6xxx_stats_read(ps, s->reg, &low);
		if (s->sizeof_stat == 8)
			_mv88e6xxx_stats_read(ps, s->reg + 1, &high);
	}
	value = (((u64)high) << 32) | low;
	return value;
}

void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	struct mv88e6xxx_hw_stat *stat;
	int i, j;

	for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
		stat = &mv88e6xxx_hw_stats[i];
		if (mv88e6xxx_has_stat(ps, stat)) {
			memcpy(data + j * ETH_GSTRING_LEN, stat->string,
			       ETH_GSTRING_LEN);
			j++;
		}
	}
}

int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	struct mv88e6xxx_hw_stat *stat;
	int i, j;

	for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
		stat = &mv88e6xxx_hw_stats[i];
		if (mv88e6xxx_has_stat(ps, stat))
			j++;
	}
	return j;
}

void
mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
			    int port, uint64_t *data)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	struct mv88e6xxx_hw_stat *stat;
	int ret;
	int i, j;

	mutex_lock(&ps->smi_mutex);

	ret = _mv88e6xxx_stats_snapshot(ps, port);
	if (ret < 0) {
		mutex_unlock(&ps->smi_mutex);
		return;
	}
	for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
		stat = &mv88e6xxx_hw_stats[i];
		if (mv88e6xxx_has_stat(ps, stat)) {
			data[j] = _mv88e6xxx_get_ethtool_stat(ps, stat, port);
			j++;
		}
	}

	mutex_unlock(&ps->smi_mutex);
}

int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
{
	return 32 * sizeof(u16);
}

void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
			struct ethtool_regs *regs, void *_p)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u16 *p = _p;
	int i;

	regs->version = 0;

	memset(p, 0xff, 32 * sizeof(u16));

	for (i = 0; i < 32; i++) {
		int ret;

		ret = mv88e6xxx_reg_read(ps, REG_PORT(port), i);
		if (ret >= 0)
			p[i] = ret;
	}
}

static int _mv88e6xxx_wait(struct mv88e6xxx_priv_state *ps, int reg, int offset,
			   u16 mask)
{
	unsigned long timeout = jiffies + HZ / 10;

	while (time_before(jiffies, timeout)) {
		int ret;

		ret = _mv88e6xxx_reg_read(ps, reg, offset);
		if (ret < 0)
			return ret;
		if (!(ret & mask))
			return 0;

		usleep_range(1000, 2000);
	}
	return -ETIMEDOUT;
}

static int mv88e6xxx_wait(struct mv88e6xxx_priv_state *ps, int reg,
			  int offset, u16 mask)
{
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_wait(ps, reg, offset, mask);
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

static int _mv88e6xxx_phy_wait(struct mv88e6xxx_priv_state *ps)
{
	return _mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_SMI_OP,
			       GLOBAL2_SMI_OP_BUSY);
}

static int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	return mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
			      GLOBAL2_EEPROM_OP_LOAD);
}

static int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	return mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
			      GLOBAL2_EEPROM_OP_BUSY);
}

static int mv88e6xxx_read_eeprom_word(struct dsa_switch *ds, int addr)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->eeprom_mutex);

	ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
				  GLOBAL2_EEPROM_OP_READ |
				  (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
	if (ret < 0)
		goto error;

	ret = mv88e6xxx_eeprom_busy_wait(ds);
	if (ret < 0)
		goto error;

	ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_EEPROM_DATA);
error:
	mutex_unlock(&ps->eeprom_mutex);
	return ret;
}

int mv88e6xxx_get_eeprom(struct dsa_switch *ds, struct ethtool_eeprom *eeprom,
			 u8 *data)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int offset;
	int len;
	int ret;

	if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
		return -EOPNOTSUPP;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = 0xc3ec4951;

	ret = mv88e6xxx_eeprom_load_wait(ds);
	if (ret < 0)
		return ret;

	if (offset & 1) {
		int word;

		word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
		if (word < 0)
			return word;

		*data++ = (word >> 8) & 0xff;

		offset++;
		len--;
		eeprom->len++;
	}

	while (len >= 2) {
		int word;

		word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
		if (word < 0)
			return word;

		*data++ = word & 0xff;
		*data++ = (word >> 8) & 0xff;

		offset += 2;
		len -= 2;
		eeprom->len += 2;
	}

	if (len) {
		int word;

		word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
		if (word < 0)
			return word;

		*data++ = word & 0xff;

		offset++;
		len--;
		eeprom->len++;
	}

	return 0;
}

static int mv88e6xxx_eeprom_is_readonly(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP);
	if (ret < 0)
		return ret;

	if (!(ret & GLOBAL2_EEPROM_OP_WRITE_EN))
		return -EROFS;

	return 0;
}

static int mv88e6xxx_write_eeprom_word(struct dsa_switch *ds, int addr,
				       u16 data)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->eeprom_mutex);

	ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data);
	if (ret < 0)
		goto error;

	ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
				  GLOBAL2_EEPROM_OP_WRITE |
				  (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
	if (ret < 0)
		goto error;

	ret = mv88e6xxx_eeprom_busy_wait(ds);
error:
	mutex_unlock(&ps->eeprom_mutex);
	return ret;
}

int mv88e6xxx_set_eeprom(struct dsa_switch *ds, struct ethtool_eeprom *eeprom,
			 u8 *data)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int offset;
	int ret;
	int len;

	if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
		return -EOPNOTSUPP;

	if (eeprom->magic != 0xc3ec4951)
		return -EINVAL;

	ret = mv88e6xxx_eeprom_is_readonly(ds);
	if (ret)
		return ret;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	ret = mv88e6xxx_eeprom_load_wait(ds);
	if (ret < 0)
		return ret;

	if (offset & 1) {
		int word;

		word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
		if (word < 0)
			return word;

		word = (*data++ << 8) | (word & 0xff);

		ret = mv88e6xxx_write_eeprom_word(ds, offset >> 1, word);
		if (ret < 0)
			return ret;

		offset++;
		len--;
		eeprom->len++;
	}

	while (len >= 2) {
		int word;

		word = *data++;
		word |= *data++ << 8;

		ret = mv88e6xxx_write_eeprom_word(ds, offset >> 1, word);
		if (ret < 0)
			return ret;

		offset += 2;
		len -= 2;
		eeprom->len += 2;
	}

	if (len) {
		int word;

		word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
		if (word < 0)
			return word;

		word = (word & 0xff00) | *data++;

		ret = mv88e6xxx_write_eeprom_word(ds, offset >> 1, word);
		if (ret < 0)
			return ret;

		offset++;
		len--;
		eeprom->len++;
	}

	return 0;
}

static int _mv88e6xxx_atu_wait(struct mv88e6xxx_priv_state *ps)
{
	return _mv88e6xxx_wait(ps, REG_GLOBAL, GLOBAL_ATU_OP,
			       GLOBAL_ATU_OP_BUSY);
}

static int _mv88e6xxx_phy_read_indirect(struct mv88e6xxx_priv_state *ps,
					int addr, int regnum)
{
	int ret;

	ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_OP,
				   GLOBAL2_SMI_OP_22_READ | (addr << 5) |
				   regnum);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_phy_wait(ps);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_SMI_DATA);

	return ret;
}

static int _mv88e6xxx_phy_write_indirect(struct mv88e6xxx_priv_state *ps,
					 int addr, int regnum, u16 val)
{
	int ret;

	ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_OP,
				   GLOBAL2_SMI_OP_22_WRITE | (addr << 5) |
				   regnum);

	return _mv88e6xxx_phy_wait(ps);
}

int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int reg;

	mutex_lock(&ps->smi_mutex);

	reg = _mv88e6xxx_phy_read_indirect(ps, port, 16);
	if (reg < 0)
		goto out;

	e->eee_enabled = !!(reg & 0x0200);
	e->tx_lpi_enabled = !!(reg & 0x0100);

	reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_STATUS);
	if (reg < 0)
		goto out;

	e->eee_active = !!(reg & PORT_STATUS_EEE);
	reg = 0;

out:
	mutex_unlock(&ps->smi_mutex);
	return reg;
}

int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
		      struct phy_device *phydev, struct ethtool_eee *e)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int reg;
	int ret;

	mutex_lock(&ps->smi_mutex);

	ret = _mv88e6xxx_phy_read_indirect(ps, port, 16);
	if (ret < 0)
		goto out;

	reg = ret & ~0x0300;
	if (e->eee_enabled)
		reg |= 0x0200;
	if (e->tx_lpi_enabled)
		reg |= 0x0100;

	ret = _mv88e6xxx_phy_write_indirect(ps, port, 16, reg);
out:
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

static int _mv88e6xxx_atu_cmd(struct mv88e6xxx_priv_state *ps, u16 fid, u16 cmd)
{
	int ret;

	if (mv88e6xxx_has_fid_reg(ps)) {
		ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_FID, fid);
		if (ret < 0)
			return ret;
	} else if (mv88e6xxx_num_databases(ps) == 256) {
		/* ATU DBNum[7:4] are located in ATU Control 15:12 */
		ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL);
		if (ret < 0)
			return ret;

		ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL,
					   (ret & 0xfff) |
					   ((fid << 8) & 0xf000));
		if (ret < 0)
			return ret;

		/* ATU DBNum[3:0] are located in ATU Operation 3:0 */
		cmd |= fid & 0xf;
	}

	ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_atu_wait(ps);
}
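
/* Worked example for the DBNum handling above: on a 256-database device
 * without a dedicated ATU FID register, loading FID 0xa5 first ORs 0xa000
 * into GLOBAL_ATU_CONTROL (DBNum[7:4] live in bits 15:12, from
 * (0xa5 << 8) & 0xf000) and then ORs 0x5 (DBNum[3:0]) into the command
 * written to GLOBAL_ATU_OP.
 */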

static int _mv88e6xxx_atu_data_write(struct mv88e6xxx_priv_state *ps,
				     struct mv88e6xxx_atu_entry *entry)
{
	u16 data = entry->state & GLOBAL_ATU_DATA_STATE_MASK;

	if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
		unsigned int mask, shift;

		if (entry->trunk) {
			data |= GLOBAL_ATU_DATA_TRUNK;
			mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
			shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
		} else {
			mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
			shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
		}

		data |= (entry->portv_trunkid << shift) & mask;
	}

	return _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_DATA, data);
}

static int _mv88e6xxx_atu_flush_move(struct mv88e6xxx_priv_state *ps,
				     struct mv88e6xxx_atu_entry *entry,
				     bool static_too)
{
	int op;
	int err;

	err = _mv88e6xxx_atu_wait(ps);
	if (err)
		return err;

	err = _mv88e6xxx_atu_data_write(ps, entry);
	if (err)
		return err;

	if (entry->fid) {
		op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB :
			GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB;
	} else {
		op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL :
			GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC;
	}

	return _mv88e6xxx_atu_cmd(ps, entry->fid, op);
}

static int _mv88e6xxx_atu_flush(struct mv88e6xxx_priv_state *ps,
				u16 fid, bool static_too)
{
	struct mv88e6xxx_atu_entry entry = {
		.fid = fid,
		.state = 0, /* EntryState bits must be 0 */
	};

	return _mv88e6xxx_atu_flush_move(ps, &entry, static_too);
}

static int _mv88e6xxx_atu_move(struct mv88e6xxx_priv_state *ps, u16 fid,
			       int from_port, int to_port, bool static_too)
{
	struct mv88e6xxx_atu_entry entry = {
		.trunk = false,
		.fid = fid,
	};

	/* EntryState bits must be 0xF */
	entry.state = GLOBAL_ATU_DATA_STATE_MASK;

	/* ToPort and FromPort are respectively in PortVec bits 7:4 and 3:0 */
	entry.portv_trunkid = (to_port & 0x0f) << 4;
	entry.portv_trunkid |= from_port & 0x0f;

	return _mv88e6xxx_atu_flush_move(ps, &entry, static_too);
}

static int _mv88e6xxx_atu_remove(struct mv88e6xxx_priv_state *ps, u16 fid,
				 int port, bool static_too)
{
	/* Destination port 0xF means remove the entries */
	return _mv88e6xxx_atu_move(ps, fid, port, 0x0f, static_too);
}
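
/* For illustration: _mv88e6xxx_atu_remove(ps, 0, 2, false) loads an ATU
 * entry with state 0xf and PortVec 0xf2 (ToPort 0xf, FromPort 2) and then
 * issues GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC, which drops the dynamic
 * entries learned on port 2 across all address databases.
 */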

static const char * const mv88e6xxx_port_state_names[] = {
	[PORT_CONTROL_STATE_DISABLED] = "Disabled",
	[PORT_CONTROL_STATE_BLOCKING] = "Blocking/Listening",
	[PORT_CONTROL_STATE_LEARNING] = "Learning",
	[PORT_CONTROL_STATE_FORWARDING] = "Forwarding",
};

static int _mv88e6xxx_port_state(struct mv88e6xxx_priv_state *ps, int port,
				 u8 state)
{
	struct dsa_switch *ds = ps->ds;
	int reg, ret = 0;
	u8 oldstate;

	reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL);
	if (reg < 0)
		return reg;

	oldstate = reg & PORT_CONTROL_STATE_MASK;

	if (oldstate != state) {
		/* Flush forwarding database if we're moving a port
		 * from Learning or Forwarding state to Disabled or
		 * Blocking or Listening state.
		 */
		if ((oldstate == PORT_CONTROL_STATE_LEARNING ||
		     oldstate == PORT_CONTROL_STATE_FORWARDING)
		    && (state == PORT_CONTROL_STATE_DISABLED ||
			state == PORT_CONTROL_STATE_BLOCKING)) {
			ret = _mv88e6xxx_atu_remove(ps, 0, port, false);
			if (ret)
				return ret;
		}

		reg = (reg & ~PORT_CONTROL_STATE_MASK) | state;
		ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL,
					   reg);
		if (ret)
			return ret;

		netdev_dbg(ds->ports[port], "PortState %s (was %s)\n",
			   mv88e6xxx_port_state_names[state],
			   mv88e6xxx_port_state_names[oldstate]);
	}

	return ret;
}

static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_priv_state *ps,
					  int port)
{
	struct net_device *bridge = ps->ports[port].bridge_dev;
	const u16 mask = (1 << ps->info->num_ports) - 1;
	struct dsa_switch *ds = ps->ds;
	u16 output_ports = 0;
	int reg;
	int i;

	/* allow CPU port or DSA link(s) to send frames to every port */
	if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
		output_ports = mask;
	} else {
		for (i = 0; i < ps->info->num_ports; ++i) {
			/* allow sending frames to every group member */
			if (bridge && ps->ports[i].bridge_dev == bridge)
				output_ports |= BIT(i);

			/* allow sending frames to CPU port and DSA link(s) */
			if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
				output_ports |= BIT(i);
		}
	}

	/* prevent frames from going back out of the port they came in on */
	output_ports &= ~BIT(port);

	reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_BASE_VLAN);
	if (reg < 0)
		return reg;

	reg &= ~mask;
	reg |= output_ports & mask;

	return _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_BASE_VLAN, reg);
}
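
/* Hypothetical example of the mapping above: on a 7-port switch with the
 * CPU attached to port 6 and user ports 0 and 1 bridged together, port 0
 * ends up with output_ports == BIT(1) | BIT(6), i.e. its bridge peer plus
 * the CPU port, while BIT(0) is masked out so frames never go back out of
 * the port they came in on.
 */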

void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int stp_state;

	switch (state) {
	case BR_STATE_DISABLED:
		stp_state = PORT_CONTROL_STATE_DISABLED;
		break;
	case BR_STATE_BLOCKING:
	case BR_STATE_LISTENING:
		stp_state = PORT_CONTROL_STATE_BLOCKING;
		break;
	case BR_STATE_LEARNING:
		stp_state = PORT_CONTROL_STATE_LEARNING;
		break;
	case BR_STATE_FORWARDING:
	default:
		stp_state = PORT_CONTROL_STATE_FORWARDING;
		break;
	}

	/* mv88e6xxx_port_stp_state_set may be called with softirqs disabled,
	 * so we can not update the port state directly but need to schedule it.
	 */
	ps->ports[port].state = stp_state;
	set_bit(port, ps->port_state_update_mask);
	schedule_work(&ps->bridge_work);
}

static int _mv88e6xxx_port_pvid(struct mv88e6xxx_priv_state *ps, int port,
				u16 *new, u16 *old)
{
	struct dsa_switch *ds = ps->ds;
	u16 pvid;
	int ret;

	ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_DEFAULT_VLAN);
	if (ret < 0)
		return ret;

	pvid = ret & PORT_DEFAULT_VLAN_MASK;

	if (new) {
		ret &= ~PORT_DEFAULT_VLAN_MASK;
		ret |= *new & PORT_DEFAULT_VLAN_MASK;

		ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
					   PORT_DEFAULT_VLAN, ret);
		if (ret < 0)
			return ret;

		netdev_dbg(ds->ports[port], "DefaultVID %d (was %d)\n", *new,
			   pvid);
	}

	if (old)
		*old = pvid;

	return 0;
}

static int _mv88e6xxx_port_pvid_get(struct mv88e6xxx_priv_state *ps,
				    int port, u16 *pvid)
{
	return _mv88e6xxx_port_pvid(ps, port, NULL, pvid);
}

static int _mv88e6xxx_port_pvid_set(struct mv88e6xxx_priv_state *ps,
				    int port, u16 pvid)
{
	return _mv88e6xxx_port_pvid(ps, port, &pvid, NULL);
}

static int _mv88e6xxx_vtu_wait(struct mv88e6xxx_priv_state *ps)
{
	return _mv88e6xxx_wait(ps, REG_GLOBAL, GLOBAL_VTU_OP,
			       GLOBAL_VTU_OP_BUSY);
}

static int _mv88e6xxx_vtu_cmd(struct mv88e6xxx_priv_state *ps, u16 op)
{
	int ret;

	ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_OP, op);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_vtu_wait(ps);
}

static int _mv88e6xxx_vtu_stu_flush(struct mv88e6xxx_priv_state *ps)
{
	int ret;

	ret = _mv88e6xxx_vtu_wait(ps);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_FLUSH_ALL);
}

static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_priv_state *ps,
					struct mv88e6xxx_vtu_stu_entry *entry,
					unsigned int nibble_offset)
{
	u16 regs[3];
	int i;
	int ret;

	for (i = 0; i < 3; ++i) {
		ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
					  GLOBAL_VTU_DATA_0_3 + i);
		if (ret < 0)
			return ret;

		regs[i] = ret;
	}

	for (i = 0; i < ps->info->num_ports; ++i) {
		unsigned int shift = (i % 4) * 4 + nibble_offset;
		u16 reg = regs[i / 4];

		entry->data[i] = (reg >> shift) & GLOBAL_VTU_STU_DATA_MASK;
	}

	return 0;
}

static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_priv_state *ps,
					 struct mv88e6xxx_vtu_stu_entry *entry,
					 unsigned int nibble_offset)
{
	u16 regs[3] = { 0 };
	int i;
	int ret;

	for (i = 0; i < ps->info->num_ports; ++i) {
		unsigned int shift = (i % 4) * 4 + nibble_offset;
		u8 data = entry->data[i];

		regs[i / 4] |= (data & GLOBAL_VTU_STU_DATA_MASK) << shift;
	}

	for (i = 0; i < 3; ++i) {
		ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL,
					   GLOBAL_VTU_DATA_0_3 + i, regs[i]);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static int _mv88e6xxx_vtu_vid_write(struct mv88e6xxx_priv_state *ps, u16 vid)
{
	return _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID,
				    vid & GLOBAL_VTU_VID_MASK);
}

static int _mv88e6xxx_vtu_getnext(struct mv88e6xxx_priv_state *ps,
				  struct mv88e6xxx_vtu_stu_entry *entry)
{
	struct mv88e6xxx_vtu_stu_entry next = { 0 };
	int ret;

	ret = _mv88e6xxx_vtu_wait(ps);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_VTU_GET_NEXT);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_VID);
	if (ret < 0)
		return ret;

	next.vid = ret & GLOBAL_VTU_VID_MASK;
	next.valid = !!(ret & GLOBAL_VTU_VID_VALID);

	if (next.valid) {
		ret = _mv88e6xxx_vtu_stu_data_read(ps, &next, 0);
		if (ret < 0)
			return ret;

		if (mv88e6xxx_has_fid_reg(ps)) {
			ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
						  GLOBAL_VTU_FID);
			if (ret < 0)
				return ret;

			next.fid = ret & GLOBAL_VTU_FID_MASK;
		} else if (mv88e6xxx_num_databases(ps) == 256) {
			/* VTU DBNum[7:4] are located in VTU Operation 11:8, and
			 * VTU DBNum[3:0] are located in VTU Operation 3:0
			 */
			ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
						  GLOBAL_VTU_OP);
			if (ret < 0)
				return ret;

			next.fid = (ret & 0xf00) >> 4;
			next.fid |= ret & 0xf;
		}

		if (mv88e6xxx_has_stu(ps)) {
			ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
						  GLOBAL_VTU_SID);
			if (ret < 0)
				return ret;

			next.sid = ret & GLOBAL_VTU_SID_MASK;
		}
	}

	*entry = next;
	return 0;
}

int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
			     struct switchdev_obj_port_vlan *vlan,
			     int (*cb)(struct switchdev_obj *obj))
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	struct mv88e6xxx_vtu_stu_entry next;
	u16 pvid;
	int err;

	mutex_lock(&ps->smi_mutex);

	err = _mv88e6xxx_port_pvid_get(ps, port, &pvid);
	if (err)
		goto unlock;

	err = _mv88e6xxx_vtu_vid_write(ps, GLOBAL_VTU_VID_MASK);
	if (err)
		goto unlock;

	do {
		err = _mv88e6xxx_vtu_getnext(ps, &next);
		if (err)
			break;

		if (!next.valid)
			break;

		if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
			continue;

		/* reinit and dump this VLAN obj */
		vlan->vid_begin = vlan->vid_end = next.vid;
		vlan->flags = 0;

		if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
			vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (next.vid == pvid)
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;

		err = cb(&vlan->obj);
		if (err)
			break;
	} while (next.vid < GLOBAL_VTU_VID_MASK);

unlock:
	mutex_unlock(&ps->smi_mutex);

	return err;
}

static int _mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_priv_state *ps,
				    struct mv88e6xxx_vtu_stu_entry *entry)
{
	u16 op = GLOBAL_VTU_OP_VTU_LOAD_PURGE;
	u16 reg = 0;
	int ret;

	ret = _mv88e6xxx_vtu_wait(ps);
	if (ret < 0)
		return ret;

	if (!entry->valid)
		goto loadpurge;

	/* Write port member tags */
	ret = _mv88e6xxx_vtu_stu_data_write(ps, entry, 0);
	if (ret < 0)
		return ret;

	if (mv88e6xxx_has_stu(ps)) {
		reg = entry->sid & GLOBAL_VTU_SID_MASK;
		ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID, reg);
		if (ret < 0)
			return ret;
	}

	if (mv88e6xxx_has_fid_reg(ps)) {
		reg = entry->fid & GLOBAL_VTU_FID_MASK;
		ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_FID, reg);
		if (ret < 0)
			return ret;
	} else if (mv88e6xxx_num_databases(ps) == 256) {
		/* VTU DBNum[7:4] are located in VTU Operation 11:8, and
		 * VTU DBNum[3:0] are located in VTU Operation 3:0
		 */
		op |= (entry->fid & 0xf0) << 4;
		op |= entry->fid & 0xf;
	}

	reg = GLOBAL_VTU_VID_VALID;
loadpurge:
	reg |= entry->vid & GLOBAL_VTU_VID_MASK;
	ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID, reg);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_vtu_cmd(ps, op);
}

static int _mv88e6xxx_stu_getnext(struct mv88e6xxx_priv_state *ps, u8 sid,
				  struct mv88e6xxx_vtu_stu_entry *entry)
{
	struct mv88e6xxx_vtu_stu_entry next = { 0 };
	int ret;

	ret = _mv88e6xxx_vtu_wait(ps);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID,
				   sid & GLOBAL_VTU_SID_MASK);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_STU_GET_NEXT);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_SID);
	if (ret < 0)
		return ret;

	next.sid = ret & GLOBAL_VTU_SID_MASK;

	ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_VID);
	if (ret < 0)
		return ret;

	next.valid = !!(ret & GLOBAL_VTU_VID_VALID);

	if (next.valid) {
		ret = _mv88e6xxx_vtu_stu_data_read(ps, &next, 2);
		if (ret < 0)
			return ret;
	}

	*entry = next;
	return 0;
}

static int _mv88e6xxx_stu_loadpurge(struct mv88e6xxx_priv_state *ps,
				    struct mv88e6xxx_vtu_stu_entry *entry)
{
	u16 reg = 0;
	int ret;

	ret = _mv88e6xxx_vtu_wait(ps);
	if (ret < 0)
		return ret;

	if (!entry->valid)
		goto loadpurge;

	/* Write port states */
	ret = _mv88e6xxx_vtu_stu_data_write(ps, entry, 2);
	if (ret < 0)
		return ret;

	reg = GLOBAL_VTU_VID_VALID;
loadpurge:
	ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID, reg);
	if (ret < 0)
		return ret;

	reg = entry->sid & GLOBAL_VTU_SID_MASK;
	ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID, reg);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_STU_LOAD_PURGE);
}

static int _mv88e6xxx_port_fid(struct mv88e6xxx_priv_state *ps, int port,
			       u16 *new, u16 *old)
{
	struct dsa_switch *ds = ps->ds;
	u16 upper_mask;
	u16 fid;
	int ret;

	if (mv88e6xxx_num_databases(ps) == 4096)
		upper_mask = 0xff;
	else if (mv88e6xxx_num_databases(ps) == 256)
		upper_mask = 0xf;
	else
		return -EOPNOTSUPP;

	/* Port's default FID bits 3:0 are located in reg 0x06, offset 12 */
	ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_BASE_VLAN);
	if (ret < 0)
		return ret;

	fid = (ret & PORT_BASE_VLAN_FID_3_0_MASK) >> 12;

	if (new) {
		ret &= ~PORT_BASE_VLAN_FID_3_0_MASK;
		ret |= (*new << 12) & PORT_BASE_VLAN_FID_3_0_MASK;

		ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_BASE_VLAN,
					   ret);
		if (ret < 0)
			return ret;
	}

	/* Port's default FID bits 11:4 are located in reg 0x05, offset 0 */
	ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL_1);
	if (ret < 0)
		return ret;

	fid |= (ret & upper_mask) << 4;

	if (new) {
		ret &= ~upper_mask;
		ret |= (*new >> 4) & upper_mask;

		ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_1,
					   ret);
		if (ret < 0)
			return ret;

		netdev_dbg(ds->ports[port], "FID %d (was %d)\n", *new, fid);
	}

	if (old)
		*old = fid;

	return 0;
}
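
/* Example of the FID split above: on a 4096-database device, a port FID of
 * 0x123 is stored as 0x3 in PORT_BASE_VLAN bits 15:12 (FID[3:0]) and 0x12
 * in the low byte of PORT_CONTROL_1 (FID[11:4]).
 */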

static int _mv88e6xxx_port_fid_get(struct mv88e6xxx_priv_state *ps,
				   int port, u16 *fid)
{
	return _mv88e6xxx_port_fid(ps, port, NULL, fid);
}

static int _mv88e6xxx_port_fid_set(struct mv88e6xxx_priv_state *ps,
				   int port, u16 fid)
{
	return _mv88e6xxx_port_fid(ps, port, &fid, NULL);
}

static int _mv88e6xxx_fid_new(struct mv88e6xxx_priv_state *ps, u16 *fid)
{
	DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
	struct mv88e6xxx_vtu_stu_entry vlan;
	int i, err;

	bitmap_zero(fid_bitmap, MV88E6XXX_N_FID);

	/* Set every FID bit used by the (un)bridged ports */
	for (i = 0; i < ps->info->num_ports; ++i) {
		err = _mv88e6xxx_port_fid_get(ps, i, fid);
		if (err)
			return err;

		set_bit(*fid, fid_bitmap);
	}

	/* Set every FID bit used by the VLAN entries */
	err = _mv88e6xxx_vtu_vid_write(ps, GLOBAL_VTU_VID_MASK);
	if (err)
		return err;

	do {
		err = _mv88e6xxx_vtu_getnext(ps, &vlan);
		if (err)
			return err;

		if (!vlan.valid)
			break;

		set_bit(vlan.fid, fid_bitmap);
	} while (vlan.vid < GLOBAL_VTU_VID_MASK);

	/* The reset value 0x000 is used to indicate that multiple address
	 * databases are not needed. Return the next positive available.
	 */
	*fid = find_next_zero_bit(fid_bitmap, MV88E6XXX_N_FID, 1);
	if (unlikely(*fid >= mv88e6xxx_num_databases(ps)))
		return -ENOSPC;

	/* Clear the database */
	return _mv88e6xxx_atu_flush(ps, *fid, true);
}

static int _mv88e6xxx_vtu_new(struct mv88e6xxx_priv_state *ps, u16 vid,
			      struct mv88e6xxx_vtu_stu_entry *entry)
{
	struct dsa_switch *ds = ps->ds;
	struct mv88e6xxx_vtu_stu_entry vlan = {
		.valid = true,
		.vid = vid,
	};
	int i, err;

	err = _mv88e6xxx_fid_new(ps, &vlan.fid);
	if (err)
		return err;

	/* exclude all ports except the CPU and DSA ports */
	for (i = 0; i < ps->info->num_ports; ++i)
		vlan.data[i] = dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i)
			? GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED
			: GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;

	if (mv88e6xxx_6097_family(ps) || mv88e6xxx_6165_family(ps) ||
	    mv88e6xxx_6351_family(ps) || mv88e6xxx_6352_family(ps)) {
		struct mv88e6xxx_vtu_stu_entry vstp;

		/* Adding a VTU entry requires a valid STU entry. As VSTP is not
		 * implemented, only one STU entry is needed to cover all VTU
		 * entries. Thus, validate the SID 0.
		 */
		vlan.sid = 0;
		err = _mv88e6xxx_stu_getnext(ps, GLOBAL_VTU_SID_MASK, &vstp);
		if (err)
			return err;

		if (vstp.sid != vlan.sid || !vstp.valid) {
			memset(&vstp, 0, sizeof(vstp));
			vstp.valid = true;
			vstp.sid = vlan.sid;

			err = _mv88e6xxx_stu_loadpurge(ps, &vstp);
			if (err)
				return err;
		}
	}

	*entry = vlan;
	return 0;
}

static int _mv88e6xxx_vtu_get(struct mv88e6xxx_priv_state *ps, u16 vid,
			      struct mv88e6xxx_vtu_stu_entry *entry, bool creat)
{
	int err;

	if (!vid)
		return -EINVAL;

	err = _mv88e6xxx_vtu_vid_write(ps, vid - 1);
	if (err)
		return err;

	err = _mv88e6xxx_vtu_getnext(ps, entry);
	if (err)
		return err;

	if (entry->vid != vid || !entry->valid) {
		if (!creat)
			return -EOPNOTSUPP;
		/* -ENOENT would've been more appropriate, but switchdev expects
		 * -EOPNOTSUPP to inform bridge about an eventual software VLAN.
		 */

		err = _mv88e6xxx_vtu_new(ps, vid, entry);
	}

	return err;
}

static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
					u16 vid_begin, u16 vid_end)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	struct mv88e6xxx_vtu_stu_entry vlan;
	int i, err;

	if (!vid_begin)
		return -EOPNOTSUPP;

	mutex_lock(&ps->smi_mutex);

	err = _mv88e6xxx_vtu_vid_write(ps, vid_begin - 1);
	if (err)
		goto unlock;

	do {
		err = _mv88e6xxx_vtu_getnext(ps, &vlan);
		if (err)
			goto unlock;

		if (!vlan.valid)
			break;

		if (vlan.vid > vid_end)
			break;

		for (i = 0; i < ps->info->num_ports; ++i) {
			if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
				continue;

			if (vlan.data[i] ==
			    GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
				continue;

			if (ps->ports[i].bridge_dev ==
			    ps->ports[port].bridge_dev)
				break; /* same bridge, check next VLAN */

			netdev_warn(ds->ports[port],
				    "hardware VLAN %d already used by %s\n",
				    vlan.vid,
				    netdev_name(ps->ports[i].bridge_dev));
			err = -EOPNOTSUPP;
			goto unlock;
		}
	} while (vlan.vid < vid_end);

unlock:
	mutex_unlock(&ps->smi_mutex);

	return err;
}

static const char * const mv88e6xxx_port_8021q_mode_names[] = {
	[PORT_CONTROL_2_8021Q_DISABLED] = "Disabled",
	[PORT_CONTROL_2_8021Q_FALLBACK] = "Fallback",
	[PORT_CONTROL_2_8021Q_CHECK] = "Check",
	[PORT_CONTROL_2_8021Q_SECURE] = "Secure",
};

int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
				  bool vlan_filtering)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u16 old, new = vlan_filtering ? PORT_CONTROL_2_8021Q_SECURE :
		PORT_CONTROL_2_8021Q_DISABLED;
	int ret;

	mutex_lock(&ps->smi_mutex);

	ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL_2);
	if (ret < 0)
		goto unlock;

	old = ret & PORT_CONTROL_2_8021Q_MASK;

	if (new != old) {
		ret &= ~PORT_CONTROL_2_8021Q_MASK;
		ret |= new & PORT_CONTROL_2_8021Q_MASK;

		ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_2,
					   ret);
		if (ret < 0)
			goto unlock;

		netdev_dbg(ds->ports[port], "802.1Q Mode %s (was %s)\n",
			   mv88e6xxx_port_8021q_mode_names[new],
			   mv88e6xxx_port_8021q_mode_names[old]);
	}

	ret = 0;
unlock:
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
				const struct switchdev_obj_port_vlan *vlan,
				struct switchdev_trans *trans)
{
	int err;

	/* If the requested port doesn't belong to the same bridge as the VLAN
	 * members, do not support it (yet) and fallback to software VLAN.
	 */
	err = mv88e6xxx_port_check_hw_vlan(ds, port, vlan->vid_begin,
					   vlan->vid_end);
	if (err)
		return err;

	/* We don't need any dynamic resource from the kernel (yet),
	 * so skip the prepare phase.
	 */
	return 0;
}

static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_priv_state *ps, int port,
				    u16 vid, bool untagged)
2050 2051 2052 2053
{
	struct mv88e6xxx_vtu_stu_entry vlan;
	int err;

	err = _mv88e6xxx_vtu_get(ps, vid, &vlan, true);
	if (err)
		return err;

	vlan.data[port] = untagged ?
		GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED :
		GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED;

	return _mv88e6xxx_vtu_loadpurge(ps, &vlan);
}

void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
			     const struct switchdev_obj_port_vlan *vlan,
			     struct switchdev_trans *trans)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	u16 vid;

	mutex_lock(&ps->smi_mutex);

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
		if (_mv88e6xxx_port_vlan_add(ps, port, vid, untagged))
			netdev_err(ds->ports[port], "failed to add VLAN %d%c\n",
				   vid, untagged ? 'u' : 't');

	if (pvid && _mv88e6xxx_port_pvid_set(ps, port, vlan->vid_end))
		netdev_err(ds->ports[port], "failed to set PVID %d\n",
			   vlan->vid_end);

	mutex_unlock(&ps->smi_mutex);
}

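/* Exclude @port from the VTU entry for @vid. The entry stays valid as long as
 * another user port is still a member; the port's entries in the VLAN's
 * address database are flushed afterwards.
 */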
static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_priv_state *ps,
				    int port, u16 vid)
{
	struct dsa_switch *ds = ps->ds;
	struct mv88e6xxx_vtu_stu_entry vlan;
	int i, err;

	err = _mv88e6xxx_vtu_get(ps, vid, &vlan, false);
	if (err)
		return err;

	/* Tell switchdev if this VLAN is handled in software */
	if (vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
		return -EOPNOTSUPP;

	vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;

	/* keep the VLAN unless all ports are excluded */
	vlan.valid = false;
	for (i = 0; i < ps->info->num_ports; ++i) {
		if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
			continue;

		if (vlan.data[i] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
			vlan.valid = true;
			break;
		}
	}

	err = _mv88e6xxx_vtu_loadpurge(ps, &vlan);
	if (err)
		return err;

	return _mv88e6xxx_atu_remove(ps, vlan.fid, port, false);
}

int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
			    const struct switchdev_obj_port_vlan *vlan)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u16 pvid, vid;
	int err = 0;

	mutex_lock(&ps->smi_mutex);

	err = _mv88e6xxx_port_pvid_get(ps, port, &pvid);
	if (err)
		goto unlock;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
		err = _mv88e6xxx_port_vlan_del(ps, port, vid);
		if (err)
			goto unlock;

		if (vid == pvid) {
			err = _mv88e6xxx_port_pvid_set(ps, port, 0);
			if (err)
				goto unlock;
		}
	}

unlock:
	mutex_unlock(&ps->smi_mutex);

	return err;
}

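/* Write a 48-bit MAC address into the three 16-bit ATU MAC registers, most
 * significant word first.
 */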
static int _mv88e6xxx_atu_mac_write(struct mv88e6xxx_priv_state *ps,
				    const unsigned char *addr)
{
	int i, ret;

	for (i = 0; i < 3; i++) {
		ret = _mv88e6xxx_reg_write(
			ps, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
			(addr[i * 2] << 8) | addr[i * 2 + 1]);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static int _mv88e6xxx_atu_mac_read(struct mv88e6xxx_priv_state *ps,
				   unsigned char *addr)
{
	int i, ret;

	for (i = 0; i < 3; i++) {
		ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
					  GLOBAL_ATU_MAC_01 + i);
		if (ret < 0)
			return ret;
		addr[i * 2] = ret >> 8;
		addr[i * 2 + 1] = ret & 0xff;
	}

	return 0;
}

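/* Load (or purge, depending on the entry state) a single entry into the
 * address database given by entry->fid: wait for the ATU to become free,
 * program the MAC and data registers, then issue the Load/Purge operation.
 */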
static int _mv88e6xxx_atu_load(struct mv88e6xxx_priv_state *ps,
			       struct mv88e6xxx_atu_entry *entry)
{
	int ret;

	ret = _mv88e6xxx_atu_wait(ps);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_atu_mac_write(ps, entry->mac);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_atu_data_write(ps, entry);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_atu_cmd(ps, entry->fid, GLOBAL_ATU_OP_LOAD_DB);
}

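/* Program an ATU entry for @addr on @port. VID 0 targets the port's private
 * address database, any other VID targets the FID of the matching VTU entry.
 * A state of "unused" purges the entry.
 */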
static int _mv88e6xxx_port_fdb_load(struct mv88e6xxx_priv_state *ps, int port,
				    const unsigned char *addr, u16 vid,
				    u8 state)
{
	struct mv88e6xxx_atu_entry entry = { 0 };
	struct mv88e6xxx_vtu_stu_entry vlan;
	int err;

	/* Null VLAN ID corresponds to the port private database */
	if (vid == 0)
		err = _mv88e6xxx_port_fid_get(ps, port, &vlan.fid);
	else
		err = _mv88e6xxx_vtu_get(ps, vid, &vlan, false);
	if (err)
		return err;

	entry.fid = vlan.fid;
	entry.state = state;
	ether_addr_copy(entry.mac, addr);
	if (state != GLOBAL_ATU_DATA_STATE_UNUSED) {
		entry.trunk = false;
		entry.portv_trunkid = BIT(port);
	}

	return _mv88e6xxx_atu_load(ps, &entry);
}

int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
			       const struct switchdev_obj_port_fdb *fdb,
			       struct switchdev_trans *trans)
{
	/* We don't need any dynamic resource from the kernel (yet),
	 * so skip the prepare phase.
	 */
	return 0;
}

void mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
			    const struct switchdev_obj_port_fdb *fdb,
			    struct switchdev_trans *trans)
{
	int state = is_multicast_ether_addr(fdb->addr) ?
		GLOBAL_ATU_DATA_STATE_MC_STATIC :
		GLOBAL_ATU_DATA_STATE_UC_STATIC;
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	mutex_lock(&ps->smi_mutex);
	if (_mv88e6xxx_port_fdb_load(ps, port, fdb->addr, fdb->vid, state))
		netdev_err(ds->ports[port], "failed to load MAC address\n");
	mutex_unlock(&ps->smi_mutex);
}

int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
			   const struct switchdev_obj_port_fdb *fdb)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_port_fdb_load(ps, port, fdb->addr, fdb->vid,
				       GLOBAL_ATU_DATA_STATE_UNUSED);
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

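/* Issue a Get Next operation on the address database given by @fid, starting
 * from the MAC address currently held in the ATU MAC registers, and decode
 * the resulting state, trunk flag and port vector (or trunk ID) into @entry.
 */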
static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_priv_state *ps, u16 fid,
				  struct mv88e6xxx_atu_entry *entry)
{
	struct mv88e6xxx_atu_entry next = { 0 };
	int ret;

	next.fid = fid;

	ret = _mv88e6xxx_atu_wait(ps);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_atu_cmd(ps, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_atu_mac_read(ps, next.mac);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_ATU_DATA);
	if (ret < 0)
		return ret;

	next.state = ret & GLOBAL_ATU_DATA_STATE_MASK;
	if (next.state != GLOBAL_ATU_DATA_STATE_UNUSED) {
		unsigned int mask, shift;

		if (ret & GLOBAL_ATU_DATA_TRUNK) {
			next.trunk = true;
			mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
			shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
		} else {
			next.trunk = false;
			mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
			shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
		}

		next.portv_trunkid = (ret & mask) >> shift;
	}

	*entry = next;
	return 0;
}

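/* Walk all entries of the address database given by @fid, starting from the
 * broadcast address, and report every entry whose port vector includes @port
 * through the switchdev callback, tagged with @vid.
 */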
static int _mv88e6xxx_port_fdb_dump_one(struct mv88e6xxx_priv_state *ps,
					u16 fid, u16 vid, int port,
					struct switchdev_obj_port_fdb *fdb,
					int (*cb)(struct switchdev_obj *obj))
{
	struct mv88e6xxx_atu_entry addr = {
		.mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
	};
	int err;

	err = _mv88e6xxx_atu_mac_write(ps, addr.mac);
	if (err)
		return err;

	do {
		err = _mv88e6xxx_atu_getnext(ps, fid, &addr);
		if (err)
			break;

		if (addr.state == GLOBAL_ATU_DATA_STATE_UNUSED)
			break;

		if (!addr.trunk && addr.portv_trunkid & BIT(port)) {
			bool is_static = addr.state ==
				(is_multicast_ether_addr(addr.mac) ?
				 GLOBAL_ATU_DATA_STATE_MC_STATIC :
				 GLOBAL_ATU_DATA_STATE_UC_STATIC);

			fdb->vid = vid;
			ether_addr_copy(fdb->addr, addr.mac);
			fdb->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;

			err = cb(&fdb->obj);
			if (err)
				break;
		}
	} while (!is_broadcast_ether_addr(addr.mac));

	return err;
}

int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
			    struct switchdev_obj_port_fdb *fdb,
			    int (*cb)(struct switchdev_obj *obj))
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	struct mv88e6xxx_vtu_stu_entry vlan = {
		.vid = GLOBAL_VTU_VID_MASK, /* all ones */
	};
	u16 fid;
	int err;

	mutex_lock(&ps->smi_mutex);

	/* Dump port's default Filtering Information Database (VLAN ID 0) */
	err = _mv88e6xxx_port_fid_get(ps, port, &fid);
	if (err)
		goto unlock;

	err = _mv88e6xxx_port_fdb_dump_one(ps, fid, 0, port, fdb, cb);
	if (err)
		goto unlock;

	/* Dump VLANs' Filtering Information Databases */
	err = _mv88e6xxx_vtu_vid_write(ps, vlan.vid);
	if (err)
		goto unlock;

	do {
		err = _mv88e6xxx_vtu_getnext(ps, &vlan);
		if (err)
			break;

		if (!vlan.valid)
			break;

		err = _mv88e6xxx_port_fdb_dump_one(ps, vlan.fid, vlan.vid, port,
						   fdb, cb);
		if (err)
			break;
	} while (vlan.vid < GLOBAL_VTU_VID_MASK);

unlock:
	mutex_unlock(&ps->smi_mutex);

	return err;
}

int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
			       struct net_device *bridge)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int i, err = 0;

	mutex_lock(&ps->smi_mutex);

	/* Assign the bridge and remap each port's VLANTable */
	ps->ports[port].bridge_dev = bridge;

	for (i = 0; i < ps->info->num_ports; ++i) {
		if (ps->ports[i].bridge_dev == bridge) {
			err = _mv88e6xxx_port_based_vlan_map(ps, i);
			if (err)
				break;
		}
	}

	mutex_unlock(&ps->smi_mutex);

	return err;
}

void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	struct net_device *bridge = ps->ports[port].bridge_dev;
	int i;

	mutex_lock(&ps->smi_mutex);

	/* Unassign the bridge and remap each port's VLANTable */
	ps->ports[port].bridge_dev = NULL;

	for (i = 0; i < ps->info->num_ports; ++i)
		if (i == port || ps->ports[i].bridge_dev == bridge)
			if (_mv88e6xxx_port_based_vlan_map(ps, i))
				netdev_warn(ds->ports[i], "failed to remap\n");

	mutex_unlock(&ps->smi_mutex);
}

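/* Deferred work applying pending STP state changes: each port flagged in
 * port_state_update_mask gets its cached state written to the hardware.
 */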
static void mv88e6xxx_bridge_work(struct work_struct *work)
{
	struct mv88e6xxx_priv_state *ps;
	struct dsa_switch *ds;
	int port;

	ps = container_of(work, struct mv88e6xxx_priv_state, bridge_work);
	ds = ps->ds;

	mutex_lock(&ps->smi_mutex);

	for (port = 0; port < ps->info->num_ports; ++port)
		if (test_and_clear_bit(port, ps->port_state_update_mask) &&
		    _mv88e6xxx_port_state(ps, port, ps->ports[port].state))
			netdev_warn(ds->ports[port],
				    "failed to update state to %s\n",
				    mv88e6xxx_port_state_names[ps->ports[port].state]);

	mutex_unlock(&ps->smi_mutex);
}

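/* Paged PHY access: select the page through PHY register 0x16, perform the
 * indirect access, then restore page 0 before returning.
 */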
static int _mv88e6xxx_phy_page_write(struct mv88e6xxx_priv_state *ps,
				     int port, int page, int reg, int val)
{
	int ret;

	ret = _mv88e6xxx_phy_write_indirect(ps, port, 0x16, page);
	if (ret < 0)
		goto restore_page_0;

	ret = _mv88e6xxx_phy_write_indirect(ps, port, reg, val);
restore_page_0:
	_mv88e6xxx_phy_write_indirect(ps, port, 0x16, 0x0);

	return ret;
}

static int _mv88e6xxx_phy_page_read(struct mv88e6xxx_priv_state *ps,
				    int port, int page, int reg)
{
	int ret;

	ret = _mv88e6xxx_phy_write_indirect(ps, port, 0x16, page);
	if (ret < 0)
		goto restore_page_0;

	ret = _mv88e6xxx_phy_read_indirect(ps, port, reg);
restore_page_0:
	_mv88e6xxx_phy_write_indirect(ps, port, 0x16, 0x0);

	return ret;
}

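/* Clear the power-down bit in the SerDes BMCR register, reached through the
 * fiber/SerDes page, if it is currently set.
 */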
static int mv88e6xxx_power_on_serdes(struct mv88e6xxx_priv_state *ps)
{
	int ret;

	ret = _mv88e6xxx_phy_page_read(ps, REG_FIBER_SERDES, PAGE_FIBER_SERDES,
				       MII_BMCR);
	if (ret < 0)
		return ret;

	if (ret & BMCR_PDOWN) {
		ret &= ~BMCR_PDOWN;
		ret = _mv88e6xxx_phy_page_write(ps, REG_FIBER_SERDES,
						PAGE_FIBER_SERDES, MII_BMCR,
						ret);
	}

	return ret;
}

static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;
	u16 reg;

	mutex_lock(&ps->smi_mutex);

	if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
	    mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
	    mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) ||
	    mv88e6xxx_6065_family(ps) || mv88e6xxx_6320_family(ps)) {
		/* MAC Forcing register: don't force link, speed,
		 * duplex or flow control state to any particular
		 * values on physical ports, but force the CPU port
		 * and all DSA ports to their maximum bandwidth and
		 * full duplex.
		 */
		reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_PCS_CTRL);
		if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
			reg &= ~PORT_PCS_CTRL_UNFORCED;
			reg |= PORT_PCS_CTRL_FORCE_LINK |
				PORT_PCS_CTRL_LINK_UP |
				PORT_PCS_CTRL_DUPLEX_FULL |
				PORT_PCS_CTRL_FORCE_DUPLEX;
			if (mv88e6xxx_6065_family(ps))
				reg |= PORT_PCS_CTRL_100;
			else
				reg |= PORT_PCS_CTRL_1000;
		} else {
			reg |= PORT_PCS_CTRL_UNFORCED;
		}

		ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
					   PORT_PCS_CTRL, reg);
		if (ret)
			goto abort;
	}

	/* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
	 * disable Header mode, enable IGMP/MLD snooping, disable VLAN
	 * tunneling, determine priority by looking at 802.1p and IP
	 * priority fields (IP prio has precedence), and set STP state
	 * to Forwarding.
	 *
	 * If this is the CPU link, use DSA or EDSA tagging depending
	 * on which tagging mode was configured.
	 *
	 * If this is a link to another switch, use DSA tagging mode.
	 *
	 * If this is the upstream port for this switch, enable
	 * forwarding of unknown unicasts and multicasts.
	 */
	reg = 0;
	if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
	    mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
	    mv88e6xxx_6095_family(ps) || mv88e6xxx_6065_family(ps) ||
	    mv88e6xxx_6185_family(ps) || mv88e6xxx_6320_family(ps))
		reg = PORT_CONTROL_IGMP_MLD_SNOOP |
		PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
		PORT_CONTROL_STATE_FORWARDING;
	if (dsa_is_cpu_port(ds, port)) {
		if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps))
			reg |= PORT_CONTROL_DSA_TAG;
		if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
		    mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
		    mv88e6xxx_6320_family(ps)) {
			if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
				reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
			else
				reg |= PORT_CONTROL_FRAME_MODE_DSA;
			reg |= PORT_CONTROL_FORWARD_UNKNOWN |
				PORT_CONTROL_FORWARD_UNKNOWN_MC;
		}

		if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
		    mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
		    mv88e6xxx_6095_family(ps) || mv88e6xxx_6065_family(ps) ||
		    mv88e6xxx_6185_family(ps) || mv88e6xxx_6320_family(ps)) {
			if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
				reg |= PORT_CONTROL_EGRESS_ADD_TAG;
		}
	}
	if (dsa_is_dsa_port(ds, port)) {
		if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps))
			reg |= PORT_CONTROL_DSA_TAG;
		if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
		    mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
		    mv88e6xxx_6320_family(ps)) {
			reg |= PORT_CONTROL_FRAME_MODE_DSA;
		}

		if (port == dsa_upstream_port(ds))
			reg |= PORT_CONTROL_FORWARD_UNKNOWN |
				PORT_CONTROL_FORWARD_UNKNOWN_MC;
	}
	if (reg) {
		ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
					   PORT_CONTROL, reg);
		if (ret)
			goto abort;
	}

	/* If this port is connected to a SerDes, make sure the SerDes is not
	 * powered down.
	 */
	if (mv88e6xxx_6352_family(ps)) {
		ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_STATUS);
		if (ret < 0)
			goto abort;
		ret &= PORT_STATUS_CMODE_MASK;
		if ((ret == PORT_STATUS_CMODE_100BASE_X) ||
		    (ret == PORT_STATUS_CMODE_1000BASE_X) ||
		    (ret == PORT_STATUS_CMODE_SGMII)) {
			ret = mv88e6xxx_power_on_serdes(ps);
			if (ret < 0)
				goto abort;
		}
	}

	/* Port Control 2: don't force a good FCS, set the maximum frame size to
	 * 10240 bytes, disable 802.1q tags checking, don't discard tagged or
	 * untagged frames on this port, do a destination address lookup on all
	 * received packets as usual, disable ARP mirroring and don't send a
	 * copy of all transmitted/received frames on this port to the CPU.
	 */
	reg = 0;
	if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
	    mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
	    mv88e6xxx_6095_family(ps) || mv88e6xxx_6320_family(ps) ||
	    mv88e6xxx_6185_family(ps))
		reg = PORT_CONTROL_2_MAP_DA;

	if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
	    mv88e6xxx_6165_family(ps) || mv88e6xxx_6320_family(ps))
		reg |= PORT_CONTROL_2_JUMBO_10240;

	if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps)) {
		/* Set the upstream port this port should use */
		reg |= dsa_upstream_port(ds);
		/* enable forwarding of unknown multicast addresses to
		 * the upstream port
		 */
		if (port == dsa_upstream_port(ds))
			reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
	}

	reg |= PORT_CONTROL_2_8021Q_DISABLED;

	if (reg) {
		ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
					   PORT_CONTROL_2, reg);
		if (ret)
			goto abort;
	}

	/* Port Association Vector: when learning source addresses
	 * of packets, add the address to the address database using
	 * a port bitmap that has only the bit for this port set and
	 * the other bits clear.
	 */
	reg = 1 << port;
	/* Disable learning for CPU port */
	if (dsa_is_cpu_port(ds, port))
		reg = 0;

	ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_ASSOC_VECTOR, reg);
	if (ret)
		goto abort;

	/* Egress rate control 2: disable egress rate control. */
	ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_RATE_CONTROL_2,
				   0x0000);
	if (ret)
		goto abort;

	if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
	    mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
	    mv88e6xxx_6320_family(ps)) {
		/* Do not limit the period of time that this port can
		 * be paused for by the remote end or the period of
		 * time that this port can pause the remote end.
		 */
		ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
					   PORT_PAUSE_CTRL, 0x0000);
		if (ret)
			goto abort;

		/* Port ATU control: disable limiting the number of
		 * address database entries that this port is allowed
		 * to use.
		 */
		ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
					   PORT_ATU_CONTROL, 0x0000);
		if (ret)
			goto abort;

		/* Priority Override: disable DA, SA and VTU priority
		 * override.
		 */
		ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
					   PORT_PRI_OVERRIDE, 0x0000);
		if (ret)
			goto abort;

		/* Port Ethertype: use the Ethertype DSA Ethertype
		 * value.
		 */
		ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
					   PORT_ETH_TYPE, ETH_P_EDSA);
		if (ret)
			goto abort;
		/* Tag Remap: use an identity 802.1p prio -> switch
		 * prio mapping.
		 */
		ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
					   PORT_TAG_REGMAP_0123, 0x3210);
		if (ret)
			goto abort;

		/* Tag Remap 2: use an identity 802.1p prio -> switch
		 * prio mapping.
		 */
		ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
					   PORT_TAG_REGMAP_4567, 0x7654);
		if (ret)
			goto abort;
	}

	if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
	    mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
	    mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) ||
	    mv88e6xxx_6320_family(ps)) {
		/* Rate Control: disable ingress rate limiting. */
		ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
					   PORT_RATE_CONTROL, 0x0001);
		if (ret)
			goto abort;
	}

	/* Port Control 1: disable trunking, disable sending
	 * learning messages to this port.
	 */
	ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_1, 0x0000);
	if (ret)
		goto abort;

	/* Port based VLAN map: give each port the same default address
	 * database, and allow bidirectional communication between the
	 * CPU and DSA port(s), and the other ports.
	 */
	ret = _mv88e6xxx_port_fid_set(ps, port, 0);
	if (ret)
		goto abort;

	ret = _mv88e6xxx_port_based_vlan_map(ps, port);
	if (ret)
		goto abort;

	/* Default VLAN ID and priority: don't set a default VLAN
	 * ID, and set the default packet priority to zero.
	 */
	ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_DEFAULT_VLAN,
				   0x0000);
abort:
	mutex_unlock(&ps->smi_mutex);
	return ret;
}

int mv88e6xxx_setup_ports(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;
	int i;

	for (i = 0; i < ps->info->num_ports; i++) {
		ret = mv88e6xxx_setup_port(ds, i);
		if (ret < 0)
			return ret;
	}
	return 0;
}

int mv88e6xxx_setup_common(struct mv88e6xxx_priv_state *ps)
{
	mutex_init(&ps->smi_mutex);

	INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);

	if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
		mutex_init(&ps->eeprom_mutex);

	if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU))
		mv88e6xxx_ppu_state_init(ps);

	return 0;
}

int mv88e6xxx_setup_global(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int err;
	int i;

	mutex_lock(&ps->smi_mutex);
	/* Set the default address aging time to 5 minutes, and
	 * enable address learn messages to be sent to all message
	 * ports.
	 */
	err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL,
				   0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL);
	if (err)
		goto unlock;

	/* Configure the IP ToS mapping registers. */
	err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
	if (err)
		goto unlock;
	err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
	if (err)
		goto unlock;
	err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
	if (err)
		goto unlock;
	err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
	if (err)
		goto unlock;
	err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
	if (err)
		goto unlock;
	err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
	if (err)
		goto unlock;
	err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
	if (err)
		goto unlock;
	err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
	if (err)
		goto unlock;

	/* Configure the IEEE 802.1p priority mapping register. */
	err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
	if (err)
		goto unlock;

	/* Send all frames with destination addresses matching
	 * 01:80:c2:00:00:0x to the CPU port.
	 */
	err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff);
	if (err)
		goto unlock;

	/* Ignore removed tag data on doubly tagged packets, disable
	 * flow control messages, force flow control priority to the
	 * highest, and send all special multicast frames to the CPU
	 * port at the highest priority.
	 */
	err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SWITCH_MGMT,
				   0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 |
				   GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI);
	if (err)
		goto unlock;

	/* Program the DSA routing table. */
	for (i = 0; i < 32; i++) {
		int nexthop = 0x1f;

		if (ds->pd->rtable &&
		    i != ds->index && i < ds->dst->pd->nr_chips)
			nexthop = ds->pd->rtable[i] & 0x1f;

		err = _mv88e6xxx_reg_write(
			ps, REG_GLOBAL2,
			GLOBAL2_DEVICE_MAPPING,
			GLOBAL2_DEVICE_MAPPING_UPDATE |
			(i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) | nexthop);
		if (err)
			goto unlock;
	}

	/* Clear all trunk masks. */
	for (i = 0; i < 8; i++) {
		err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_TRUNK_MASK,
					   0x8000 |
					   (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) |
					   ((1 << ps->info->num_ports) - 1));
		if (err)
			goto unlock;
	}

	/* Clear all trunk mappings. */
	for (i = 0; i < 16; i++) {
		err = _mv88e6xxx_reg_write(
			ps, REG_GLOBAL2,
			GLOBAL2_TRUNK_MAPPING,
			GLOBAL2_TRUNK_MAPPING_UPDATE |
			(i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));
		if (err)
			goto unlock;
	}

	if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
	    mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
	    mv88e6xxx_6320_family(ps)) {
		/* Send all frames with destination addresses matching
		 * 01:80:c2:00:00:2x to the CPU port.
		 */
		err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
					   GLOBAL2_MGMT_EN_2X, 0xffff);
		if (err)
			goto unlock;

		/* Initialise cross-chip port VLAN table to reset
		 * defaults.
		 */
		err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
					   GLOBAL2_PVT_ADDR, 0x9000);
		if (err)
			goto unlock;

		/* Clear the priority override table. */
		for (i = 0; i < 16; i++) {
			err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
						   GLOBAL2_PRIO_OVERRIDE,
						   0x8000 | (i << 8));
			if (err)
				goto unlock;
		}
	}

	if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
	    mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
	    mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) ||
	    mv88e6xxx_6320_family(ps)) {
		/* Disable ingress rate limiting by resetting all
		 * ingress rate limit registers to their initial
		 * state.
		 */
		for (i = 0; i < ps->info->num_ports; i++) {
			err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
						   GLOBAL2_INGRESS_OP,
						   0x9000 | (i << 8));
			if (err)
				goto unlock;
		}
	}

	/* Clear the statistics counters for all ports */
	err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP,
				   GLOBAL_STATS_OP_FLUSH_ALL);
	if (err)
		goto unlock;

	/* Wait for the flush to complete. */
	err = _mv88e6xxx_stats_wait(ps);
	if (err < 0)
		goto unlock;

	/* Clear all ATU entries */
	err = _mv88e6xxx_atu_flush(ps, 0, true);
	if (err < 0)
		goto unlock;

	/* Clear all the VTU and STU entries */
	err = _mv88e6xxx_vtu_stu_flush(ps);
unlock:
	mutex_unlock(&ps->smi_mutex);

	return err;
}

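/* Disable all ports, toggle the reset GPIO if one is provided, issue a
 * software reset (keeping the PPU running if @ppu_active is set) and wait up
 * to one second for the reset completion bits in the global status register.
 */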
int mv88e6xxx_switch_reset(struct mv88e6xxx_priv_state *ps, bool ppu_active)
{
	u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
	struct gpio_desc *gpiod = ps->ds->pd->reset;
	unsigned long timeout;
	int ret;
	int i;

	mutex_lock(&ps->smi_mutex);

	/* Set all ports to the disabled state. */
	for (i = 0; i < ps->info->num_ports; i++) {
		ret = _mv88e6xxx_reg_read(ps, REG_PORT(i), PORT_CONTROL);
		if (ret < 0)
			goto unlock;

		ret = _mv88e6xxx_reg_write(ps, REG_PORT(i), PORT_CONTROL,
					   ret & 0xfffc);
		if (ret)
			goto unlock;
	}

	/* Wait for transmit queues to drain. */
	usleep_range(2000, 4000);

	/* If there is a gpio connected to the reset pin, toggle it */
	if (gpiod) {
		gpiod_set_value_cansleep(gpiod, 1);
		usleep_range(10000, 20000);
		gpiod_set_value_cansleep(gpiod, 0);
		usleep_range(10000, 20000);
	}

	/* Reset the switch. Keep the PPU active if requested. The PPU
	 * needs to be active to support indirect phy register access
	 * through global registers 0x18 and 0x19.
	 */
	if (ppu_active)
		ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, 0x04, 0xc000);
	else
		ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, 0x04, 0xc400);
	if (ret)
		goto unlock;

	/* Wait up to one second for reset to complete. */
	timeout = jiffies + 1 * HZ;
	while (time_before(jiffies, timeout)) {
		ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, 0x00);
		if (ret < 0)
			goto unlock;

		if ((ret & is_reset) == is_reset)
			break;
		usleep_range(1000, 2000);
	}
	if (time_after(jiffies, timeout))
		ret = -ETIMEDOUT;
	else
		ret = 0;
unlock:
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_phy_page_read(ps, port, page, reg);
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
			     int reg, int val)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_phy_page_write(ps, port, page, reg, val);
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

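/* Map a switch port number to its internal PHY address; the mapping is the
 * identity for in-range ports, -EINVAL otherwise.
 */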
static int mv88e6xxx_port_to_phy_addr(struct mv88e6xxx_priv_state *ps,
				      int port)
{
	if (port >= 0 && port < ps->info->num_ports)
		return port;
	return -EINVAL;
}

int
mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int addr = mv88e6xxx_port_to_phy_addr(ps, port);
	int ret;

	if (addr < 0)
		return 0xffff;

	mutex_lock(&ps->smi_mutex);

	if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU))
		ret = mv88e6xxx_phy_read_ppu(ps, addr, regnum);
	else if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SMI_PHY))
		ret = _mv88e6xxx_phy_read_indirect(ps, addr, regnum);
	else
		ret = _mv88e6xxx_phy_read(ps, addr, regnum);

	mutex_unlock(&ps->smi_mutex);
	return ret;
}

int
mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int addr = mv88e6xxx_port_to_phy_addr(ps, port);
	int ret;

	if (addr < 0)
		return 0xffff;

	mutex_lock(&ps->smi_mutex);

	if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU))
		ret = mv88e6xxx_phy_write_ppu(ps, addr, regnum, val);
	else if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SMI_PHY))
		ret = _mv88e6xxx_phy_write_indirect(ps, addr, regnum, val);
	else
		ret = _mv88e6xxx_phy_write(ps, addr, regnum, val);

	mutex_unlock(&ps->smi_mutex);
	return ret;
}

#ifdef CONFIG_NET_DSA_HWMON

static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;
	int val;

	*temp = 0;

	mutex_lock(&ps->smi_mutex);

	ret = _mv88e6xxx_phy_write(ps, 0x0, 0x16, 0x6);
	if (ret < 0)
		goto error;

	/* Enable temperature sensor */
	ret = _mv88e6xxx_phy_read(ps, 0x0, 0x1a);
	if (ret < 0)
		goto error;

	ret = _mv88e6xxx_phy_write(ps, 0x0, 0x1a, ret | (1 << 5));
	if (ret < 0)
		goto error;

	/* Wait for temperature to stabilize */
	usleep_range(10000, 12000);

	val = _mv88e6xxx_phy_read(ps, 0x0, 0x1a);
	if (val < 0) {
		ret = val;
		goto error;
	}

	/* Disable temperature sensor */
	ret = _mv88e6xxx_phy_write(ps, 0x0, 0x1a, ret & ~(1 << 5));
	if (ret < 0)
		goto error;

	*temp = ((val & 0x1f) - 5) * 5;

error:
	_mv88e6xxx_phy_write(ps, 0x0, 0x16, 0x0);
	mutex_unlock(&ps->smi_mutex);
	return ret;
}

static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
	int ret;

	*temp = 0;

	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 27);
	if (ret < 0)
		return ret;

	*temp = (ret & 0xff) - 25;

	return 0;
}

int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	if (mv88e6xxx_6320_family(ps) || mv88e6xxx_6352_family(ps))
		return mv88e63xx_get_temp(ds, temp);

	return mv88e61xx_get_temp(ds, temp);
}

int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
	int ret;

	if (!mv88e6xxx_6320_family(ps) && !mv88e6xxx_6352_family(ps))
		return -EOPNOTSUPP;

	*temp = 0;

	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
	if (ret < 0)
		return ret;

	*temp = (((ret >> 8) & 0x1f) * 5) - 25;

	return 0;
}

int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
	int ret;

	if (!mv88e6xxx_6320_family(ps) && !mv88e6xxx_6352_family(ps))
		return -EOPNOTSUPP;

	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
	if (ret < 0)
		return ret;
	temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
	return mv88e6xxx_phy_page_write(ds, phy, 6, 26,
					(ret & 0xe0ff) | (temp << 8));
}

int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
	int ret;

	if (!mv88e6xxx_6320_family(ps) && !mv88e6xxx_6352_family(ps))
		return -EOPNOTSUPP;

	*alarm = false;

	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
	if (ret < 0)
		return ret;

	*alarm = !!(ret & 0x40);

	return 0;
}
#endif /* CONFIG_NET_DSA_HWMON */

static const struct mv88e6xxx_info *
mv88e6xxx_lookup_info(unsigned int prod_num, const struct mv88e6xxx_info *table,
		      unsigned int num)
{
	int i;

	for (i = 0; i < num; ++i)
		if (table[i].prod_num == prod_num)
			return &table[i];

	return NULL;
}

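/* Common probe helper for the per-model drivers: read the switch ID register,
 * look the product number up in @table, allocate the private state and return
 * the model name, or NULL on failure.
 */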
const char *mv88e6xxx_drv_probe(struct device *dsa_dev, struct device *host_dev,
				int sw_addr, void **priv,
				const struct mv88e6xxx_info *table,
				unsigned int num)
{
	const struct mv88e6xxx_info *info;
	struct mv88e6xxx_priv_state *ps;
	struct mii_bus *bus;
	const char *name;
	int id, prod_num, rev;

	bus = dsa_host_dev_to_mii_bus(host_dev);
	if (!bus)
		return NULL;

	id = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
	if (id < 0)
		return NULL;

	prod_num = (id & 0xfff0) >> 4;
	rev = id & 0x000f;

	info = mv88e6xxx_lookup_info(prod_num, table, num);
	if (!info)
		return NULL;

	name = info->name;

	ps = devm_kzalloc(dsa_dev, sizeof(*ps), GFP_KERNEL);
	if (!ps)
		return NULL;

	ps->bus = bus;
	ps->sw_addr = sw_addr;
	ps->info = info;

	*priv = ps;

	dev_info(&ps->bus->dev, "switch 0x%x probed: %s, revision %u\n",
		 prod_num, name, rev);

	return name;
}

static int __init mv88e6xxx_init(void)
{
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
	register_switch_driver(&mv88e6131_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123)
	register_switch_driver(&mv88e6123_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
	register_switch_driver(&mv88e6352_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
	register_switch_driver(&mv88e6171_switch_driver);
#endif
	return 0;
}
module_init(mv88e6xxx_init);

static void __exit mv88e6xxx_cleanup(void)
{
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
	unregister_switch_driver(&mv88e6171_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
	unregister_switch_driver(&mv88e6352_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123)
	unregister_switch_driver(&mv88e6123_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
	unregister_switch_driver(&mv88e6131_switch_driver);
#endif
}
module_exit(mv88e6xxx_cleanup);

MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
MODULE_LICENSE("GPL");