// SPDX-License-Identifier: GPL-2.0-only
/*
 * Broadcom BCM7xxx System Port Ethernet MAC driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <net/dsa.h>
#include <linux/clk.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "bcmsysport.h"

/* Register I/O accessor helpers */
#define BCM_SYSPORT_IO_MACRO(name, offset) \
static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off)	\
{									\
	u32 reg = readl_relaxed(priv->base + offset + off);		\
	return reg;							\
}									\
static inline void name##_writel(struct bcm_sysport_priv *priv,		\
				  u32 val, u32 off)			\
{									\
	writel_relaxed(val, priv->base + offset + off);			\
}									\

BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);

/* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact
 * same layout, except it has been moved by 4 bytes up, *sigh*
 */
static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off)
{
	if (priv->is_lite && off >= RDMA_STATUS)
		off += 4;
	return readl_relaxed(priv->base + SYS_PORT_RDMA_OFFSET + off);
}

static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off)
{
	if (priv->is_lite && off >= RDMA_STATUS)
		off += 4;
	writel_relaxed(val, priv->base + SYS_PORT_RDMA_OFFSET + off);
}

static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit)
{
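	/* On SYSTEMPORT Lite, TDMA_CONTROL bits at or above ACB_ALGO sit one
	 * position higher than in the original SYSTEMPORT layout, hence the
	 * extra shift below.
	 */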
	if (!priv->is_lite) {
		return BIT(bit);
	} else {
		if (bit >= ACB_ALGO)
			return BIT(bit + 1);
		else
			return BIT(bit);
	}
}

/* L2-interrupt masking/unmasking helpers that automatically save the applied
 * mask in a software copy to avoid CPU_MASK_STATUS reads in hot paths.
 */
#define BCM_SYSPORT_INTR_L2(which)	\
static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	priv->irq##which##_mask &= ~(mask);				\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);	\
}									\
static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	intrl2_## which##_writel(priv, mask, INTRL2_CPU_MASK_SET);	\
	priv->irq##which##_mask |= (mask);				\
}									\

BCM_SYSPORT_INTR_L2(0)
BCM_SYSPORT_INTR_L2(1)

/* Register accesses to GISB/RBUS registers are expensive (few hundred
 * nanoseconds), so keep the 64-bit check explicit here to save
 * one register write per packet on 32-bit platforms.
 */
static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
				     void __iomem *d,
				     dma_addr_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	writel_relaxed(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
		     d + DESC_ADDR_HI_STATUS_LEN);
#endif
	writel_relaxed(lower_32_bits(addr), d + DESC_ADDR_LO);
}

/* Ethtool operations */
static void bcm_sysport_set_rx_csum(struct net_device *dev,
				    netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	/* Clear L2 header checks, which would prevent BPDUs
	 * from being received.
	 */
	reg &= ~RXCHK_L2_HDR_DIS;
	if (priv->rx_chk_en)
		reg |= RXCHK_EN;
	else
		reg &= ~RXCHK_EN;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (priv->rx_chk_en && priv->crc_fwd)
		reg |= RXCHK_SKIP_FCS;
	else
		reg &= ~RXCHK_SKIP_FCS;

	/* If Broadcom tags are enabled (e.g: using a switch), make
	 * sure we tell the RXCHK hardware to expect a 4-byte Broadcom
	 * tag after the Ethernet MAC Source Address.
	 */
	if (netdev_uses_dsa(dev))
		reg |= RXCHK_BRCM_TAG_EN;
	else
		reg &= ~RXCHK_BRCM_TAG_EN;

	rxchk_writel(priv, reg, RXCHK_CONTROL);
}

static void bcm_sysport_set_tx_csum(struct net_device *dev,
				    netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	/* Hardware transmit checksum requires us to enable the Transmit status
	 * block prepended to the packet contents
	 */
	priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				    NETIF_F_HW_VLAN_CTAG_TX));
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->tsb_en)
		reg |= tdma_control_bit(priv, TSB_EN);
	else
		reg &= ~tdma_control_bit(priv, TSB_EN);
	/* Indicating that software inserts Broadcom tags is needed for the TX
	 * checksum to be computed correctly when using VLAN HW acceleration,
	 * else it has no effect, so it can always be turned on.
	 */
	if (netdev_uses_dsa(dev))
		reg |= tdma_control_bit(priv, SW_BRCM_TAG);
	else
		reg &= ~tdma_control_bit(priv, SW_BRCM_TAG);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Default TPID is ETH_P_8021AD, change to ETH_P_8021Q */
	if (wanted & NETIF_F_HW_VLAN_CTAG_TX)
		tdma_writel(priv, ETH_P_8021Q, TDMA_TPID);
}

static int bcm_sysport_set_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	/* Read CRC forward */
	if (!priv->is_lite)
		priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
	else
		priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
				  GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);

	bcm_sysport_set_rx_csum(dev, features);
	bcm_sysport_set_tx_csum(dev, features);

	clk_disable_unprepare(priv->clk);

	return 0;
}

/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV64(rx_packets),
	STAT_NETDEV64(tx_packets),
	STAT_NETDEV64(rx_bytes),
	STAT_NETDEV64(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_MIB_RX("rx_control", mib.rx.cf),
	STAT_MIB_RX("rx_pause", mib.rx.pf),
	STAT_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_MIB_RX("rx_align", mib.rx.aln),
	STAT_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_MIB_RX("rx_code", mib.rx.cde),
	STAT_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_MIB_TX("tx_pause", mib.tx.pf),
	STAT_MIB_TX("tx_control", mib.tx.cf),
	STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_MIB_TX("tx_defer", mib.tx.drf),
	STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_MIB_TX("tx_frags", mib.tx.frg),
	STAT_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* RXCHK misc statistics */
	STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
	STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
		   RXCHK_OTHER_DISC_CNTR),
	/* RBUF misc statistics */
	STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
	STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
	STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
	STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
	STAT_MIB_SOFT("tx_realloc_tsb", mib.tx_realloc_tsb),
	STAT_MIB_SOFT("tx_realloc_tsb_failed", mib.tx_realloc_tsb_failed),
	/* Per TX-queue statistics are dynamically appended */
};

#define BCM_SYSPORT_STATS_LEN	ARRAY_SIZE(bcm_sysport_gstrings_stats)

static void bcm_sysport_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
}

static u32 bcm_sysport_get_msglvl(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	priv->msg_enable = enable;
}

static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type)
{
	switch (type) {
	case BCM_SYSPORT_STAT_NETDEV:
	case BCM_SYSPORT_STAT_NETDEV64:
	case BCM_SYSPORT_STAT_RXCHK:
	case BCM_SYSPORT_STAT_RBUF:
	case BCM_SYSPORT_STAT_SOFT:
		return true;
	default:
		return false;
	}
}

static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	const struct bcm_sysport_stats *s;
	unsigned int i, j;

	switch (string_set) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			s = &bcm_sysport_gstrings_stats[i];
			if (priv->is_lite &&
			    !bcm_sysport_lite_stat_valid(s->type))
				continue;
			j++;
		}
		/* Include per-queue statistics */
		return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcm_sysport_get_strings(struct net_device *dev,
				    u32 stringset, u8 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	const struct bcm_sysport_stats *s;
	char buf[128];
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			s = &bcm_sysport_gstrings_stats[i];
			if (priv->is_lite &&
			    !bcm_sysport_lite_stat_valid(s->type))
				continue;

			memcpy(data + j * ETH_GSTRING_LEN, s->stat_string,
			       ETH_GSTRING_LEN);
			j++;
		}

		for (i = 0; i < dev->num_tx_queues; i++) {
			snprintf(buf, sizeof(buf), "txq%d_packets", i);
			memcpy(data + j * ETH_GSTRING_LEN, buf,
			       ETH_GSTRING_LEN);
			j++;

			snprintf(buf, sizeof(buf), "txq%d_bytes", i);
			memcpy(data + j * ETH_GSTRING_LEN, buf,
			       ETH_GSTRING_LEN);
			j++;
		}
		break;
	default:
		break;
	}
}

static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		switch (s->type) {
		case BCM_SYSPORT_STAT_NETDEV:
		case BCM_SYSPORT_STAT_NETDEV64:
		case BCM_SYSPORT_STAT_SOFT:
			continue;
		case BCM_SYSPORT_STAT_MIB_RX:
		case BCM_SYSPORT_STAT_MIB_TX:
		case BCM_SYSPORT_STAT_RUNT:
			if (priv->is_lite)
				continue;

			if (s->type != BCM_SYSPORT_STAT_MIB_RX)
				offset = UMAC_MIB_STAT_OFFSET;
			val = umac_readl(priv, UMAC_MIB_START + j + offset);
			break;
		case BCM_SYSPORT_STAT_RXCHK:
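			/* Clear the counter once it reads back as 0xffffffff
			 * so it does not stay pegged at its maximum value.
			 */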
			val = rxchk_readl(priv, s->reg_offset);
			if (val == ~0)
				rxchk_writel(priv, 0, s->reg_offset);
			break;
		case BCM_SYSPORT_STAT_RBUF:
			val = rbuf_readl(priv, s->reg_offset);
			if (val == ~0)
				rbuf_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}

	netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
}

static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv,
					u64 *tx_bytes, u64 *tx_packets)
{
	struct bcm_sysport_tx_ring *ring;
	u64 bytes = 0, packets = 0;
	unsigned int start;
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++) {
		ring = &priv->tx_rings[q];
		do {
			start = u64_stats_fetch_begin_irq(&priv->syncp);
			bytes = ring->bytes;
			packets = ring->packets;
		} while (u64_stats_fetch_retry_irq(&priv->syncp, start));

		*tx_bytes += bytes;
		*tx_packets += packets;
	}
}

static void bcm_sysport_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct u64_stats_sync *syncp = &priv->syncp;
	struct bcm_sysport_tx_ring *ring;
	u64 tx_bytes = 0, tx_packets = 0;
	unsigned int start;
	int i, j;

	if (netif_running(dev)) {
		bcm_sysport_update_mib_counters(priv);
		bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets);
		stats64->tx_bytes = tx_bytes;
		stats64->tx_packets = tx_packets;
	}

	for (i =  0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		if (s->type == BCM_SYSPORT_STAT_NETDEV)
			p = (char *)&dev->stats;
		else if (s->type == BCM_SYSPORT_STAT_NETDEV64)
			p = (char *)stats64;
		else
			p = (char *)priv;

		if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
			continue;
		p += s->stat_offset;

		if (s->stat_sizeof == sizeof(u64) &&
		    s->type == BCM_SYSPORT_STAT_NETDEV64) {
			do {
				start = u64_stats_fetch_begin_irq(syncp);
				data[i] = *(u64 *)p;
			} while (u64_stats_fetch_retry_irq(syncp, start));
		} else
			data[i] = *(u32 *)p;
		j++;
	}

	/* For SYSTEMPORT Lite since we have holes in our statistics, j would
	 * be equal to BCM_SYSPORT_STATS_LEN at the end of the loop, but it
	 * needs to point to how many total statistics we have minus the
	 * number of per TX queue statistics
	 */
	j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) -
	    dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;

	for (i = 0; i < dev->num_tx_queues; i++) {
		ring = &priv->tx_rings[i];
		data[j] = ring->packets;
		j++;
		data[j] = ring->bytes;
		j++;
	}
}

static void bcm_sysport_get_wol(struct net_device *dev,
				struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
	wol->wolopts = priv->wolopts;

	if (!(priv->wolopts & WAKE_MAGICSECURE))
		return;

	memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
}

static int bcm_sysport_set_wol(struct net_device *dev,
			       struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;

	if (!device_can_wakeup(kdev))
		return -ENOTSUPP;

	if (wol->wolopts & ~supported)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGICSECURE)
		memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));

	/* Flag the device and relevant IRQ as wakeup capable */
	if (wol->wolopts) {
		device_set_wakeup_enable(kdev, 1);
		if (priv->wol_irq_disabled)
			enable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 0;
	} else {
		device_set_wakeup_enable(kdev, 0);
		/* Avoid unbalanced disable_irq_wake calls */
		if (!priv->wol_irq_disabled)
			disable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 1;
	}

	priv->wolopts = wol->wolopts;

	return 0;
}

static void bcm_sysport_set_rx_coalesce(struct bcm_sysport_priv *priv,
					u32 usecs, u32 pkts)
{
	u32 reg;

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);
	reg &= ~(RDMA_INTR_THRESH_MASK |
		 RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
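	/* The low bits hold the packet count threshold; the timeout field is
	 * programmed in ~8.192 us ticks derived from the 125 MHz system clock.
	 */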
	reg |= pkts;
	reg |= DIV_ROUND_UP(usecs * 1000, 8192) << RDMA_TIMEOUT_SHIFT;
	rdma_writel(priv, reg, RDMA_MBDONE_INTR);
}

static void bcm_sysport_set_tx_coalesce(struct bcm_sysport_tx_ring *ring,
					struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = ring->priv;
	u32 reg;

	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(ring->index));
	reg &= ~(RING_INTR_THRESH_MASK |
		 RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
	reg |= ec->tx_max_coalesced_frames;
	reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
			    RING_TIMEOUT_SHIFT;
	tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(ring->index));
}

static int bcm_sysport_get_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec,
				    struct kernel_ethtool_coalesce *kernel_coal,
				    struct netlink_ext_ack *extack)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));

	ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);

	ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;
	ec->use_adaptive_rx_coalesce = priv->dim.use_dim;

	return 0;
}

static int bcm_sysport_set_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec,
				    struct kernel_ethtool_coalesce *kernel_coal,
				    struct netlink_ext_ack *extack)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct dim_cq_moder moder;
	u32 usecs, pkts;
	unsigned int i;

	/* The base system clock is 125 MHz; the DMA timeout tick is this
	 * reference clock divided by 1024, which yields roughly 8.192 us. Our
	 * maximum value has to fit in the RING_TIMEOUT_MASK (16 bits).
	 */
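	/* For example, a requested coalescing time of 100 usecs is programmed
	 * as DIV_ROUND_UP(100 * 1000, 8192) = 13 timeout ticks, i.e. ~106.5 us.
	 */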
	if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK ||
	    ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 ||
	    ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK ||
	    ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
	    (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_set_tx_coalesce(&priv->tx_rings[i], ec);

	priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;
	priv->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	usecs = priv->rx_coalesce_usecs;
	pkts = priv->rx_max_coalesced_frames;

	if (ec->use_adaptive_rx_coalesce && !priv->dim.use_dim) {
		moder = net_dim_get_def_rx_moderation(priv->dim.dim.mode);
		usecs = moder.usec;
		pkts = moder.pkts;
	}

	priv->dim.use_dim = ec->use_adaptive_rx_coalesce;

	/* Apply desired coalescing parameters */
	bcm_sysport_set_rx_coalesce(priv, usecs, pkts);

	return 0;
}

static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
	dev_consume_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}

static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;
	struct sk_buff *skb, *rx_skb;
	dma_addr_t mapping;

	/* Allocate a new SKB for a new packet */
	skb = __netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH,
				 GFP_ATOMIC | __GFP_NOWARN);
	if (!skb) {
		priv->mib.alloc_rx_buff_failed++;
		netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
		return NULL;
	}

	mapping = dma_map_single(kdev, skb->data,
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.rx_dma_failed++;
		dev_kfree_skb_any(skb);
		netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
		return NULL;
	}

	/* Grab the current SKB on the ring */
	rx_skb = cb->skb;
	if (likely(rx_skb))
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);

	/* Put the new SKB on the ring */
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_desc_set_addr(priv, cb->bd_addr, mapping);

	netif_dbg(priv, rx_status, ndev, "RX refill\n");

	/* Return the current SKB to the caller */
	return rx_skb;
}

static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		skb = bcm_sysport_rx_refill(priv, cb);
		dev_kfree_skb(skb);
		if (!cb->skb)
			return -ENOMEM;
	}

	return 0;
}

/* Poll the hardware for up to budget packets to process */
static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
					unsigned int budget)
{
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct net_device *ndev = priv->netdev;
	unsigned int processed = 0, to_process;
	unsigned int processed_bytes = 0;
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int p_index;
	u16 len, status;
	struct bcm_rsb *rsb;

	/* Clear status before servicing to reduce spurious interrupts */
	intrl2_0_writel(priv, INTRL2_0_RDMA_MBDONE, INTRL2_CPU_CLEAR);

	/* Determine how much we should process since the last call; SYSTEMPORT
	 * Lite groups the producer and consumer indexes into the same 32-bit
	 * register, which we access using RDMA_CONS_INDEX.
	 */
	if (!priv->is_lite)
		p_index = rdma_readl(priv, RDMA_PROD_INDEX);
	else
		p_index = rdma_readl(priv, RDMA_CONS_INDEX);
	p_index &= RDMA_PROD_INDEX_MASK;

	to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK;

	netif_dbg(priv, rx_status, ndev,
		  "p_index=%d rx_c_index=%d to_process=%d\n",
		  p_index, priv->rx_c_index, to_process);

	while ((processed < to_process) && (processed < budget)) {
		cb = &priv->rx_cbs[priv->rx_read_ptr];
		skb = bcm_sysport_rx_refill(priv, cb);

		/* We do not have a backing SKB, so we do not have a corresponding
		 * DMA mapping for this incoming packet since
		 * bcm_sysport_rx_refill always either has both skb and mapping
		 * or none.
		 */
		if (unlikely(!skb)) {
			netif_err(priv, rx_err, ndev, "out of memory!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			goto next;
		}

		/* Extract the Receive Status Block prepended */
		rsb = (struct bcm_rsb *)skb->data;
		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
			  DESC_STATUS_MASK;

		netif_dbg(priv, rx_status, ndev,
			  "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
			  p_index, priv->rx_c_index, priv->rx_read_ptr,
			  len, status);

		if (unlikely(len > RX_BUF_LENGTH)) {
			netif_err(priv, rx_status, ndev, "oversized packet\n");
			ndev->stats.rx_length_errors++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
			netif_err(priv, rx_status, ndev, "fragmented packet!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
			netif_err(priv, rx_err, ndev, "error packet\n");
			if (status & RX_STATUS_OVFLOW)
				ndev->stats.rx_over_errors++;
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		skb_put(skb, len);

		/* Hardware validated our checksum */
		if (likely(status & DESC_L4_CSUM))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* Hardware pre-pends packets with 2 bytes before the Ethernet
		 * header, plus we have the Receive Status Block; strip all of
		 * this from the SKB.
		 */
		skb_pull(skb, sizeof(*rsb) + 2);
		len -= (sizeof(*rsb) + 2);
		processed_bytes += len;

		/* UniMAC may forward CRC */
		if (priv->crc_fwd) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		skb->protocol = eth_type_trans(skb, ndev);
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;
		u64_stats_update_begin(&priv->syncp);
		stats64->rx_packets++;
		stats64->rx_bytes += len;
		u64_stats_update_end(&priv->syncp);

		napi_gro_receive(&priv->napi, skb);
next:
		processed++;
		priv->rx_read_ptr++;

		if (priv->rx_read_ptr == priv->num_rx_bds)
			priv->rx_read_ptr = 0;
	}

	priv->dim.packets = processed;
	priv->dim.bytes = processed_bytes;

	return processed;
}

static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
				       struct bcm_sysport_cb *cb,
				       unsigned int *bytes_compl,
				       unsigned int *pkts_compl)
{
	struct bcm_sysport_priv *priv = ring->priv;
	struct device *kdev = &priv->pdev->dev;

	if (cb->skb) {
		*bytes_compl += cb->skb->len;
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 dma_unmap_len(cb, dma_len),
				 DMA_TO_DEVICE);
		(*pkts_compl)++;
		bcm_sysport_free_cb(cb);
	/* SKB fragment */
	} else if (dma_unmap_addr(cb, dma_addr)) {
		*bytes_compl += dma_unmap_len(cb, dma_len);
		dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
			       dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}
}

/* Reclaim queued SKBs for transmission completion, lockless version */
static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_tx_ring *ring)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct net_device *ndev = priv->netdev;
	unsigned int txbds_processed = 0;
	struct bcm_sysport_cb *cb;
	unsigned int txbds_ready;
	unsigned int c_index;
	u32 hw_ind;

	/* Clear status before servicing to reduce spurious interrupts */
	if (!ring->priv->is_lite)
		intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR);
	else
		intrl2_0_writel(ring->priv, BIT(ring->index +
				INTRL2_0_TDMA_MBDONE_SHIFT), INTRL2_CPU_CLEAR);

	/* Compute how many descriptors have been processed since last call */
	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
	c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
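	/* The unsigned subtraction below, masked with RING_CONS_INDEX_MASK,
	 * also yields the right count when the hardware index has wrapped.
	 */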
	txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  ring->index, ring->c_index, c_index, txbds_ready);

	while (txbds_processed < txbds_ready) {
		cb = &ring->cbs[ring->clean_index];
		bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);

		ring->desc_count++;
		txbds_processed++;

		if (likely(ring->clean_index < ring->size - 1))
			ring->clean_index++;
		else
			ring->clean_index = 0;
	}

	u64_stats_update_begin(&priv->syncp);
	ring->packets += pkts_compl;
	ring->bytes += bytes_compl;
	u64_stats_update_end(&priv->syncp);

	ring->c_index = c_index;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
		  ring->index, ring->c_index, pkts_compl, bytes_compl);

	return pkts_compl;
}

/* Locked version of the per-ring TX reclaim routine */
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					   struct bcm_sysport_tx_ring *ring)
{
	struct netdev_queue *txq;
	unsigned int released;
	unsigned long flags;

	txq = netdev_get_tx_queue(priv->netdev, ring->index);

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcm_sysport_tx_reclaim(priv, ring);
	if (released)
		netif_tx_wake_queue(txq);

	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}

/* Locked version of the per-ring TX reclaim, but does not wake the queue */
static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
				 struct bcm_sysport_tx_ring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	__bcm_sysport_tx_reclaim(priv, ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_tx_ring *ring =
		container_of(napi, struct bcm_sysport_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_tx_reclaim(ring->priv, ring);

	if (work_done == 0) {
		napi_complete(napi);
		/* re-enable TX interrupt */
		if (!ring->priv->is_lite)
			intrl2_1_mask_clear(ring->priv, BIT(ring->index));
		else
			intrl2_0_mask_clear(ring->priv, BIT(ring->index +
					    INTRL2_0_TDMA_MBDONE_SHIFT));

		return 0;
	}

	return budget;
}

static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
{
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++)
		bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
}

static int bcm_sysport_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_priv *priv =
		container_of(napi, struct bcm_sysport_priv, napi);
	struct dim_sample dim_sample = {};
	unsigned int work_done = 0;

	work_done = bcm_sysport_desc_rx(priv, budget);

	priv->rx_c_index += work_done;
	priv->rx_c_index &= RDMA_CONS_INDEX_MASK;

	/* SYSTEMPORT Lite groups the producer/consumer indexes; the producer is
	 * maintained by HW, but writes to it will be ignored while RDMA
	 * is active.
	 */
	if (!priv->is_lite)
		rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
	else
		rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* re-enable RX interrupts */
		intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
	}

	if (priv->dim.use_dim) {
		dim_update_sample(priv->dim.event_ctr, priv->dim.packets,
				  priv->dim.bytes, &dim_sample);
		net_dim(&priv->dim.dim, dim_sample);
	}

	return work_done;
}

static void mpd_enable_set(struct bcm_sysport_priv *priv, bool enable)
{
	u32 reg, bit;

	reg = umac_readl(priv, UMAC_MPD_CTRL);
	if (enable)
		reg |= MPD_EN;
	else
		reg &= ~MPD_EN;
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	if (priv->is_lite)
		bit = RBUF_ACPI_EN_LITE;
	else
		bit = RBUF_ACPI_EN;

	reg = rbuf_readl(priv, RBUF_CONTROL);
	if (enable)
		reg |= bit;
	else
		reg &= ~bit;
	rbuf_writel(priv, reg, RBUF_CONTROL);
}

static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
	unsigned int index;
	u32 reg;

	/* Disable RXCHK, active filters and Broadcom tag matching */
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
		 RXCHK_BRCM_TAG_MATCH_SHIFT | RXCHK_EN | RXCHK_BRCM_TAG_EN);
	rxchk_writel(priv, reg, RXCHK_CONTROL);

	/* Make sure we restore correct CID index in case HW lost
	 * its context during deep idle state
	 */
	for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
		rxchk_writel(priv, priv->filters_loc[index] <<
			     RXCHK_BRCM_TAG_CID_SHIFT, RXCHK_BRCM_TAG(index));
		rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
	}

	/* Clear the MagicPacket detection logic */
	mpd_enable_set(priv, false);

	reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
	if (reg & INTRL2_0_MPD)
		netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");

	if (reg & INTRL2_0_BRCM_MATCH_TAG) {
		reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
				  RXCHK_BRCM_TAG_MATCH_MASK;
		netdev_info(priv->netdev,
			    "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
	}

	netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}

static void bcm_sysport_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct bcm_sysport_net_dim *ndim =
			container_of(dim, struct bcm_sysport_net_dim, dim);
	struct bcm_sysport_priv *priv =
			container_of(ndim, struct bcm_sysport_priv, dim);
	struct dim_cq_moder cur_profile = net_dim_get_rx_moderation(dim->mode,
								    dim->profile_ix);

	bcm_sysport_set_rx_coalesce(priv, cur_profile.usec, cur_profile.pkts);
	dim->state = DIM_START_MEASURE;
}

/* RX and misc interrupt routine */
static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring, ring_bit;

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq0_stat == 0)) {
		netdev_warn(priv->netdev, "spurious RX interrupt\n");
		return IRQ_NONE;
	}

	if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
		priv->dim.event_ctr++;
		if (likely(napi_schedule_prep(&priv->napi))) {
			/* disable RX interrupts */
			intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
			__napi_schedule_irqoff(&priv->napi);
		}
	}

	/* TX ring is full, perform a full reclaim since we do not know
	 * which one would trigger this interrupt
	 */
	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
		bcm_sysport_tx_reclaim_all(priv);

	if (!priv->is_lite)
		goto out;

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		ring_bit = BIT(ring + INTRL2_0_TDMA_MBDONE_SHIFT);
		if (!(priv->irq0_stat & ring_bit))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_0_mask_set(priv, ring_bit);
			__napi_schedule(&txr->napi);
		}
	}
out:
	return IRQ_HANDLED;
}

/* TX interrupt service routine */
static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
				~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq1_stat == 0)) {
		netdev_warn(priv->netdev, "spurious TX interrupt\n");
		return IRQ_NONE;
	}

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		if (!(priv->irq1_stat & BIT(ring)))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_1_mask_set(priv, BIT(ring));
			__napi_schedule_irqoff(&txr->napi);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
{
	struct bcm_sysport_priv *priv = dev_id;

	pm_wakeup_event(&priv->pdev->dev, 0);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void bcm_sysport_poll_controller(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	disable_irq(priv->irq0);
	bcm_sysport_rx_isr(priv->irq0, priv);
	enable_irq(priv->irq0);

	if (!priv->is_lite) {
		disable_irq(priv->irq1);
		bcm_sysport_tx_isr(priv->irq1, priv);
		enable_irq(priv->irq1);
	}
}
#endif

static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
					      struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct sk_buff *nskb;
	struct bcm_tsb *tsb;
	u32 csum_info;
	u8 ip_proto;
	u16 csum_start;
	__be16 ip_ver;

	/* Re-allocate SKB if needed */
	if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
		nskb = skb_realloc_headroom(skb, sizeof(*tsb));
		if (!nskb) {
			dev_kfree_skb_any(skb);
			priv->mib.tx_realloc_tsb_failed++;
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;
			return NULL;
		}
		dev_consume_skb_any(skb);
		skb = nskb;
		priv->mib.tx_realloc_tsb++;
	}

	tsb = skb_push(skb, sizeof(*tsb));
	/* Zero-out TSB by default */
	memset(tsb, 0, sizeof(*tsb));

	if (skb_vlan_tag_present(skb)) {
		tsb->pcp_dei_vid = skb_vlan_tag_get_prio(skb) & PCP_DEI_MASK;
		tsb->pcp_dei_vid |= (u32)skb_vlan_tag_get_id(skb) << VID_SHIFT;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = skb->protocol;
		switch (ip_ver) {
		case htons(ETH_P_IP):
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return skb;
		}

		/* Get the checksum offset and the L4 (transport) offset */
		csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
		/* Account for the HW inserted VLAN tag */
		if (skb_vlan_tag_present(skb))
			csum_start += VLAN_HLEN;
		csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
		csum_info |= (csum_start << L4_PTR_SHIFT);

		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			csum_info |= L4_LENGTH_VALID;
			if (ip_proto == IPPROTO_UDP &&
			    ip_ver == htons(ETH_P_IP))
				csum_info |= L4_UDP;
		} else {
			csum_info = 0;
		}

		tsb->l4_ptr_dest_map = csum_info;
	}

	return skb;
}

static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct bcm_sysport_tx_ring *ring;
	unsigned long flags, desc_flags;
	struct bcm_sysport_cb *cb;
	struct netdev_queue *txq;
	u32 len_status, addr_lo;
	unsigned int skb_len;
	dma_addr_t mapping;
	u16 queue;
	int ret;

	queue = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, queue);
	ring = &priv->tx_rings[queue];

	/* lock against tx reclaim in BH context and TX ring full interrupt */
	spin_lock_irqsave(&ring->lock, flags);
	if (unlikely(ring->desc_count == 0)) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "queue %d awake and ring full!\n", queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Insert TSB and checksum infos */
	if (priv->tsb_en) {
		skb = bcm_sysport_insert_tsb(skb, dev);
		if (!skb) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	skb_len = skb->len;

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
			  skb->data, skb_len);
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* Remember the SKB for future freeing */
	cb = &ring->cbs[ring->curr_desc];
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_unmap_len_set(cb, dma_len, skb_len);

	addr_lo = lower_32_bits(mapping);
	len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
	len_status |= (skb_len << DESC_LEN_SHIFT);
	len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
		       DESC_STATUS_SHIFT;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);
	if (skb_vlan_tag_present(skb))
		len_status |= (TX_STATUS_VLAN_VID_TSB << DESC_STATUS_SHIFT);

	ring->curr_desc++;
	if (ring->curr_desc == ring->size)
		ring->curr_desc = 0;
	ring->desc_count--;

	/* Ports are latched, so write upper address first */
	spin_lock_irqsave(&priv->desc_lock, desc_flags);
	tdma_writel(priv, len_status, TDMA_WRITE_PORT_HI(ring->index));
	tdma_writel(priv, addr_lo, TDMA_WRITE_PORT_LO(ring->index));
	spin_unlock_irqrestore(&priv->desc_lock, desc_flags);

	/* Check ring space and update SW control flow */
	if (ring->desc_count == 0)
		netif_tx_stop_queue(txq);

	netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
		  ring->index, ring->desc_count, ring->curr_desc);

	ret = NETDEV_TX_OK;
out:
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}

static void bcm_sysport_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	netdev_warn(dev, "transmit timeout!\n");

	netif_trans_update(dev);
	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}

/* phylib adjust link callback */
static void bcm_sysport_adj_link(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned int changed = 0;
	u32 cmd_bits = 0, reg;

	if (priv->old_link != phydev->link) {
		changed = 1;
		priv->old_link = phydev->link;
	}

	if (priv->old_duplex != phydev->duplex) {
		changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	if (priv->is_lite)
		goto out;

	switch (phydev->speed) {
	case SPEED_2500:
		cmd_bits = CMD_SPEED_2500;
		break;
	case SPEED_1000:
		cmd_bits = CMD_SPEED_1000;
		break;
	case SPEED_100:
		cmd_bits = CMD_SPEED_100;
		break;
	case SPEED_10:
		cmd_bits = CMD_SPEED_10;
		break;
	default:
		break;
	}
	cmd_bits <<= CMD_SPEED_SHIFT;

	if (phydev->duplex == DUPLEX_HALF)
		cmd_bits |= CMD_HD_EN;

	if (priv->old_pause != phydev->pause) {
		changed = 1;
		priv->old_pause = phydev->pause;
	}

	if (!phydev->pause)
		cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;

	if (!changed)
		return;

	if (phydev->link) {
		reg = umac_readl(priv, UMAC_CMD);
		reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
			CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
			CMD_TX_PAUSE_IGNORE);
		reg |= cmd_bits;
		umac_writel(priv, reg, UMAC_CMD);
	}
out:
	if (changed)
		phy_print_status(phydev);
}

static void bcm_sysport_init_dim(struct bcm_sysport_priv *priv,
				 void (*cb)(struct work_struct *work))
{
	struct bcm_sysport_net_dim *dim = &priv->dim;

	INIT_WORK(&dim->dim.work, cb);
	dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	dim->event_ctr = 0;
	dim->packets = 0;
	dim->bytes = 0;
}

static void bcm_sysport_init_rx_coalesce(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_net_dim *dim = &priv->dim;
	struct dim_cq_moder moder;
	u32 usecs, pkts;

	usecs = priv->rx_coalesce_usecs;
	pkts = priv->rx_max_coalesced_frames;

	/* If DIM was enabled, re-apply default parameters */
	if (dim->use_dim) {
		moder = net_dim_get_def_rx_moderation(dim->dim.mode);
		usecs = moder.usec;
		pkts = moder.pkts;
	}

	bcm_sysport_set_rx_coalesce(priv, usecs, pkts);
}

static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
				    unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	size_t size;
	u32 reg;

	/* Simple descriptors partitioning for now */
	size = 256;

	ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
	if (!ring->cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	/* Initialize SW view of the ring */
	spin_lock_init(&ring->lock);
	ring->priv = priv;
	netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
	ring->index = index;
	ring->size = size;
	ring->clean_index = 0;
	ring->alloc_size = ring->size;
	ring->desc_count = ring->size;
	ring->curr_desc = 0;

	/* Initialize HW ring */
	tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
	tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));

	/* Configure QID and port mapping */
	reg = tdma_readl(priv, TDMA_DESC_RING_MAPPING(index));
	reg &= ~(RING_QID_MASK | RING_PORT_ID_MASK << RING_PORT_ID_SHIFT);
	if (ring->inspect) {
		reg |= ring->switch_queue & RING_QID_MASK;
		reg |= ring->switch_port << RING_PORT_ID_SHIFT;
	} else {
		reg |= RING_IGNORE_STATUS;
	}
	tdma_writel(priv, reg, TDMA_DESC_RING_MAPPING(index));
	reg = 0;
	/* Adjust the packet size calculations if SYSTEMPORT is responsible
	 * for HW insertion of VLAN tags
	 */
	if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
		reg = VLAN_HLEN << RING_PKT_SIZE_ADJ_SHIFT;
	tdma_writel(priv, reg, TDMA_DESC_RING_PCP_DEI_VID(index));

	/* Enable ACB algorithm 2 */
	reg = tdma_readl(priv, TDMA_CONTROL);
	reg |= tdma_control_bit(priv, ACB_ALGO);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Do not use tdma_control_bit() here because TSB_SWAP1 collides
	 * with the original definition of ACB_ALGO
	 */
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->is_lite)
		reg &= ~BIT(TSB_SWAP1);
	/* Set a correct TSB format based on host endian */
	if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= tdma_control_bit(priv, TSB_SWAP0);
	else
		reg &= ~tdma_control_bit(priv, TSB_SWAP0);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Program the number of descriptors as MAX_THRESHOLD and half of
	 * its size for the hysteresis trigger
	 */
	tdma_writel(priv, ring->size |
			1 << RING_HYST_THRESH_SHIFT,
			TDMA_DESC_RING_MAX_HYST(index));

	/* Enable the ring queue in the arbiter */
	reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
	reg |= (1 << index);
	tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);

	napi_enable(&ring->napi);

	netif_dbg(priv, hw, priv->netdev,
		  "TDMA cfg, size=%d, switch q=%d,port=%d\n",
		  ring->size, ring->switch_queue,
		  ring->switch_port);

	return 0;
}

static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
				     unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	u32 reg;

	/* Caller should stop the TDMA engine */
	reg = tdma_readl(priv, TDMA_STATUS);
	if (!(reg & TDMA_DISABLED))
		netdev_warn(priv->netdev, "TDMA not stopped!\n");

	/* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
	 * fail, so by checking this pointer we know whether the TX ring was
	 * fully initialized or not.
	 */
	if (!ring->cbs)
		return;

	napi_disable(&ring->napi);
	netif_napi_del(&ring->napi);

	bcm_sysport_tx_clean(priv, ring);

	kfree(ring->cbs);
	ring->cbs = NULL;
	ring->size = 0;
	ring->alloc_size = 0;

	netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
}

/* RDMA helper */
static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = rdma_readl(priv, RDMA_CONTROL);
	if (enable)
		reg |= RDMA_EN;
	else
		reg &= ~RDMA_EN;
	rdma_writel(priv, reg, RDMA_CONTROL);

	/* Poll for RDMA disabling completion */
	do {
		reg = rdma_readl(priv, RDMA_STATUS);
		if (!!(reg & RDMA_DISABLED) == !enable)
			return 0;
		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");

	return -ETIMEDOUT;
}

/* TDMA helper */
static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = tdma_readl(priv, TDMA_CONTROL);
	if (enable)
		reg |= tdma_control_bit(priv, TDMA_EN);
	else
		reg &= ~tdma_control_bit(priv, TDMA_EN);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Poll for TDMA disabling completion */
	do {
		reg = tdma_readl(priv, TDMA_STATUS);
		if (!!(reg & TDMA_DISABLED) == !enable)
			return 0;

		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");

	return -ETIMEDOUT;
}

static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	u32 reg;
	int ret;
	int i;

	/* Initialize SW view of the RX ring */
	priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC;
	priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
	priv->rx_c_index = 0;
	priv->rx_read_ptr = 0;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
				GFP_KERNEL);
	if (!priv->rx_cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = priv->rx_cbs + i;
		cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
	}

	ret = bcm_sysport_alloc_rx_bufs(priv);
	if (ret) {
		netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
		return ret;
	}

	/* Initialize HW, ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		rdma_enable_set(priv, 0);

	rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
	rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
	rdma_writel(priv, 0, RDMA_PROD_INDEX);
	rdma_writel(priv, 0, RDMA_CONS_INDEX);
	rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
			  RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
	/* Operate the queue in ring mode */
	rdma_writel(priv, 0, RDMA_START_ADDR_HI);
	rdma_writel(priv, 0, RDMA_START_ADDR_LO);
	rdma_writel(priv, 0, RDMA_END_ADDR_HI);
	rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO);

	netif_dbg(priv, hw, priv->netdev,
		  "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
		  priv->num_rx_bds, priv->rx_bds);

	return 0;
}

static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	unsigned int i;
	u32 reg;

	/* Caller should ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		netdev_warn(priv->netdev, "RDMA not stopped!\n");

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		if (dma_unmap_addr(cb, dma_addr))
			dma_unmap_single(&priv->pdev->dev,
					 dma_unmap_addr(cb, dma_addr),
					 RX_BUF_LENGTH, DMA_FROM_DEVICE);
		bcm_sysport_free_cb(cb);
	}

	kfree(priv->rx_cbs);
	priv->rx_cbs = NULL;

	netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
}

static void bcm_sysport_set_rx_mode(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	if (priv->is_lite)
		return;

	reg = umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC)
		reg |= CMD_PROMISC;
	else
		reg &= ~CMD_PROMISC;
	umac_writel(priv, reg, UMAC_CMD);

	/* No support for ALLMULTI */
	if (dev->flags & IFF_ALLMULTI)
		return;
}

static inline void umac_enable_set(struct bcm_sysport_priv *priv,
				   u32 mask, unsigned int enable)
{
	u32 reg;

	if (!priv->is_lite) {
		reg = umac_readl(priv, UMAC_CMD);
		if (enable)
			reg |= mask;
		else
			reg &= ~mask;
		umac_writel(priv, reg, UMAC_CMD);
	} else {
		reg = gib_readl(priv, GIB_CONTROL);
		if (enable)
			reg |= mask;
		else
			reg &= ~mask;
		gib_writel(priv, reg, GIB_CONTROL);
	}

	/* UniMAC stops on a packet boundary, wait for a full-sized packet
	 * to be processed (1 msec).
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}

static inline void umac_reset(struct bcm_sysport_priv *priv)
{
	u32 reg;

	if (priv->is_lite)
		return;

	reg = umac_readl(priv, UMAC_CMD);
	reg |= CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
	udelay(10);
	reg = umac_readl(priv, UMAC_CMD);
	reg &= ~CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
}

static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
			     unsigned char *addr)
{
	u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
		    addr[3];
	u32 mac1 = (addr[4] << 8) | addr[5];
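	/* For example (address chosen for illustration only), 00:10:18:aa:bb:cc
	 * packs as mac0 = 0x001018aa and mac1 = 0x0000bbcc.
	 */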

	if (!priv->is_lite) {
		umac_writel(priv, mac0, UMAC_MAC0);
		umac_writel(priv, mac1, UMAC_MAC1);
	} else {
		gib_writel(priv, mac0, GIB_MAC0);
		gib_writel(priv, mac1, GIB_MAC1);
	}
}

static void topctrl_flush(struct bcm_sysport_priv *priv)
{
	topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
	mdelay(1);
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);
}

static int bcm_sysport_change_mac(struct net_device *dev, void *p)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	/* interface is disabled, changes to MAC will be reflected on next
	 * open call
	 */
	if (!netif_running(dev))
		return 0;

	umac_set_hw_addr(priv, dev->dev_addr);

	return 0;
}

static void bcm_sysport_get_stats64(struct net_device *dev,
				    struct rtnl_link_stats64 *stats)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	unsigned int start;

	netdev_stats_to_stats64(stats, &dev->stats);

	bcm_sysport_update_tx_stats(priv, &stats->tx_bytes,
				    &stats->tx_packets);

	do {
		start = u64_stats_fetch_begin_irq(&priv->syncp);
		stats->rx_packets = stats64->rx_packets;
		stats->rx_bytes = stats64->rx_bytes;
	} while (u64_stats_fetch_retry_irq(&priv->syncp, start));
}

static void bcm_sysport_netif_start(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* Enable NAPI */
	bcm_sysport_init_dim(priv, bcm_sysport_dim_work);
	bcm_sysport_init_rx_coalesce(priv);
	napi_enable(&priv->napi);

	/* Enable RX interrupt and TX ring full interrupt */
	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);

	phy_start(dev->phydev);

	/* Enable TX interrupts for the TXQs */
	if (!priv->is_lite)
		intrl2_1_mask_clear(priv, 0xffffffff);
	else
		intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);
}

static void rbuf_init(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = rbuf_readl(priv, RBUF_CONTROL);
	reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
	/* Set a correct RSB format on SYSTEMPORT Lite */
	if (priv->is_lite)
		reg &= ~RBUF_RSB_SWAP1;

	/* Set a correct RSB format based on host endian */
	if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= RBUF_RSB_SWAP0;
	else
		reg &= ~RBUF_RSB_SWAP0;
	rbuf_writel(priv, reg, RBUF_CONTROL);
}

static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv)
{
	intrl2_0_mask_set(priv, 0xffffffff);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	if (!priv->is_lite) {
		intrl2_1_mask_set(priv, 0xffffffff);
		intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	}
}

static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = gib_readl(priv, GIB_CONTROL);
	/* Include Broadcom tag in pad extension and fix up IPG_LENGTH */
	if (netdev_uses_dsa(priv->netdev)) {
		reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT);
		reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT;
	}
	reg &= ~(GIB_IPG_LEN_MASK << GIB_IPG_LEN_SHIFT);
	reg |= 12 << GIB_IPG_LEN_SHIFT;
	gib_writel(priv, reg, GIB_CONTROL);
}

static int bcm_sysport_open(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	unsigned int i;
	int ret;

	clk_prepare_enable(priv->clk);

	/* Reset UniMAC */
	umac_reset(priv);

	/* Flush TX and RX FIFOs at TOPCTRL level */
	topctrl_flush(priv);

	/* Disable the UniMAC RX/TX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);

	/* Enable RBUF 4-byte alignment and Receive Status Block */
	rbuf_init(priv);

	/* Set maximum frame length */
	if (!priv->is_lite)
		umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
	else
		gib_set_pad_extension(priv);

	/* Apply features again in case we changed them while interface was
	 * down
	 */
	bcm_sysport_set_features(dev, dev->features);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
				0, priv->phy_interface);
	if (!phydev) {
		netdev_err(dev, "could not attach to PHY\n");
		ret = -ENODEV;
		goto out_clk_disable;
	}

	/* Reset house keeping link status */
	priv->old_duplex = -1;
	priv->old_link = -1;
	priv->old_pause = -1;

	/* mask all interrupts and request them */
	bcm_sysport_mask_all_intrs(priv);

	ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
	if (ret) {
		netdev_err(dev, "failed to request RX interrupt\n");
		goto out_phy_disconnect;
	}

	if (!priv->is_lite) {
		ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0,
				  dev->name, dev);
		if (ret) {
			netdev_err(dev, "failed to request TX interrupt\n");
			goto out_free_irq0;
		}
	}

	/* Initialize both hardware and software ring */
	spin_lock_init(&priv->desc_lock);
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_ring;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	/* Turn on RDMA */
	ret = rdma_enable_set(priv, 1);
	if (ret)
		goto out_free_rx_ring;

	/* Turn on TDMA */
	ret = tdma_enable_set(priv, 1);
	if (ret)
		goto out_clear_rx_int;

	/* Turn on UniMAC TX/RX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);

	bcm_sysport_netif_start(dev);

	netif_tx_start_all_queues(dev);

	return 0;

out_clear_rx_int:
	intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_ring:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	if (!priv->is_lite)
		free_irq(priv->irq1, dev);
out_free_irq0:
	free_irq(priv->irq0, dev);
out_phy_disconnect:
	phy_disconnect(phydev);
out_clk_disable:
	clk_disable_unprepare(priv->clk);
	return ret;
}

static void bcm_sysport_netif_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* stop all software from updating hardware */
	netif_tx_disable(dev);
	napi_disable(&priv->napi);
	cancel_work_sync(&priv->dim.dim.work);
	phy_stop(dev->phydev);

	/* mask all interrupts */
	bcm_sysport_mask_all_intrs(priv);
}

static int bcm_sysport_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;

	bcm_sysport_netif_stop(dev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling TDMA\n");
		return ret;
	}

	/* Wait for a maximum packet size to be drained */
	usleep_range(2000, 3000);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling RDMA\n");
		return ret;
	}

	/* Disable UniMAC TX */
	umac_enable_set(priv, CMD_TX_EN, 0);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	free_irq(priv->irq0, dev);
	if (!priv->is_lite)
		free_irq(priv->irq1, dev);

	/* Disconnect from PHY */
	phy_disconnect(dev->phydev);

	clk_disable_unprepare(priv->clk);

	return 0;
}

static int bcm_sysport_rule_find(struct bcm_sysport_priv *priv,
				 u64 location)
{
	unsigned int index;
	u32 reg;

	for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
		reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
		reg >>= RXCHK_BRCM_TAG_CID_SHIFT;
		reg &= RXCHK_BRCM_TAG_CID_MASK;
		if (reg == location)
			return index;
	}

	return -EINVAL;
}

static int bcm_sysport_rule_get(struct bcm_sysport_priv *priv,
				struct ethtool_rxnfc *nfc)
{
	int index;

	/* This is not a rule that we know about */
	index = bcm_sysport_rule_find(priv, nfc->fs.location);
	if (index < 0)
		return -EOPNOTSUPP;

	nfc->fs.ring_cookie = RX_CLS_FLOW_WAKE;

	return 0;
}

static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
				struct ethtool_rxnfc *nfc)
{
	unsigned int index;
	u32 reg;

	/* We cannot match locations greater than what the classification ID
	 * permits (256 entries)
	 */
	if (nfc->fs.location > RXCHK_BRCM_TAG_CID_MASK)
		return -E2BIG;

	/* We cannot support flows that are not destined for a wake-up */
	if (nfc->fs.ring_cookie != RX_CLS_FLOW_WAKE)
		return -EOPNOTSUPP;

	/* All filters are already in use, we cannot match more rules */
	if (bitmap_weight(priv->filters, RXCHK_BRCM_TAG_MAX) ==
	    RXCHK_BRCM_TAG_MAX)
		return -ENOSPC;

	index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
	if (index >= RXCHK_BRCM_TAG_MAX)
		return -ENOSPC;

	/* Location is the classification ID, and index is the position
	 * within one of our 8 possible filters to be programmed
	 */
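	/* For instance (hypothetical state), if filter slots 0 and 1 are
	 * already taken, a rule inserted at location 5 programs CID 5 into
	 * RXCHK_BRCM_TAG(2) and leaves the other slots untouched.
	 */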
	reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
	reg &= ~(RXCHK_BRCM_TAG_CID_MASK << RXCHK_BRCM_TAG_CID_SHIFT);
	reg |= nfc->fs.location << RXCHK_BRCM_TAG_CID_SHIFT;
	rxchk_writel(priv, reg, RXCHK_BRCM_TAG(index));
	rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));

	priv->filters_loc[index] = nfc->fs.location;
	set_bit(index, priv->filters);

	return 0;
}

static int bcm_sysport_rule_del(struct bcm_sysport_priv *priv,
				u64 location)
{
	int index;

	/* This is not a rule that we know about */
	index = bcm_sysport_rule_find(priv, location);
	if (index < 0)
		return -EOPNOTSUPP;

	/* No need to disable this filter in hardware here; only the filters
	 * still set in priv->filters get enabled at suspend time by
	 * bcm_sysport_suspend_to_wol
	 */
	clear_bit(index, priv->filters);
	priv->filters_loc[index] = 0;

	return 0;
}

static int bcm_sysport_get_rxnfc(struct net_device *dev,
				 struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (nfc->cmd) {
	case ETHTOOL_GRXCLSRULE:
		ret = bcm_sysport_rule_get(priv, nfc);
		break;
	default:
		break;
	}

	return ret;
}

static int bcm_sysport_set_rxnfc(struct net_device *dev,
				 struct ethtool_rxnfc *nfc)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (nfc->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = bcm_sysport_rule_set(priv, nfc);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = bcm_sysport_rule_del(priv, nfc->fs.location);
		break;
	default:
		break;
	}

	return ret;
}

static const struct ethtool_ops bcm_sysport_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
	.get_drvinfo		= bcm_sysport_get_drvinfo,
	.get_msglevel		= bcm_sysport_get_msglvl,
	.set_msglevel		= bcm_sysport_set_msglvl,
	.get_link		= ethtool_op_get_link,
	.get_strings		= bcm_sysport_get_strings,
	.get_ethtool_stats	= bcm_sysport_get_stats,
	.get_sset_count		= bcm_sysport_get_sset_count,
	.get_wol		= bcm_sysport_get_wol,
	.set_wol		= bcm_sysport_set_wol,
	.get_coalesce		= bcm_sysport_get_coalesce,
	.set_coalesce		= bcm_sysport_set_coalesce,
	.get_link_ksettings     = phy_ethtool_get_link_ksettings,
	.set_link_ksettings     = phy_ethtool_set_link_ksettings,
	.get_rxnfc		= bcm_sysport_get_rxnfc,
	.set_rxnfc		= bcm_sysport_set_rxnfc,
};

static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
				    struct net_device *sb_dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u16 queue = skb_get_queue_mapping(skb);
	struct bcm_sysport_tx_ring *tx_ring;
	unsigned int q, port;

	if (!netdev_uses_dsa(dev))
		return netdev_pick_tx(dev, skb, NULL);

	/* DSA tagging layer will have configured the correct queue */
	q = BRCM_TAG_GET_QUEUE(queue);
	port = BRCM_TAG_GET_PORT(queue);
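	/* Worked example (numbers are illustrative only): with 4 TX queues
	 * per switch port, a packet tagged for port 2, queue 1 resolves to
	 * priv->ring_map[1 + 2 * 4], i.e. ring_map[9].
	 */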
	tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];

	if (unlikely(!tx_ring))
		return netdev_pick_tx(dev, skb, NULL);

	return tx_ring->index;
}

static const struct net_device_ops bcm_sysport_netdev_ops = {
	.ndo_start_xmit		= bcm_sysport_xmit,
	.ndo_tx_timeout		= bcm_sysport_tx_timeout,
	.ndo_open		= bcm_sysport_open,
	.ndo_stop		= bcm_sysport_stop,
	.ndo_set_features	= bcm_sysport_set_features,
	.ndo_set_rx_mode	= bcm_sysport_set_rx_mode,
	.ndo_set_mac_address	= bcm_sysport_change_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bcm_sysport_poll_controller,
#endif
	.ndo_get_stats64	= bcm_sysport_get_stats64,
	.ndo_select_queue	= bcm_sysport_select_queue,
};

static int bcm_sysport_map_queues(struct notifier_block *nb,
				  struct dsa_notifier_register_info *info)
{
	struct bcm_sysport_tx_ring *ring;
	struct bcm_sysport_priv *priv;
	struct net_device *slave_dev;
	unsigned int num_tx_queues;
	unsigned int q, qp, port;
	struct net_device *dev;

	priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
	if (priv->netdev != info->master)
		return 0;

	dev = info->master;

	/* We can't be setting up queue inspection for switches that are not
	 * directly attached
	 */
	if (info->switch_number)
		return 0;

	if (dev->netdev_ops != &bcm_sysport_netdev_ops)
		return 0;

	port = info->port_number;
	slave_dev = info->info.dev;

	/* SYSTEMPORT Lite has half as many queues, so a 1:1 mapping is not
	 * possible and we can only do a 2:1 mapping. Reducing the number of
	 * per-port (slave_dev) network device queues achieves just that.
	 * This needs to happen now, before any slave network device is used,
	 * so that it accurately reflects the number of real TX queues.
	 */
	if (priv->is_lite)
		netif_set_real_num_tx_queues(slave_dev,
					     slave_dev->num_tx_queues / 2);
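	/* e.g. (illustrative numbers only) a slave_dev created with 8 TX
	 * queues ends up with 4 real TX queues on a Lite device before the
	 * ring mapping below is established.
	 */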

	num_tx_queues = slave_dev->real_num_tx_queues;

	if (priv->per_port_num_tx_queues &&
	    priv->per_port_num_tx_queues != num_tx_queues)
		netdev_warn(slave_dev, "asymmetric number of per-port queues\n");

	priv->per_port_num_tx_queues = num_tx_queues;

	for (q = 0, qp = 0; q < dev->num_tx_queues && qp < num_tx_queues;
	     q++) {
		ring = &priv->tx_rings[q];

		if (ring->inspect)
			continue;

		/* Just remember the mapping; the actual programming is
		 * done in bcm_sysport_init_tx_ring()
		 */
		ring->switch_queue = qp;
		ring->switch_port = port;
		ring->inspect = true;
		priv->ring_map[qp + port * num_tx_queues] = ring;
		qp++;
	}

	return 0;
}

static int bcm_sysport_unmap_queues(struct notifier_block *nb,
				    struct dsa_notifier_register_info *info)
{
	struct bcm_sysport_tx_ring *ring;
	struct bcm_sysport_priv *priv;
	struct net_device *slave_dev;
	unsigned int num_tx_queues;
	struct net_device *dev;
	unsigned int q, qp, port;

	priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
	if (priv->netdev != info->master)
		return 0;

	dev = info->master;

	if (dev->netdev_ops != &bcm_sysport_netdev_ops)
		return 0;

	port = info->port_number;
	slave_dev = info->info.dev;

	num_tx_queues = slave_dev->real_num_tx_queues;

	for (q = 0; q < dev->num_tx_queues; q++) {
		ring = &priv->tx_rings[q];

		if (ring->switch_port != port)
			continue;

		if (!ring->inspect)
			continue;

		ring->inspect = false;
		qp = ring->switch_queue;
		priv->ring_map[qp + port * num_tx_queues] = NULL;
	}

	return 0;
}

static int bcm_sysport_dsa_notifier(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	int ret = NOTIFY_DONE;

	switch (event) {
	case DSA_PORT_REGISTER:
		ret = bcm_sysport_map_queues(nb, ptr);
		break;
	case DSA_PORT_UNREGISTER:
		ret = bcm_sysport_unmap_queues(nb, ptr);
		break;
	}

	return notifier_from_errno(ret);
}

#define REV_FMT	"v%2x.%02x"
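/* e.g. a masked REV_CNTL value of 0x0102 is reported as "v 1.02" by the
 * probe message below.
 */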

static const struct bcm_sysport_hw_params bcm_sysport_params[] = {
	[SYSTEMPORT] = {
		.is_lite = false,
		.num_rx_desc_words = SP_NUM_HW_RX_DESC_WORDS,
	},
	[SYSTEMPORT_LITE] = {
		.is_lite = true,
		.num_rx_desc_words = SP_LT_NUM_HW_RX_DESC_WORDS,
	},
};

static const struct of_device_id bcm_sysport_of_match[] = {
	{ .compatible = "brcm,systemportlite-v1.00",
	  .data = &bcm_sysport_params[SYSTEMPORT_LITE] },
	{ .compatible = "brcm,systemport-v1.00",
	  .data = &bcm_sysport_params[SYSTEMPORT] },
	{ .compatible = "brcm,systemport",
	  .data = &bcm_sysport_params[SYSTEMPORT] },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);
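
/* A minimal sketch of a matching Device Tree node, for illustration only
 * (the unit address, "reg" range and interrupt specifiers below are made up
 * and not taken from any real board file):
 *
 *	ethernet@f04a0000 {
 *		compatible = "brcm,systemport-v1.00";
 *		reg = <0xf04a0000 0x4650>;
 *		interrupts = <0x0 0x18 0x0>, <0x0 0x19 0x0>, <0x0 0x1a 0x0>;
 *		systemport,num-txq = <32>;
 *		systemport,num-rxq = <1>;
 *	};
 */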

static int bcm_sysport_probe(struct platform_device *pdev)
{
	const struct bcm_sysport_hw_params *params;
	const struct of_device_id *of_id = NULL;
	struct bcm_sysport_priv *priv;
	struct device_node *dn;
	struct net_device *dev;
	const void *macaddr;
	u32 txq, rxq;
	int ret;

	dn = pdev->dev.of_node;
	of_id = of_match_node(bcm_sysport_of_match, dn);
	if (!of_id || !of_id->data)
		return -EINVAL;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
	if (ret)
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(&pdev->dev, "unable to set DMA mask: %d\n", ret);
		return ret;
	}

	/* Fairly quickly we need to know the type of adapter we have */
	params = of_id->data;

	/* Read the Transmit/Receive Queue properties */
	if (of_property_read_u32(dn, "systemport,num-txq", &txq))
		txq = TDMA_NUM_RINGS;
	if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
		rxq = 1;

	/* Sanity check the number of transmit queues */
	if (!txq || txq > TDMA_NUM_RINGS)
		return -EINVAL;

	dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
	if (!dev)
		return -ENOMEM;

	/* Initialize private members */
	priv = netdev_priv(dev);

	priv->clk = devm_clk_get_optional(&pdev->dev, "sw_sysport");
	if (IS_ERR(priv->clk)) {
		ret = PTR_ERR(priv->clk);
		goto err_free_netdev;
	}

	/* Allocate number of TX rings */
	priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
				      sizeof(struct bcm_sysport_tx_ring),
				      GFP_KERNEL);
	if (!priv->tx_rings) {
		ret = -ENOMEM;
		goto err_free_netdev;
	}

	priv->is_lite = params->is_lite;
	priv->num_rx_desc_words = params->num_rx_desc_words;

	priv->irq0 = platform_get_irq(pdev, 0);
	if (!priv->is_lite) {
		priv->irq1 = platform_get_irq(pdev, 1);
		priv->wol_irq = platform_get_irq(pdev, 2);
	} else {
		priv->wol_irq = platform_get_irq(pdev, 1);
	}
	if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
		ret = -EINVAL;
		goto err_free_netdev;
	}

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto err_free_netdev;
	}

	priv->netdev = dev;
	priv->pdev = pdev;

	ret = of_get_phy_mode(dn, &priv->phy_interface);
	/* Default to GMII interface mode */
	if (ret)
		priv->phy_interface = PHY_INTERFACE_MODE_GMII;

	/* In the case of a fixed PHY, the DT node associated
	 * with the PHY is the Ethernet MAC DT node.
	 */
	if (of_phy_is_fixed_link(dn)) {
		ret = of_phy_register_fixed_link(dn);
		if (ret) {
			dev_err(&pdev->dev, "failed to register fixed PHY\n");
			goto err_free_netdev;
		}

		priv->phy_dn = dn;
	}

	/* Initialize netdevice members */
	macaddr = of_get_mac_address(dn);
	if (IS_ERR(macaddr)) {
		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
		eth_hw_addr_random(dev);
	} else {
		ether_addr_copy(dev->dev_addr, macaddr);
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	dev->ethtool_ops = &bcm_sysport_ethtool_ops;
	dev->netdev_ops = &bcm_sysport_netdev_ops;
	netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);

	dev->features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
			 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			 NETIF_F_HW_VLAN_CTAG_TX;
	dev->hw_features |= dev->features;
	dev->vlan_features |= dev->features;
	dev->max_mtu = UMAC_MAX_MTU_SIZE;

	/* Request the WOL interrupt and advertise suspend if available */
	priv->wol_irq_disabled = 1;
	ret = devm_request_irq(&pdev->dev, priv->wol_irq,
			       bcm_sysport_wol_isr, 0, dev->name, priv);
	if (!ret)
		device_set_wakeup_capable(&pdev->dev, 1);

	priv->wol_clk = devm_clk_get_optional(&pdev->dev, "sw_sysportwol");
	if (IS_ERR(priv->wol_clk)) {
		ret = PTR_ERR(priv->wol_clk);
		goto err_deregister_fixed_link;
	}

	/* Set the needed headroom once and for all */
	BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
	dev->needed_headroom += sizeof(struct bcm_tsb);

	/* libphy will adjust the link state accordingly */
	netif_carrier_off(dev);

	priv->rx_max_coalesced_frames = 1;
	u64_stats_init(&priv->syncp);

	priv->dsa_notifier.notifier_call = bcm_sysport_dsa_notifier;

	ret = register_dsa_notifier(&priv->dsa_notifier);
	if (ret) {
		dev_err(&pdev->dev, "failed to register DSA notifier\n");
		goto err_deregister_fixed_link;
	}

	ret = register_netdev(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register net_device\n");
		goto err_deregister_notifier;
	}

	clk_prepare_enable(priv->clk);

	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
	dev_info(&pdev->dev,
		 "Broadcom SYSTEMPORT%s " REV_FMT
		 " (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
		 priv->is_lite ? " Lite" : "",
		 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
		 priv->irq0, priv->irq1, txq, rxq);

	clk_disable_unprepare(priv->clk);

	return 0;

err_deregister_notifier:
	unregister_dsa_notifier(&priv->dsa_notifier);
err_deregister_fixed_link:
	if (of_phy_is_fixed_link(dn))
		of_phy_deregister_fixed_link(dn);
err_free_netdev:
	free_netdev(dev);
	return ret;
}

static int bcm_sysport_remove(struct platform_device *pdev)
{
	struct net_device *dev = dev_get_drvdata(&pdev->dev);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device_node *dn = pdev->dev.of_node;

	/* Not much to do, ndo_close has been called
	 * and we use managed allocations
	 */
	unregister_dsa_notifier(&priv->dsa_notifier);
	unregister_netdev(dev);
	if (of_phy_is_fixed_link(dn))
		of_phy_deregister_fixed_link(dn);
	free_netdev(dev);
	dev_set_drvdata(&pdev->dev, NULL);

	return 0;
}

static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	unsigned int timeout = 1000;
	unsigned int index, i = 0;
	u32 reg;

	reg = umac_readl(priv, UMAC_MPD_CTRL);
	if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
		reg |= MPD_EN;
	reg &= ~PSW_EN;
	if (priv->wolopts & WAKE_MAGICSECURE) {
		/* Program the SecureOn password */
		umac_writel(priv, get_unaligned_be16(&priv->sopass[0]),
			    UMAC_PSW_MS);
		umac_writel(priv, get_unaligned_be32(&priv->sopass[2]),
			    UMAC_PSW_LS);
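		/* e.g. a (made-up) SecureOn password of aa:bb:cc:dd:ee:ff ends
		 * up split as 0xaabb in UMAC_PSW_MS and 0xccddeeff in
		 * UMAC_PSW_LS.
		 */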
		reg |= PSW_EN;
	}
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	if (priv->wolopts & WAKE_FILTER) {
		/* Turn on ACPI matching to steal packets from RBUF */
		reg = rbuf_readl(priv, RBUF_CONTROL);
		if (priv->is_lite)
			reg |= RBUF_ACPI_EN_LITE;
		else
			reg |= RBUF_ACPI_EN;
		rbuf_writel(priv, reg, RBUF_CONTROL);

		/* Enable RXCHK, active filters and Broadcom tag matching */
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
			 RXCHK_BRCM_TAG_MATCH_SHIFT);
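		/* The match bits are packed contiguously: e.g. with filters at
		 * indices 2 and 6 in use (hypothetical), only the two lowest
		 * bits above RXCHK_BRCM_TAG_MATCH_SHIFT get set.
		 */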
		for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
			reg |= BIT(RXCHK_BRCM_TAG_MATCH_SHIFT + i);
			i++;
		}
		reg |= RXCHK_EN | RXCHK_BRCM_TAG_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	/* Make sure RBUF entered WoL mode as a result */
	do {
		reg = rbuf_readl(priv, RBUF_STATUS);
		if (reg & RBUF_WOL_MODE)
			break;

		udelay(10);
	} while (timeout-- > 0);

	/* Do not leave the UniMAC RBUF matching only MPD packets */
	if (!timeout) {
		mpd_enable_set(priv, false);
		netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
		return -ETIMEDOUT;
	}

	/* UniMAC receive needs to be turned on */
	umac_enable_set(priv, CMD_RX_EN, 1);

	netif_dbg(priv, wol, ndev, "entered WOL mode\n");

	return 0;
}

static int __maybe_unused bcm_sysport_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret = 0;
	u32 reg;

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);

	bcm_sysport_netif_stop(dev);

	phy_suspend(dev->phydev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "RDMA timeout!\n");
		return ret;
	}

	/* Disable RXCHK if enabled */
	if (priv->rx_chk_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg &= ~RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	/* Flush RX pipe */
	if (!priv->wolopts)
		topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		return ret;
	}

	/* Wait for a packet boundary */
	usleep_range(2000, 3000);

	umac_enable_set(priv, CMD_TX_EN, 0);

	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	/* Get prepared for Wake-on-LAN */
	if (device_may_wakeup(d) && priv->wolopts) {
		clk_prepare_enable(priv->wol_clk);
		ret = bcm_sysport_suspend_to_wol(priv);
	}

	clk_disable_unprepare(priv->clk);

	return ret;
}

static int __maybe_unused bcm_sysport_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;

	if (!netif_running(dev))
		return 0;

	clk_prepare_enable(priv->clk);
	if (priv->wolopts)
		clk_disable_unprepare(priv->wol_clk);

	umac_reset(priv);

	/* Disable the UniMAC RX/TX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);

	/* We may have been suspended and never received a WOL event that
	 * would turn off MPD detection, take care of that now
	 */
	bcm_sysport_resume_from_wol(priv);

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_rings;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	/* RX pipe enable */
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);

	ret = rdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "failed to enable RDMA\n");
		goto out_free_rx_ring;
	}

	/* Restore enabled features */
	bcm_sysport_set_features(dev, dev->features);

	rbuf_init(priv);

	/* Set maximum frame length */
	if (!priv->is_lite)
		umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
	else
		gib_set_pad_extension(priv);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	umac_enable_set(priv, CMD_RX_EN, 1);

	/* TX pipe enable */
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);

	umac_enable_set(priv, CMD_TX_EN, 1);

	ret = tdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		goto out_free_rx_ring;
	}

	phy_resume(dev->phydev);

	bcm_sysport_netif_start(dev);

	netif_device_attach(dev);

	return 0;

out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_rings:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	clk_disable_unprepare(priv->clk);
	return ret;
}

static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
		bcm_sysport_suspend, bcm_sysport_resume);

static struct platform_driver bcm_sysport_driver = {
	.probe	= bcm_sysport_probe,
	.remove	= bcm_sysport_remove,
	.driver =  {
		.name = "brcm-systemport",
		.of_match_table = bcm_sysport_of_match,
		.pm = &bcm_sysport_pm_ops,
	},
};
module_platform_driver(bcm_sysport_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
MODULE_ALIAS("platform:brcm-systemport");
MODULE_LICENSE("GPL");