/*   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; version 2 of the License
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
 *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
 *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
 */

#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>

#include "mtk_eth_soc.h"

static int mtk_msg_level = -1;
module_param_named(msg_level, mtk_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");

#define MTK_ETHTOOL_STAT(x) { #x, \
			      offsetof(struct mtk_hw_stats, x) / sizeof(u64) }

/* strings used by ethtool */
static const struct mtk_ethtool_stats {
	char str[ETH_GSTRING_LEN];
	u32 offset;
} mtk_ethtool_stats[] = {
	MTK_ETHTOOL_STAT(tx_bytes),
	MTK_ETHTOOL_STAT(tx_packets),
	MTK_ETHTOOL_STAT(tx_skip),
	MTK_ETHTOOL_STAT(tx_collisions),
	MTK_ETHTOOL_STAT(rx_bytes),
	MTK_ETHTOOL_STAT(rx_packets),
	MTK_ETHTOOL_STAT(rx_overflow),
	MTK_ETHTOOL_STAT(rx_fcs_errors),
	MTK_ETHTOOL_STAT(rx_short_errors),
	MTK_ETHTOOL_STAT(rx_long_errors),
	MTK_ETHTOOL_STAT(rx_checksum_errors),
	MTK_ETHTOOL_STAT(rx_flow_control_packets),
};

static const char * const mtk_clks_source_name[] = {
	"ethif", "esw", "gp1", "gp2", "trgpll"
};

void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	__raw_writel(val, eth->base + reg);
}

u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
{
	return __raw_readl(eth->base + reg);
}

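/* poll the PHY indirect access control register until its busy flag
 * clears, or give up after PHY_IAC_TIMEOUT
 */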
static int mtk_mdio_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
			return 0;
		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
			break;
		usleep_range(10, 20);
	}

	dev_err(eth->dev, "mdio: MDIO timeout\n");
	return -1;
}

static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
			   u32 phy_register, u32 write_data)
{
	if (mtk_mdio_busy_wait(eth))
		return -1;

	write_data &= 0xffff;

	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
		(phy_register << PHY_IAC_REG_SHIFT) |
		(phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
		MTK_PHY_IAC);

	if (mtk_mdio_busy_wait(eth))
		return -1;

	return 0;
}

static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
{
	u32 d;

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
		(phy_reg << PHY_IAC_REG_SHIFT) |
		(phy_addr << PHY_IAC_ADDR_SHIFT),
		MTK_PHY_IAC);

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;

	return d;
}

static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
			  int phy_reg, u16 val)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
}

static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_read(eth, phy_addr, phy_reg);
}

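/* retune the TRGMII interface, its sampling clocks and the TRGPLL rate
 * whenever the PHY reports a new link speed
 */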
static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed)
{
	u32 val;
	int ret;

	val = (speed == SPEED_1000) ?
		INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
	mtk_w32(eth, val, INTF_MODE);

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_CLK_SEL362_5,
			   ETHSYS_TRGMII_CLK_SEL362_5);

	val = (speed == SPEED_1000) ? 250000000 : 500000000;
	ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
	if (ret)
		dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);

	val = (speed == SPEED_1000) ?
		RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_RCK_CTRL);

	val = (speed == SPEED_1000) ?
		TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_TCK_CTRL);
}

static void mtk_phy_link_adjust(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	u16 lcl_adv = 0, rmt_adv = 0;
	u8 flowctrl;
	u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
		  MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
		  MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
		  MAC_MCR_BACKPR_EN;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return;

	switch (dev->phydev->speed) {
	case SPEED_1000:
		mcr |= MAC_MCR_SPEED_1000;
		break;
	case SPEED_100:
		mcr |= MAC_MCR_SPEED_100;
		break;
	}

	if (mac->id == 0 && !mac->trgmii)
		mtk_gmac0_rgmii_adjust(mac->hw, dev->phydev->speed);

	if (dev->phydev->link)
		mcr |= MAC_MCR_FORCE_LINK;

	if (dev->phydev->duplex) {
		mcr |= MAC_MCR_FORCE_DPX;

		if (dev->phydev->pause)
			rmt_adv = LPA_PAUSE_CAP;
		if (dev->phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		if (dev->phydev->advertising & ADVERTISED_Pause)
			lcl_adv |= ADVERTISE_PAUSE_CAP;
		if (dev->phydev->advertising & ADVERTISED_Asym_Pause)
			lcl_adv |= ADVERTISE_PAUSE_ASYM;

		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);

		if (flowctrl & FLOW_CTRL_TX)
			mcr |= MAC_MCR_FORCE_TX_FC;
		if (flowctrl & FLOW_CTRL_RX)
			mcr |= MAC_MCR_FORCE_RX_FC;

		netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
			  flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
			  flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
	}

	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));

	if (dev->phydev->link)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
}

static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
				struct device_node *phy_node)
{
	struct phy_device *phydev;
	int phy_mode;

	phy_mode = of_get_phy_mode(phy_node);
	if (phy_mode < 0) {
		dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
		return -EINVAL;
	}

	phydev = of_phy_connect(eth->netdev[mac->id], phy_node,
				mtk_phy_link_adjust, 0, phy_mode);
	if (!phydev) {
		dev_err(eth->dev, "could not connect to PHY\n");
		return -ENODEV;
	}

	dev_info(eth->dev,
		 "connected mac %d to PHY at %s [uid=%08x, driver=%s]\n",
		 mac->id, phydev_name(phydev), phydev->phy_id,
		 phydev->drv->name);

	return 0;
}

static int mtk_phy_connect(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth;
	struct device_node *np;
	u32 val;

	eth = mac->hw;
	np = of_parse_phandle(mac->of_node, "phy-handle", 0);
	if (!np && of_phy_is_fixed_link(mac->of_node))
		if (!of_phy_register_fixed_link(mac->of_node))
			np = of_node_get(mac->of_node);
	if (!np)
		return -ENODEV;

	switch (of_get_phy_mode(np)) {
	case PHY_INTERFACE_MODE_TRGMII:
		mac->trgmii = true;
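		/* fall through: TRGMII is handled like the RGMII modes below */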
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII:
		mac->ge_mode = 0;
		break;
	case PHY_INTERFACE_MODE_MII:
		mac->ge_mode = 1;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		mac->ge_mode = 2;
		break;
	case PHY_INTERFACE_MODE_RMII:
		if (!mac->id)
			goto err_phy;
		mac->ge_mode = 3;
		break;
	default:
		goto err_phy;
	}

	/* put the gmac into the right mode */
	regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
	val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
	val |= SYSCFG0_GE_MODE(mac->ge_mode, mac->id);
	regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);

	/* couple phydev to net_device */
	mtk_phy_connect_node(eth, mac, np);
	dev->phydev->autoneg = AUTONEG_ENABLE;
	dev->phydev->speed = 0;
	dev->phydev->duplex = 0;

	if (of_phy_is_fixed_link(mac->of_node))
		dev->phydev->supported |=
		SUPPORTED_Pause | SUPPORTED_Asym_Pause;

	dev->phydev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
				   SUPPORTED_Asym_Pause;
	dev->phydev->advertising = dev->phydev->supported |
				    ADVERTISED_Autoneg;
	phy_start_aneg(dev->phydev);

	of_node_put(np);

	return 0;

err_phy:
	of_node_put(np);
	dev_err(eth->dev, "invalid phy_mode\n");
	return -EINVAL;
}

static int mtk_mdio_init(struct mtk_eth *eth)
{
	struct device_node *mii_np;
	int ret;

	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
	if (!mii_np) {
		dev_err(eth->dev, "no %s child node found\n", "mdio-bus");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		ret = -ENODEV;
		goto err_put_node;
	}

	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
	if (!eth->mii_bus) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	eth->mii_bus->name = "mdio";
	eth->mii_bus->read = mtk_mdio_read;
	eth->mii_bus->write = mtk_mdio_write;
	eth->mii_bus->priv = eth;
	eth->mii_bus->parent = eth->dev;

	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name);
	ret = of_mdiobus_register(eth->mii_bus, mii_np);

err_put_node:
	of_node_put(mii_np);
	return ret;
}

static void mtk_mdio_cleanup(struct mtk_eth *eth)
{
	if (!eth->mii_bus)
		return;

	mdiobus_unregister(eth->mii_bus);
}

static inline void mtk_irq_disable(struct mtk_eth *eth,
				   unsigned reg, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->irq_lock, flags);
	val = mtk_r32(eth, reg);
	mtk_w32(eth, val & ~mask, reg);
	spin_unlock_irqrestore(&eth->irq_lock, flags);
}

static inline void mtk_irq_enable(struct mtk_eth *eth,
				  unsigned reg, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->irq_lock, flags);
	val = mtk_r32(eth, reg);
	mtk_w32(eth, val | mask, reg);
	spin_unlock_irqrestore(&eth->irq_lock, flags);
}

static int mtk_set_mac_address(struct net_device *dev, void *p)
{
	int ret = eth_mac_addr(dev, p);
	struct mtk_mac *mac = netdev_priv(dev);
	const char *macaddr = dev->dev_addr;

	if (ret)
		return ret;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	spin_lock_bh(&mac->hw->page_lock);
	mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
		MTK_GDMA_MAC_ADRH(mac->id));
	mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
		(macaddr[4] << 8) | macaddr[5],
		MTK_GDMA_MAC_ADRL(mac->id));
	spin_unlock_bh(&mac->hw->page_lock);

	return 0;
}

void mtk_stats_update_mac(struct mtk_mac *mac)
{
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int base = MTK_GDM1_TX_GBCNT;
	u64 stats;

	base += hw_stats->reg_offset;

	u64_stats_update_begin(&hw_stats->syncp);

	hw_stats->rx_bytes += mtk_r32(mac->hw, base);
	stats =  mtk_r32(mac->hw, base + 0x04);
	if (stats)
		hw_stats->rx_bytes += (stats << 32);
	hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
	hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
	hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
	hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
	hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
	hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
	hw_stats->rx_flow_control_packets +=
					mtk_r32(mac->hw, base + 0x24);
	hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
	hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
	hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
	stats =  mtk_r32(mac->hw, base + 0x34);
	if (stats)
		hw_stats->tx_bytes += (stats << 32);
	hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
	u64_stats_update_end(&hw_stats->syncp);
}

static void mtk_stats_update(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
			continue;
		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
			mtk_stats_update_mac(eth->mac[i]);
			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
		}
	}
}

static struct rtnl_link_stats64 *mtk_get_stats64(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int start;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock(&hw_stats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock(&hw_stats->stats_lock);
		}
	}

	do {
		start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
		storage->rx_packets = hw_stats->rx_packets;
		storage->tx_packets = hw_stats->tx_packets;
		storage->rx_bytes = hw_stats->rx_bytes;
		storage->tx_bytes = hw_stats->tx_bytes;
		storage->collisions = hw_stats->tx_collisions;
		storage->rx_length_errors = hw_stats->rx_short_errors +
			hw_stats->rx_long_errors;
		storage->rx_over_errors = hw_stats->rx_overflow;
		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
		storage->rx_errors = hw_stats->rx_checksum_errors;
		storage->tx_aborted_errors = hw_stats->tx_skip;
	} while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));

	storage->tx_errors = dev->stats.tx_errors;
	storage->rx_dropped = dev->stats.rx_dropped;
	storage->tx_dropped = dev->stats.tx_dropped;

	return storage;
}

static inline int mtk_max_frag_size(int mtu)
{
	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
		mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;

	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static inline int mtk_max_buf_size(int frag_size)
{
	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	WARN_ON(buf_size < MTK_MAX_RX_LENGTH);

	return buf_size;
}

static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
				   struct mtk_rx_dma *dma_rxd)
{
	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
}

/* the qdma core needs scratch memory to be setup */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
	dma_addr_t phy_ring_tail;
	int cnt = MTK_DMA_SIZE;
	dma_addr_t dma_addr;
	int i;

	eth->scratch_ring = dma_alloc_coherent(eth->dev,
					       cnt * sizeof(struct mtk_tx_dma),
					       &eth->phy_scratch_ring,
					       GFP_ATOMIC | __GFP_ZERO);
	if (unlikely(!eth->scratch_ring))
		return -ENOMEM;

	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
				    GFP_KERNEL);
	if (unlikely(!eth->scratch_head))
		return -ENOMEM;

	dma_addr = dma_map_single(eth->dev,
				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
		return -ENOMEM;

	memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
	phy_ring_tail = eth->phy_scratch_ring +
			(sizeof(struct mtk_tx_dma) * (cnt - 1));

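	/* chain the scratch descriptors: txd1 points at the page backing
	 * each descriptor, txd2 at the next descriptor in the free queue
	 */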
	for (i = 0; i < cnt; i++) {
		eth->scratch_ring[i].txd1 =
					(dma_addr + (i * MTK_QDMA_PAGE_SIZE));
		if (i < cnt - 1)
			eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
				((i + 1) * sizeof(struct mtk_tx_dma)));
		eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
	}

	mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
	mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
	mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);

	return 0;
}

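/* the TX ring is a single coherent allocation, so a physical descriptor
 * address can be translated back to its virtual address by offset
 */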
static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
{
	void *ret = ring->dma;

	return ret + (desc - ring->phys);
}

static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
						    struct mtk_tx_dma *txd)
{
	int idx = txd - ring->dma;

	return &ring->buf[idx];
}

static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
{
	if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
		dma_unmap_single(eth->dev,
				 dma_unmap_addr(tx_buf, dma_addr0),
				 dma_unmap_len(tx_buf, dma_len0),
				 DMA_TO_DEVICE);
	} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
		dma_unmap_page(eth->dev,
			       dma_unmap_addr(tx_buf, dma_addr0),
			       dma_unmap_len(tx_buf, dma_len0),
			       DMA_TO_DEVICE);
	}
	tx_buf->flags = 0;
	if (tx_buf->skb &&
	    (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
		dev_kfree_skb_any(tx_buf->skb);
	tx_buf->skb = NULL;
}

static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
		      int tx_num, struct mtk_tx_ring *ring, bool gso)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma *itxd, *txd;
	struct mtk_tx_buf *tx_buf;
	dma_addr_t mapped_addr;
	unsigned int nr_frags;
	int i, n_desc = 1;
	u32 txd4 = 0, fport;

	itxd = ring->next_free;
	if (itxd == ring->last_free)
		return -ENOMEM;

	/* set the forward port */
	fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
	txd4 |= fport;

	tx_buf = mtk_desc_to_tx_buf(ring, itxd);
	memset(tx_buf, 0, sizeof(*tx_buf));

	if (gso)
		txd4 |= TX_DMA_TSO;

	/* TX Checksum offload */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		txd4 |= TX_DMA_CHKSUM;

	/* VLAN header offload */
	if (skb_vlan_tag_present(skb))
		txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);

	mapped_addr = dma_map_single(eth->dev, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
		return -ENOMEM;

	WRITE_ONCE(itxd->txd1, mapped_addr);
	tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
	dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
	dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));

	/* TX SG offload */
	txd = itxd;
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		unsigned int offset = 0;
		int frag_size = skb_frag_size(frag);

		while (frag_size) {
			bool last_frag = false;
			unsigned int frag_map_size;

			txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
			if (txd == ring->last_free)
				goto err_dma;

			n_desc++;
			frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
			mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
						       frag_map_size,
						       DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
				goto err_dma;

			if (i == nr_frags - 1 &&
			    (frag_size - frag_map_size) == 0)
				last_frag = true;

			WRITE_ONCE(txd->txd1, mapped_addr);
			WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
					       TX_DMA_PLEN0(frag_map_size) |
					       last_frag * TX_DMA_LS0));
			WRITE_ONCE(txd->txd4, fport);

			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
			tx_buf = mtk_desc_to_tx_buf(ring, txd);
			memset(tx_buf, 0, sizeof(*tx_buf));

			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
			frag_size -= frag_map_size;
			offset += frag_map_size;
		}
	}

	/* store skb to cleanup */
	tx_buf->skb = skb;

	WRITE_ONCE(itxd->txd4, txd4);
	WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
				(!nr_frags * TX_DMA_LS0)));

	netdev_sent_queue(dev, skb->len);
	skb_tx_timestamp(skb);

	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
	atomic_sub(n_desc, &ring->free_count);

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
		mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);

	return 0;

err_dma:
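	/* roll back every descriptor claimed so far and return it to the CPU */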
	do {
		tx_buf = mtk_desc_to_tx_buf(ring, itxd);

		/* unmap dma */
		mtk_tx_unmap(eth, tx_buf);

		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
	} while (itxd != txd);

	return -ENOMEM;
}

static inline int mtk_cal_txd_req(struct sk_buff *skb)
{
	int i, nfrags;
	struct skb_frag_struct *frag;

	nfrags = 1;
	if (skb_is_gso(skb)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			nfrags += DIV_ROUND_UP(frag->size, MTK_TX_DMA_BUF_LEN);
		}
	} else {
		nfrags += skb_shinfo(skb)->nr_frags;
	}

	return nfrags;
}

static int mtk_queue_stopped(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		if (netif_queue_stopped(eth->netdev[i]))
			return 1;
	}

	return 0;
}

static void mtk_wake_queue(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_wake_queue(eth->netdev[i]);
	}
}

static void mtk_stop_queue(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_stop_queue(eth->netdev[i]);
	}
}

static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct net_device_stats *stats = &dev->stats;
	bool gso = false;
	int tx_num;

	/* normally we can rely on the stack not calling this more than once,
	 * however we have 2 queues running on the same ring so we need to lock
	 * the ring access
	 */
	spin_lock(&eth->page_lock);

	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
		goto drop;

	tx_num = mtk_cal_txd_req(skb);
	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
		mtk_stop_queue(eth);
		netif_err(eth, tx_queued, dev,
			  "Tx Ring full when queue awake!\n");
		spin_unlock(&eth->page_lock);
		return NETDEV_TX_BUSY;
	}

	/* TSO: fill MSS info in tcp checksum field */
	if (skb_is_gso(skb)) {
		if (skb_cow_head(skb, 0)) {
			netif_warn(eth, tx_err, dev,
				   "GSO expand head fail.\n");
			goto drop;
		}

		if (skb_shinfo(skb)->gso_type &
				(SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
			gso = true;
			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
		}
	}

	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
		goto drop;

	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
		mtk_stop_queue(eth);

	spin_unlock(&eth->page_lock);

	return NETDEV_TX_OK;

drop:
	spin_unlock(&eth->page_lock);
	stats->tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
{
	int i;
	struct mtk_rx_ring *ring;
	int idx;

	if (!eth->hwlro)
		return &eth->rx_ring[0];

	for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
		ring = &eth->rx_ring[i];
		idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
		if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
			ring->calc_idx_update = true;
			return ring;
		}
	}

	return NULL;
}

static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
{
	struct mtk_rx_ring *ring;
	int i;

	if (!eth->hwlro) {
		ring = &eth->rx_ring[0];
		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
	} else {
		for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
			ring = &eth->rx_ring[i];
			if (ring->calc_idx_update) {
				ring->calc_idx_update = false;
				mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
			}
		}
	}
}

static int mtk_poll_rx(struct napi_struct *napi, int budget,
		       struct mtk_eth *eth)
{
	struct mtk_rx_ring *ring;
	int idx;
	struct sk_buff *skb;
	u8 *data, *new_data;
	struct mtk_rx_dma *rxd, trxd;
	int done = 0;

	while (done < budget) {
		struct net_device *netdev;
		unsigned int pktlen;
		dma_addr_t dma_addr;
		int mac = 0;

		ring = mtk_get_rx_ring(eth);
		if (unlikely(!ring))
			goto rx_done;

		idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
		rxd = &ring->dma[idx];
		data = ring->data[idx];

		mtk_rx_get_desc(&trxd, rxd);
		if (!(trxd.rxd2 & RX_DMA_DONE))
			break;

		/* find out which mac the packet come from. values start at 1 */
		mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
		      RX_DMA_FPORT_MASK;
		mac--;

		netdev = eth->netdev[mac];

		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
			goto release_desc;

		/* alloc new buffer */
		new_data = napi_alloc_frag(ring->frag_size);
		if (unlikely(!new_data)) {
			netdev->stats.rx_dropped++;
			goto release_desc;
		}
		dma_addr = dma_map_single(eth->dev,
					  new_data + NET_SKB_PAD,
					  ring->buf_size,
					  DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
			skb_free_frag(new_data);
			netdev->stats.rx_dropped++;
			goto release_desc;
		}

		/* receive data */
		skb = build_skb(data, ring->frag_size);
		if (unlikely(!skb)) {
			skb_free_frag(new_data);
			netdev->stats.rx_dropped++;
			goto release_desc;
		}
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

		dma_unmap_single(eth->dev, trxd.rxd1,
				 ring->buf_size, DMA_FROM_DEVICE);
		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
		skb->dev = netdev;
		skb_put(skb, pktlen);
		if (trxd.rxd4 & RX_DMA_L4_VALID)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, netdev);

		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
		    RX_DMA_VID(trxd.rxd3))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       RX_DMA_VID(trxd.rxd3));
		napi_gro_receive(napi, skb);

		ring->data[idx] = new_data;
		rxd->rxd1 = (unsigned int)dma_addr;

release_desc:
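		/* hand the descriptor back to the hardware with the buffer size reset */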
		rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);

		ring->calc_idx = idx;

		done++;
	}

rx_done:
	if (done) {
		/* make sure that all changes to the dma ring are flushed before
		 * we continue
		 */
		wmb();
		mtk_update_rx_cpu_idx(eth);
	}

	return done;
}

static int mtk_poll_tx(struct mtk_eth *eth, int budget)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct mtk_tx_dma *desc;
	struct sk_buff *skb;
	struct mtk_tx_buf *tx_buf;
	unsigned int done[MTK_MAX_DEVS];
	unsigned int bytes[MTK_MAX_DEVS];
	u32 cpu, dma;
	static int condition;
	int total = 0, i;

	memset(done, 0, sizeof(done));
	memset(bytes, 0, sizeof(bytes));

	cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
	dma = mtk_r32(eth, MTK_QTX_DRX_PTR);

	desc = mtk_qdma_phys_to_virt(ring, cpu);

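	/* walk from the CPU release pointer towards the DMA fetch pointer,
	 * reclaiming completed descriptors as we go
	 */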
	while ((cpu != dma) && budget) {
		u32 next_cpu = desc->txd2;
		int mac;

		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
			break;

		mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
		       TX_DMA_FPORT_MASK;
		mac--;

		tx_buf = mtk_desc_to_tx_buf(ring, desc);
		skb = tx_buf->skb;
		if (!skb) {
			condition = 1;
			break;
		}

		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
			bytes[mac] += skb->len;
			done[mac]++;
			budget--;
		}
		mtk_tx_unmap(eth, tx_buf);

		ring->last_free = desc;
		atomic_inc(&ring->free_count);

		cpu = next_cpu;
	}

	mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i] || !done[i])
			continue;
		netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
		total += done[i];
	}

	if (mtk_queue_stopped(eth) &&
	    (atomic_read(&ring->free_count) > ring->thresh))
		mtk_wake_queue(eth);

	return total;
}

static void mtk_handle_status_irq(struct mtk_eth *eth)
{
	u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);

	if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
		mtk_stats_update(eth);
		mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
			MTK_INT_STATUS2);
	}
}

static int mtk_napi_tx(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
	u32 status, mask;
	int tx_done = 0;

	mtk_handle_status_irq(eth);
	mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
	tx_done = mtk_poll_tx(eth, budget);

	if (unlikely(netif_msg_intr(eth))) {
		status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
		mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
		dev_info(eth->dev,
			 "done tx %d, intr 0x%08x/0x%x\n",
			 tx_done, status, mask);
	}

	if (tx_done == budget)
		return budget;

	status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
	if (status & MTK_TX_DONE_INT)
		return budget;

	napi_complete(napi);
	mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);

	return tx_done;
}

static int mtk_napi_rx(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
	u32 status, mask;
	int rx_done = 0;
	int remain_budget = budget;

	mtk_handle_status_irq(eth);

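	/* re-poll as long as the hardware keeps asserting RX_DONE and budget remains */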
poll_again:
	mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
	rx_done = mtk_poll_rx(napi, remain_budget, eth);

	if (unlikely(netif_msg_intr(eth))) {
		status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
		mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
		dev_info(eth->dev,
			 "done rx %d, intr 0x%08x/0x%x\n",
			 rx_done, status, mask);
	}
	if (rx_done == remain_budget)
		return budget;

	status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
	if (status & MTK_RX_DONE_INT) {
		remain_budget -= rx_done;
		goto poll_again;
	}
	napi_complete(napi);
	mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);

	return rx_done + budget - remain_budget;
}

static int mtk_tx_alloc(struct mtk_eth *eth)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i, sz = sizeof(*ring->dma);

	ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
			       GFP_KERNEL);
	if (!ring->buf)
		goto no_tx_mem;

	ring->dma = dma_alloc_coherent(eth->dev,
					  MTK_DMA_SIZE * sz,
					  &ring->phys,
					  GFP_ATOMIC | __GFP_ZERO);
	if (!ring->dma)
		goto no_tx_mem;

	memset(ring->dma, 0, MTK_DMA_SIZE * sz);
	for (i = 0; i < MTK_DMA_SIZE; i++) {
		int next = (i + 1) % MTK_DMA_SIZE;
		u32 next_ptr = ring->phys + next * sz;

		ring->dma[i].txd2 = next_ptr;
		ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
	}

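	/* next_free meeting last_free marks the ring full, so two slots stay reserved */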
	atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
	ring->next_free = &ring->dma[0];
	ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
	ring->thresh = MAX_SKB_FRAGS;

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
	mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
	mtk_w32(eth,
		ring->phys + ((MTK_DMA_SIZE - 1) * sz),
		MTK_QTX_CRX_PTR);
	mtk_w32(eth,
		ring->phys + ((MTK_DMA_SIZE - 1) * sz),
		MTK_QTX_DRX_PTR);
	mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));

	return 0;

no_tx_mem:
	return -ENOMEM;
}

static void mtk_tx_clean(struct mtk_eth *eth)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i;

	if (ring->buf) {
		for (i = 0; i < MTK_DMA_SIZE; i++)
			mtk_tx_unmap(eth, &ring->buf[i]);
		kfree(ring->buf);
		ring->buf = NULL;
	}

	if (ring->dma) {
		dma_free_coherent(eth->dev,
				  MTK_DMA_SIZE * sizeof(*ring->dma),
				  ring->dma,
				  ring->phys);
		ring->dma = NULL;
	}
}

static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
{
	struct mtk_rx_ring *ring = &eth->rx_ring[ring_no];
	int rx_data_len, rx_dma_size;
	int i;

	if (rx_flag == MTK_RX_FLAGS_HWLRO) {
		rx_data_len = MTK_MAX_LRO_RX_LENGTH;
		rx_dma_size = MTK_HW_LRO_DMA_SIZE;
	} else {
		rx_data_len = ETH_DATA_LEN;
		rx_dma_size = MTK_DMA_SIZE;
	}

	ring->frag_size = mtk_max_frag_size(rx_data_len);
	ring->buf_size = mtk_max_buf_size(ring->frag_size);
	ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
			     GFP_KERNEL);
	if (!ring->data)
		return -ENOMEM;

	for (i = 0; i < rx_dma_size; i++) {
		ring->data[i] = netdev_alloc_frag(ring->frag_size);
		if (!ring->data[i])
			return -ENOMEM;
	}

	ring->dma = dma_alloc_coherent(eth->dev,
				       rx_dma_size * sizeof(*ring->dma),
				       &ring->phys,
				       GFP_ATOMIC | __GFP_ZERO);
	if (!ring->dma)
		return -ENOMEM;

	for (i = 0; i < rx_dma_size; i++) {
		dma_addr_t dma_addr = dma_map_single(eth->dev,
				ring->data[i] + NET_SKB_PAD,
				ring->buf_size,
				DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
			return -ENOMEM;
		ring->dma[i].rxd1 = (unsigned int)dma_addr;

		ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
	}
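	/* start the CPU index at the last descriptor so the hardware owns the whole ring */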
	ring->dma_size = rx_dma_size;
	ring->calc_idx_update = false;
	ring->calc_idx = rx_dma_size - 1;
	ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no);
	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no));
	mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no));
	mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
	mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX);
1266 1267 1268 1269

	return 0;
}

static void mtk_rx_clean(struct mtk_eth *eth, int ring_no)
{
	struct mtk_rx_ring *ring = &eth->rx_ring[ring_no];
	int i;

	if (ring->data && ring->dma) {
		for (i = 0; i < ring->dma_size; i++) {
			if (!ring->data[i])
				continue;
			if (!ring->dma[i].rxd1)
				continue;
			dma_unmap_single(eth->dev,
					 ring->dma[i].rxd1,
					 ring->buf_size,
					 DMA_FROM_DEVICE);
			skb_free_frag(ring->data[i]);
		}
		kfree(ring->data);
		ring->data = NULL;
	}

	if (ring->dma) {
		dma_free_coherent(eth->dev,
				  ring->dma_size * sizeof(*ring->dma),
				  ring->dma,
				  ring->phys);
		ring->dma = NULL;
	}
}

static int mtk_hwlro_rx_init(struct mtk_eth *eth)
{
	int i;
	u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
	u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;

	/* set LRO rings to auto-learn modes */
	ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;

	/* validate LRO ring */
	ring_ctrl_dw2 |= MTK_RING_VLD;

	/* set AGE timer (unit: 20us) */
	ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
	ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;

	/* set max AGG timer (unit: 20us) */
	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;

	/* set max LRO AGG count */
	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
	ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;

	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
		mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
		mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
		mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
	}

	/* IPv4 checksum update enable */
	lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;

	/* switch priority comparison to packet count mode */
	lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;

	/* bandwidth threshold setting */
	mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);

	/* auto-learn score delta setting */
	mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);

	/* set refresh timer for altering flows to 1 sec. (unit: 20us) */
	mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
		MTK_PDMA_LRO_ALT_REFRESH_TIMER);

	/* set HW LRO mode & the max aggregation count for rx packets */
	lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);

	/* the minimal remaining room of SDL0 in RXD for lro aggregation */
	lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;

	/* enable HW LRO */
	lro_ctrl_dw0 |= MTK_LRO_EN;

	mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
	mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);

	return 0;
}

static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
{
	int i;
	u32 val;

	/* relinquish lro rings, flush aggregated packets */
	mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);

	/* wait for relinquishments done */
	for (i = 0; i < 10; i++) {
		val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
		if (val & MTK_LRO_RING_RELINQUISH_DONE) {
			msleep(20);
			continue;
		}
	}

	/* invalidate lro rings */
	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
		mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));

	/* disable HW LRO */
	mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
}

static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
{
	u32 reg_val;

	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));

	/* invalidate the IP setting */
	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));

	mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));

	/* validate the IP setting */
	mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
}

static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
{
	u32 reg_val;

	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));

	/* invalidate the IP setting */
	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));

	mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
}

static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
{
	int cnt = 0;
	int i;

	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
		if (mac->hwlro_ip[i])
			cnt++;
	}

	return cnt;
}

static int mtk_hwlro_add_ipaddr(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int hwlro_idx;

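	/* only TCP/IPv4 destination-IP rules are accepted, one per LRO slot */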
	if ((fsp->flow_type != TCP_V4_FLOW) ||
	    (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
	    (fsp->location > 1))
		return -EINVAL;

	mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;

	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);

	mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);

	return 0;
}

static int mtk_hwlro_del_ipaddr(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int hwlro_idx;

	if (fsp->location > 1)
		return -EINVAL;

	mac->hwlro_ip[fsp->location] = 0;
	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;

	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);

	mtk_hwlro_inval_ipaddr(eth, hwlro_idx);

	return 0;
}

static void mtk_hwlro_netdev_disable(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int i, hwlro_idx;

	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
		mac->hwlro_ip[i] = 0;
		hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;

		mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
	}

	mac->hwlro_ip_cnt = 0;
}

static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
				    struct ethtool_rxnfc *cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;

	/* only tcp dst ipv4 is meaningful, others are meaningless */
	fsp->flow_type = TCP_V4_FLOW;
	fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
	fsp->m_u.tcp_ip4_spec.ip4dst = 0;

	fsp->h_u.tcp_ip4_spec.ip4src = 0;
	fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
	fsp->h_u.tcp_ip4_spec.psrc = 0;
	fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
	fsp->h_u.tcp_ip4_spec.pdst = 0;
	fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
	fsp->h_u.tcp_ip4_spec.tos = 0;
	fsp->m_u.tcp_ip4_spec.tos = 0xff;

	return 0;
}

static int mtk_hwlro_get_fdir_all(struct net_device *dev,
				  struct ethtool_rxnfc *cmd,
				  u32 *rule_locs)
{
	struct mtk_mac *mac = netdev_priv(dev);
	int cnt = 0;
	int i;

	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
		if (mac->hwlro_ip[i]) {
			rule_locs[cnt] = i;
			cnt++;
		}
	}

	cmd->rule_cnt = cnt;

	return 0;
}

static netdev_features_t mtk_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	if (!(features & NETIF_F_LRO)) {
		struct mtk_mac *mac = netdev_priv(dev);
		int ip_cnt = mtk_hwlro_get_ip_cnt(mac);

		if (ip_cnt) {
			netdev_info(dev, "RX flow is programmed, LRO should keep on\n");

			features |= NETIF_F_LRO;
		}
	}

	return features;
}

static int mtk_set_features(struct net_device *dev, netdev_features_t features)
{
	int err = 0;

	if (!((dev->features ^ features) & NETIF_F_LRO))
		return 0;

	if (!(features & NETIF_F_LRO))
		mtk_hwlro_netdev_disable(dev);

	return err;
}

/* wait for DMA to finish whatever it is doing before we start using it again */
static int mtk_dma_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
		      (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
			return 0;
		if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
			break;
	}

	dev_err(eth->dev, "DMA init timeout\n");
	return -1;
}

static int mtk_dma_init(struct mtk_eth *eth)
{
	int err;
	u32 i;

	if (mtk_dma_busy_wait(eth))
		return -EBUSY;

	/* QDMA needs scratch memory for internal reordering of the
	 * descriptors
	 */
	err = mtk_init_fq_dma(eth);
	if (err)
		return err;

	err = mtk_tx_alloc(eth);
	if (err)
		return err;

	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
	if (err)
		return err;

	if (eth->hwlro) {
		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
			err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
			if (err)
				return err;
		}
		err = mtk_hwlro_rx_init(eth);
		if (err)
			return err;
	}

	/* Enable random early drop and set drop threshold automatically */
	mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN,
		MTK_QDMA_FC_THRES);
	mtk_w32(eth, 0x0, MTK_QDMA_HRED2);

	return 0;
}

static void mtk_dma_free(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++)
		if (eth->netdev[i])
			netdev_reset_queue(eth->netdev[i]);
	if (eth->scratch_ring) {
		dma_free_coherent(eth->dev,
				  MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
				  eth->scratch_ring,
				  eth->phy_scratch_ring);
		eth->scratch_ring = NULL;
		eth->phy_scratch_ring = 0;
	}
	mtk_tx_clean(eth);
	mtk_rx_clean(eth, 0);

	if (eth->hwlro) {
		mtk_hwlro_rx_uninit(eth);
		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
			mtk_rx_clean(eth, i);
	}

	kfree(eth->scratch_head);
}

static void mtk_tx_timeout(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	eth->netdev[mac->id]->stats.tx_errors++;
	netif_err(eth, tx_err, dev,
		  "transmit timed out\n");
	schedule_work(&eth->pending_work);
}

static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;

	if (likely(napi_schedule_prep(&eth->rx_napi))) {
		__napi_schedule(&eth->rx_napi);
		mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
	}

	return IRQ_HANDLED;
}

static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;

	if (likely(napi_schedule_prep(&eth->tx_napi))) {
		__napi_schedule(&eth->tx_napi);
		mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
	}

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mtk_poll_controller(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
	mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
	mtk_handle_irq_rx(eth->irq[2], dev);
	mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
	mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
}
#endif

static int mtk_start_dma(struct mtk_eth *eth)
{
	int err;

	err = mtk_dma_init(eth);
	if (err) {
		mtk_dma_free(eth);
		return err;
	}

	mtk_w32(eth,
		MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
		MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO,
		MTK_QDMA_GLO_CFG);

	mtk_w32(eth,
		MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
		MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
		MTK_PDMA_GLO_CFG);

	return 0;
}

static int mtk_open(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	/* we run 2 netdevs on the same dma ring so we only bring it up once */
	if (!atomic_read(&eth->dma_refcnt)) {
		int err = mtk_start_dma(eth);

		if (err)
			return err;

		napi_enable(&eth->tx_napi);
		napi_enable(&eth->rx_napi);
		mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
		mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
	}
	atomic_inc(&eth->dma_refcnt);

	phy_start(dev->phydev);
	netif_start_queue(dev);

	return 0;
}

static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
{
	u32 val;
	int i;

	/* stop the dma engine */
	spin_lock_bh(&eth->page_lock);
	val = mtk_r32(eth, glo_cfg);
	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
		glo_cfg);
	spin_unlock_bh(&eth->page_lock);

	/* wait for dma stop */
	for (i = 0; i < 10; i++) {
		val = mtk_r32(eth, glo_cfg);
		if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
			msleep(20);
			continue;
		}
		break;
	}
}

static int mtk_stop(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	netif_tx_disable(dev);
	phy_stop(dev->phydev);

	/* only shutdown DMA if this is the last user */
	if (!atomic_dec_and_test(&eth->dma_refcnt))
		return 0;

	mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
	mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
	napi_disable(&eth->tx_napi);
	napi_disable(&eth->rx_napi);

	mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);

	mtk_dma_free(eth);

	return 0;
}

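/* assert the given bits in the ethsys reset control register, then
 * release them again after a short settle time
 */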
static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
{
	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
			   reset_bits,
			   reset_bits);

	usleep_range(1000, 1100);
	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
			   reset_bits,
			   ~reset_bits);
	mdelay(10);
}

static int mtk_hw_init(struct mtk_eth *eth)
{
	int i, val;

	if (test_and_set_bit(MTK_HW_INIT, &eth->state))
		return 0;

	pm_runtime_enable(eth->dev);
	pm_runtime_get_sync(eth->dev);

	clk_prepare_enable(eth->clks[MTK_CLK_ETHIF]);
	clk_prepare_enable(eth->clks[MTK_CLK_ESW]);
	clk_prepare_enable(eth->clks[MTK_CLK_GP1]);
	clk_prepare_enable(eth->clks[MTK_CLK_GP2]);
	ethsys_reset(eth, RSTCTRL_FE);
	ethsys_reset(eth, RSTCTRL_PPE);

	regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->mac[i])
			continue;
		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, eth->mac[i]->id);
		val |= SYSCFG0_GE_MODE(eth->mac[i]->ge_mode, eth->mac[i]->id);
	}
	regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);

	/* Set GE2 driving and slew rate */
	regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);

	/* set GE2 TDSEL */
	regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);

	/* set GE2 TUNE */
	regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);

	/* GE1, Force 1000M/FD, FC ON */
	mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(0));

	/* GE2, Force 1000M/FD, FC ON */
	mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1));

	/* Enable RX VLan Offloading */
	mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);

	/* disable delay and normal interrupt */
	mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
	mtk_w32(eth, 0, MTK_PDMA_DELAY_INT);
	mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
	mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
	mtk_w32(eth, 0, MTK_RST_GL);

	/* FE int grouping */
	mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
	mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
	mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
	mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);

	for (i = 0; i < 2; i++) {
		u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));

		/* setup the forward port to send frame to PDMA */
		val &= ~0xffff;

		/* Enable RX checksum */
		val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;

		/* setup the mac dma */
		mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
	}

	return 0;
}

static int mtk_hw_deinit(struct mtk_eth *eth)
{
	if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
		return 0;

	clk_disable_unprepare(eth->clks[MTK_CLK_GP2]);
	clk_disable_unprepare(eth->clks[MTK_CLK_GP1]);
	clk_disable_unprepare(eth->clks[MTK_CLK_ESW]);
	clk_disable_unprepare(eth->clks[MTK_CLK_ETHIF]);

	pm_runtime_put_sync(eth->dev);
	pm_runtime_disable(eth->dev);

	return 0;
}

static int __init mtk_init(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const char *mac_addr;

	mac_addr = of_get_mac_address(mac->of_node);
	if (mac_addr)
		ether_addr_copy(dev->dev_addr, mac_addr);

	/* If the mac address is invalid, use random mac address  */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		random_ether_addr(dev->dev_addr);
		dev_err(eth->dev, "generated random MAC address %pM\n",
			dev->dev_addr);
		dev->addr_assign_type = NET_ADDR_RANDOM;
	}

	return mtk_phy_connect(dev);
}

static void mtk_uninit(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	phy_disconnect(dev->phydev);
	mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
	mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
}

static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return phy_mii_ioctl(dev->phydev, ifr, cmd);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static void mtk_pending_work(struct work_struct *work)
{
	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
	int err, i;
	unsigned long restart = 0;

	rtnl_lock();

	dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);

	while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
		cpu_relax();

	dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__);
	/* stop all devices to make sure that dma is properly shut down */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		mtk_stop(eth->netdev[i]);
		__set_bit(i, &restart);
	}
	dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);

	/* restart underlying hardware such as power, clock, pin mux
	 * and the connected phy
	 */
	mtk_hw_deinit(eth);

	if (eth->dev->pins)
		pinctrl_select_state(eth->dev->pins->p,
				     eth->dev->pins->default_state);
	mtk_hw_init(eth);

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->mac[i] ||
		    of_phy_is_fixed_link(eth->mac[i]->of_node))
			continue;
		err = phy_init_hw(eth->netdev[i]->phydev);
		if (err)
			dev_err(eth->dev, "%s: PHY init failed.\n",
				eth->netdev[i]->name);
	}

	/* restart DMA and enable IRQs */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!test_bit(i, &restart))
			continue;
		err = mtk_open(eth->netdev[i]);
		if (err) {
			netif_alert(eth, ifup, eth->netdev[i],
			      "Driver up/down cycle failed, closing device.\n");
			dev_close(eth->netdev[i]);
		}
	}

	dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);

	clear_bit_unlock(MTK_RESETTING, &eth->state);

	rtnl_unlock();
}

static int mtk_free_dev(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		free_netdev(eth->netdev[i]);
	}

	return 0;
}

static int mtk_unreg_dev(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		unregister_netdev(eth->netdev[i]);
	}

	return 0;
}

static int mtk_cleanup(struct mtk_eth *eth)
{
	mtk_unreg_dev(eth);
	mtk_free_dev(eth);
	cancel_work_sync(&eth->pending_work);

	return 0;
}

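/* Link settings are handled entirely by phylib; both accessors simply
 * refuse with -EBUSY while mtk_pending_work() is resetting the hardware.
 */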
int mtk_get_link_ksettings(struct net_device *ndev,
			   struct ethtool_link_ksettings *cmd)
{
	struct mtk_mac *mac = netdev_priv(ndev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	return phy_ethtool_ksettings_get(ndev->phydev, cmd);
}

int mtk_set_link_ksettings(struct net_device *ndev,
			   const struct ethtool_link_ksettings *cmd)
{
	struct mtk_mac *mac = netdev_priv(ndev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	return phy_ethtool_ksettings_set(ndev->phydev, cmd);
}

static void mtk_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	struct mtk_mac *mac = netdev_priv(dev);

	strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
	strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
}

static u32 mtk_get_msglevel(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	return mac->hw->msg_enable;
}

static void mtk_set_msglevel(struct net_device *dev, u32 value)
{
	struct mtk_mac *mac = netdev_priv(dev);

	mac->hw->msg_enable = value;
}

static int mtk_nway_reset(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	return genphy_restart_aneg(dev->phydev);
}

static u32 mtk_get_link(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	int err;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

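	/* Refresh the link state from the PHY; if that fails, fall back
	 * to the carrier state cached in the netdev.
	 */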
	err = genphy_update_link(dev->phydev);
	if (err)
		return ethtool_op_get_link(dev);

	return dev->phydev->link;
}

static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
			memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int mtk_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(mtk_ethtool_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void mtk_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hwstats = mac->hw_stats;
	u64 *data_src, *data_dst;
	unsigned int start;
	int i;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock(&hwstats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock(&hwstats->stats_lock);
		}
	}

	data_src = (u64 *)hwstats;

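	/* Copy the counters out under the u64_stats seqcount and retry
	 * if the writer updated them mid-copy, so 32-bit readers still
	 * see consistent 64-bit values.
	 */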
	do {
		data_dst = data;
		start = u64_stats_fetch_begin_irq(&hwstats->syncp);

		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
	} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
}

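/* The rxnfc hooks only manage the IP rules used by hardware LRO, so
 * every branch below first checks that NETIF_F_LRO is actually enabled.
 */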
static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			 u32 *rule_locs)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		if (dev->features & NETIF_F_LRO) {
			cmd->data = MTK_MAX_RX_RING_NUM;
			ret = 0;
		}
		break;
	case ETHTOOL_GRXCLSRLCNT:
		if (dev->features & NETIF_F_LRO) {
			struct mtk_mac *mac = netdev_priv(dev);

			cmd->rule_cnt = mac->hwlro_ip_cnt;
			ret = 0;
		}
		break;
	case ETHTOOL_GRXCLSRULE:
		if (dev->features & NETIF_F_LRO)
			ret = mtk_hwlro_get_fdir_entry(dev, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		if (dev->features & NETIF_F_LRO)
			ret = mtk_hwlro_get_fdir_all(dev, cmd,
						     rule_locs);
		break;
	default:
		break;
	}

	return ret;
}

static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		if (dev->features & NETIF_F_LRO)
			ret = mtk_hwlro_add_ipaddr(dev, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		if (dev->features & NETIF_F_LRO)
			ret = mtk_hwlro_del_ipaddr(dev, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static const struct ethtool_ops mtk_ethtool_ops = {
	.get_link_ksettings	= mtk_get_link_ksettings,
	.set_link_ksettings	= mtk_set_link_ksettings,
	.get_drvinfo		= mtk_get_drvinfo,
	.get_msglevel		= mtk_get_msglevel,
	.set_msglevel		= mtk_set_msglevel,
	.nway_reset		= mtk_nway_reset,
	.get_link		= mtk_get_link,
	.get_strings		= mtk_get_strings,
	.get_sset_count		= mtk_get_sset_count,
	.get_ethtool_stats	= mtk_get_ethtool_stats,
	.get_rxnfc		= mtk_get_rxnfc,
	.set_rxnfc		= mtk_set_rxnfc,
};

static const struct net_device_ops mtk_netdev_ops = {
	.ndo_init		= mtk_init,
	.ndo_uninit		= mtk_uninit,
	.ndo_open		= mtk_open,
	.ndo_stop		= mtk_stop,
	.ndo_start_xmit		= mtk_start_xmit,
	.ndo_set_mac_address	= mtk_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= mtk_do_ioctl,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_tx_timeout		= mtk_tx_timeout,
	.ndo_get_stats64	= mtk_get_stats64,
	.ndo_fix_features	= mtk_fix_features,
	.ndo_set_features	= mtk_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mtk_poll_controller,
#endif
};

static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
{
	struct mtk_mac *mac;
	const __be32 *_id = of_get_property(np, "reg", NULL);
	int id, err;

	if (!_id) {
		dev_err(eth->dev, "missing mac id\n");
		return -EINVAL;
	}

	id = be32_to_cpup(_id);
	if (id >= MTK_MAC_COUNT) {
		dev_err(eth->dev, "%d is not a valid mac id\n", id);
		return -EINVAL;
	}

	if (eth->netdev[id]) {
		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
		return -EINVAL;
	}

	eth->netdev[id] = alloc_etherdev(sizeof(*mac));
	if (!eth->netdev[id]) {
		dev_err(eth->dev, "alloc_etherdev failed\n");
		return -ENOMEM;
	}
	mac = netdev_priv(eth->netdev[id]);
	eth->mac[id] = mac;
	mac->id = id;
	mac->hw = eth;
	mac->of_node = np;

	memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
	mac->hwlro_ip_cnt = 0;

	mac->hw_stats = devm_kzalloc(eth->dev,
				     sizeof(*mac->hw_stats),
				     GFP_KERNEL);
	if (!mac->hw_stats) {
		dev_err(eth->dev, "failed to allocate counter memory\n");
		err = -ENOMEM;
		goto free_netdev;
	}
	spin_lock_init(&mac->hw_stats->stats_lock);
	u64_stats_init(&mac->hw_stats->syncp);
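	/* Each GMAC has its own block of hardware counters, spaced
	 * MTK_STAT_OFFSET apart in register space; record where this
	 * MAC's block starts.
	 */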
	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;

	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
	eth->netdev[id]->watchdog_timeo = 5 * HZ;
	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
	eth->netdev[id]->base_addr = (unsigned long)eth->base;

	eth->netdev[id]->hw_features = MTK_HW_FEATURES;
	if (eth->hwlro)
		eth->netdev[id]->hw_features |= NETIF_F_LRO;

	eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
	eth->netdev[id]->features |= MTK_HW_FEATURES;
	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;

	eth->netdev[id]->irq = eth->irq[0];
	return 0;

free_netdev:
	free_netdev(eth->netdev[id]);
	return err;
}

static int mtk_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct device_node *mac_np;
	const struct of_device_id *match;
	struct mtk_soc_data *soc;
	struct mtk_eth *eth;
	int err;
	int i;

	match = of_match_device(of_mtk_match, &pdev->dev);
	soc = (struct mtk_soc_data *)match->data;

	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
	if (!eth)
		return -ENOMEM;

	eth->dev = &pdev->dev;
	eth->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(eth->base))
		return PTR_ERR(eth->base);

	spin_lock_init(&eth->page_lock);
	spin_lock_init(&eth->irq_lock);

	eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
						      "mediatek,ethsys");
	if (IS_ERR(eth->ethsys)) {
		dev_err(&pdev->dev, "no ethsys regmap found\n");
		return PTR_ERR(eth->ethsys);
	}

	eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
						    "mediatek,pctl");
	if (IS_ERR(eth->pctl)) {
		dev_err(&pdev->dev, "no pctl regmap found\n");
		return PTR_ERR(eth->pctl);
	}

	eth->hwlro = of_property_read_bool(pdev->dev.of_node, "mediatek,hwlro");

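	/* The frame engine exposes three IRQ lines: irq[0] is reported
	 * via the netdevs, while irq[1] and irq[2] are requested further
	 * down for TX and RX completion respectively.
	 */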
	for (i = 0; i < 3; i++) {
		eth->irq[i] = platform_get_irq(pdev, i);
		if (eth->irq[i] < 0) {
			dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
			return -ENXIO;
		}
	}
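	/* Grab every clock named in mtk_clks_source_name[]; the indices
	 * in eth->clks match that table. A missing clock is fatal unless
	 * its provider merely isn't ready yet (-EPROBE_DEFER).
	 */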
	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
		eth->clks[i] = devm_clk_get(eth->dev,
					    mtk_clks_source_name[i]);
		if (IS_ERR(eth->clks[i])) {
			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
				return -EPROBE_DEFER;
			return -ENODEV;
		}
	}

	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
	INIT_WORK(&eth->pending_work, mtk_pending_work);

	err = mtk_hw_init(eth);
	if (err)
		return err;

	for_each_child_of_node(pdev->dev.of_node, mac_np) {
		if (!of_device_is_compatible(mac_np,
					     "mediatek,eth-mac"))
			continue;

		if (!of_device_is_available(mac_np))
			continue;

		err = mtk_add_mac(eth, mac_np);
		if (err)
			goto err_deinit_hw;
	}

	err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
			       dev_name(eth->dev), eth);
	if (err)
		goto err_free_dev;

	err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
			       dev_name(eth->dev), eth);
	if (err)
		goto err_free_dev;

	err = mtk_mdio_init(eth);
	if (err)
		goto err_free_dev;

	for (i = 0; i < MTK_MAX_DEVS; i++) {
		if (!eth->netdev[i])
			continue;

		err = register_netdev(eth->netdev[i]);
		if (err) {
			dev_err(eth->dev, "error bringing up device\n");
			goto err_deinit_mdio;
		}

		netif_info(eth, probe, eth->netdev[i],
			   "mediatek frame engine at 0x%08lx, irq %d\n",
			   eth->netdev[i]->base_addr, eth->irq[0]);
	}

	/* we run 2 devices on the same DMA ring so we need a dummy device
	 * for NAPI to work
	 */
	init_dummy_netdev(&eth->dummy_dev);
	netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
		       MTK_NAPI_WEIGHT);
	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
		       MTK_NAPI_WEIGHT);

	platform_set_drvdata(pdev, eth);

	return 0;

err_deinit_mdio:
	mtk_mdio_cleanup(eth);
err_free_dev:
	mtk_free_dev(eth);
err_deinit_hw:
	mtk_hw_deinit(eth);

	return err;
}

static int mtk_remove(struct platform_device *pdev)
{
	struct mtk_eth *eth = platform_get_drvdata(pdev);
	int i;

	/* stop all devices to make sure that dma is properly shut down */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		mtk_stop(eth->netdev[i]);
	}

	mtk_hw_deinit(eth);

	netif_napi_del(&eth->tx_napi);
	netif_napi_del(&eth->rx_napi);
	mtk_cleanup(eth);
	mtk_mdio_cleanup(eth);

	return 0;
}

const struct of_device_id of_mtk_match[] = {
	{ .compatible = "mediatek,mt7623-eth" },
	{},
};

static struct platform_driver mtk_driver = {
	.probe = mtk_probe,
	.remove = mtk_remove,
	.driver = {
		.name = "mtk_soc_eth",
		.of_match_table = of_mtk_match,
	},
};

module_platform_driver(mtk_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");