/*
 * Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/tcp.h>

#include "macb.h"

#define MACB_RX_BUFFER_SIZE	128
#define RX_BUFFER_MULTIPLE	64  /* bytes */

#define DEFAULT_RX_RING_SIZE	512 /* must be power of 2 */
#define MIN_RX_RING_SIZE	64
#define MAX_RX_RING_SIZE	8192
#define RX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->rx_ring_size)

#define DEFAULT_TX_RING_SIZE	512 /* must be power of 2 */
#define MIN_TX_RING_SIZE	64
#define MAX_TX_RING_SIZE	4096
#define TX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->tx_ring_size)

/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)

#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
				 | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
					| MACB_BIT(ISR_RLE)		\
					| MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))

/* Max length of transmit frame must be a multiple of 8 bytes */
#define MACB_TX_LEN_ALIGN	8
#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))

#define GEM_MTU_MIN_SIZE	ETH_MIN_MTU
#define MACB_NETIF_LSO		NETIF_F_TSO

#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define MACB_WOL_ENABLED		(0x1 << 1)

/* Graceful stop timeouts in us. We should allow up to
 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 */
#define MACB_HALT_TIMEOUT	1230

/* DMA buffer descriptors may be of different sizes
 * depending on the hardware configuration:
 *
 * 1. dma address width 32 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *
 * 2. dma address width 64 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *
 * 3. dma address width 32 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: timestamp word 1
 *    word 4: timestamp word 2
 *
 * 4. dma address width 64 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *    word 5: timestamp word 1
 *    word 6: timestamp word 2
 */
static unsigned int macb_dma_desc_get_size(struct macb *bp)
{
#ifdef MACB_EXT_DESC
	unsigned int desc_size;

	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64);
		break;
	case HW_DMA_CAP_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	default:
		desc_size = sizeof(struct macb_dma_desc);
	}
	return desc_size;
#endif
	return sizeof(struct macb_dma_desc);
}

static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx)
{
#ifdef MACB_EXT_DESC
	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
	case HW_DMA_CAP_PTP:
		desc_idx <<= 1;
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_idx *= 3;
		break;
	default:
		break;
	}
#endif
	return desc_idx;
}

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
{
	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
		return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc));
	return NULL;
}
#endif
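
/* Note: macb_adj_dma_desc_idx() scales a logical ring index to a slot in
 * the flat descriptor array. With HW_DMA_CAP_64B_PTP, for instance, each
 * logical descriptor occupies three macb_dma_desc-sized slots, so logical
 * index 2 maps to slot 6.
 */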

/* Ring buffer accessors */
static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->tx_ring_size - 1);
}

static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
					  unsigned int index)
{
	index = macb_tx_ring_wrap(queue->bp, index);
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->tx_ring[index];
}

static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
				       unsigned int index)
{
	return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
}

static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
{
	dma_addr_t offset;

	offset = macb_tx_ring_wrap(queue->bp, index) *
			macb_dma_desc_get_size(queue->bp);

	return queue->tx_ring_dma + offset;
}

static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->rx_ring_size - 1);
}

static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index)
{
	index = macb_rx_ring_wrap(queue->bp, index);
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->rx_ring[index];
}

static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index)
{
	return queue->rx_buffers + queue->bp->rx_buffer_size *
	       macb_rx_ring_wrap(queue->bp, index);
}
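
/* Note: the wrap helpers above rely on tx_ring_size and rx_ring_size
 * being powers of two (see DEFAULT_*_RING_SIZE), so the bitwise AND with
 * (size - 1) is equivalent to index % size.
 */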

/* I/O accessors */
static u32 hw_readl_native(struct macb *bp, int offset)
{
	return __raw_readl(bp->regs + offset);
}

static void hw_writel_native(struct macb *bp, int offset, u32 value)
{
	__raw_writel(value, bp->regs + offset);
}

static u32 hw_readl(struct macb *bp, int offset)
{
	return readl_relaxed(bp->regs + offset);
}

static void hw_writel(struct macb *bp, int offset, u32 value)
{
	writel_relaxed(value, bp->regs + offset);
}

/* Find the CPU endianness by using the loopback bit of NCR register. When the
 * CPU is in big endian we need to program swapped mode for management
 * descriptor access.
 */
static bool hw_is_native_io(void __iomem *addr)
{
	u32 value = MACB_BIT(LLB);

	__raw_writel(value, addr + MACB_NCR);
	value = __raw_readl(addr + MACB_NCR);

	/* Write 0 back to disable everything */
	__raw_writel(0, addr + MACB_NCR);

	return value == MACB_BIT(LLB);
}

static bool hw_is_gem(void __iomem *addr, bool native_io)
{
	u32 id;

	if (native_io)
		id = __raw_readl(addr + MACB_MID);
	else
		id = readl_relaxed(addr + MACB_MID);

	return MACB_BFEXT(IDNUM, id) >= 0x2;
}

static void macb_set_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;

	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
	macb_or_gem_writel(bp, SA1B, bottom);
	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
	macb_or_gem_writel(bp, SA1T, top);

	/* Clear unused address register sets */
	macb_or_gem_writel(bp, SA2B, 0);
	macb_or_gem_writel(bp, SA2T, 0);
	macb_or_gem_writel(bp, SA3B, 0);
	macb_or_gem_writel(bp, SA3T, 0);
	macb_or_gem_writel(bp, SA4B, 0);
	macb_or_gem_writel(bp, SA4T, 0);
}

static void macb_get_hwaddr(struct macb *bp)
{
	struct macb_platform_data *pdata;
	u32 bottom;
	u16 top;
	u8 addr[6];
	int i;

	pdata = dev_get_platdata(&bp->pdev->dev);

	/* Check all 4 address registers for a valid address */
	for (i = 0; i < 4; i++) {
		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
		top = macb_or_gem_readl(bp, SA1T + i * 8);

		if (pdata && pdata->rev_eth_addr) {
			addr[5] = bottom & 0xff;
			addr[4] = (bottom >> 8) & 0xff;
			addr[3] = (bottom >> 16) & 0xff;
			addr[2] = (bottom >> 24) & 0xff;
			addr[1] = top & 0xff;
			addr[0] = (top & 0xff00) >> 8;
		} else {
			addr[0] = bottom & 0xff;
			addr[1] = (bottom >> 8) & 0xff;
			addr[2] = (bottom >> 16) & 0xff;
			addr[3] = (bottom >> 24) & 0xff;
			addr[4] = top & 0xff;
			addr[5] = (top >> 8) & 0xff;
		}

		if (is_valid_ether_addr(addr)) {
			memcpy(bp->dev->dev_addr, addr, sizeof(addr));
			return;
		}
	}

	dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
	eth_hw_addr_random(bp->dev);
}

static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct macb *bp = bus->priv;
	int value;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_READ)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)));

	/* wait for end of transfer */
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	value = MACB_BFEXT(DATA, macb_readl(bp, MAN));

	return value;
}

static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct macb *bp = bus->priv;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_WRITE)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)
			      | MACB_BF(DATA, value)));

	/* wait for end of transfer */
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	return 0;
}
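
/* Note: the MAN register fields used above map onto an IEEE 802.3
 * clause 22 management frame: start-of-frame, read/write opcode, PHY
 * address, register address, the fixed code bits, and 16 data bits;
 * NSR's IDLE flag signals completion of the shift.
 */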

/**
 * macb_set_tx_clk() - Set a clock to a new frequency
 * @clk:	Pointer to the clock to change
 * @speed:	New frequency in Hz
 * @dev:	Pointer to the struct net_device
 */
static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
{
	long ferr, rate, rate_rounded;
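
	/* Note: the 50 ppm limit below is checked in units of 10 ppm:
	 * rate / 100000 is 1250 for a 125 MHz target, so ferr > 5 means
	 * the rounded rate is more than 50 ppm (6250 Hz) off.
	 */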

	if (!clk)
		return;

	switch (speed) {
	case SPEED_10:
		rate = 2500000;
		break;
	case SPEED_100:
		rate = 25000000;
		break;
	case SPEED_1000:
		rate = 125000000;
		break;
	default:
		return;
	}

	rate_rounded = clk_round_rate(clk, rate);
	if (rate_rounded < 0)
		return;

	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
	 * is not satisfied.
	 */
	ferr = abs(rate_rounded - rate);
	ferr = DIV_ROUND_UP(ferr, rate / 100000);
	if (ferr > 5)
		netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
			    rate);

	if (clk_set_rate(clk, rate_rounded))
		netdev_err(dev, "adjusting tx_clk failed.\n");
}

static void macb_handle_link_change(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned long flags;
	int status_change = 0;

	spin_lock_irqsave(&bp->lock, flags);

	if (phydev->link) {
		if ((bp->speed != phydev->speed) ||
		    (bp->duplex != phydev->duplex)) {
			u32 reg;

			reg = macb_readl(bp, NCFGR);
			reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
			if (macb_is_gem(bp))
				reg &= ~GEM_BIT(GBE);

			if (phydev->duplex)
				reg |= MACB_BIT(FD);
			if (phydev->speed == SPEED_100)
				reg |= MACB_BIT(SPD);
			if (phydev->speed == SPEED_1000 &&
			    bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
				reg |= GEM_BIT(GBE);

			macb_or_gem_writel(bp, NCFGR, reg);

			bp->speed = phydev->speed;
			bp->duplex = phydev->duplex;
			status_change = 1;
		}
	}

	if (phydev->link != bp->link) {
		if (!phydev->link) {
			bp->speed = 0;
			bp->duplex = -1;
		}
		bp->link = phydev->link;

		status_change = 1;
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (status_change) {
		if (phydev->link) {
			/* Update the TX clock rate if and only if the link is
			 * up and there has been a link change.
			 */
			macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);

			netif_carrier_on(dev);
			netdev_info(dev, "link up (%d/%s)\n",
				    phydev->speed,
				    phydev->duplex == DUPLEX_FULL ?
				    "Full" : "Half");
		} else {
			netif_carrier_off(dev);
			netdev_info(dev, "link down\n");
		}
	}
}

/* based on au1000_eth.c */
static int macb_mii_probe(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_platform_data *pdata;
	struct phy_device *phydev;
	int phy_irq;
	int ret;

	if (bp->phy_node) {
		phydev = of_phy_connect(dev, bp->phy_node,
					&macb_handle_link_change, 0,
					bp->phy_interface);
		if (!phydev)
			return -ENODEV;
	} else {
		phydev = phy_find_first(bp->mii_bus);
		if (!phydev) {
			netdev_err(dev, "no PHY found\n");
			return -ENXIO;
		}

		pdata = dev_get_platdata(&bp->pdev->dev);
		if (pdata) {
			if (gpio_is_valid(pdata->phy_irq_pin)) {
				ret = devm_gpio_request(&bp->pdev->dev,
							pdata->phy_irq_pin, "phy int");
				if (!ret) {
					phy_irq = gpio_to_irq(pdata->phy_irq_pin);
					phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
				}
			} else {
				phydev->irq = PHY_POLL;
			}
		}

		/* attach the mac to the phy */
		ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
					 bp->phy_interface);
		if (ret) {
			netdev_err(dev, "Could not attach to PHY\n");
			return ret;
		}
	}

	/* mask with MAC supported features */
	if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
		phydev->supported &= PHY_GBIT_FEATURES;
	else
		phydev->supported &= PHY_BASIC_FEATURES;

	if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF)
		phydev->supported &= ~SUPPORTED_1000baseT_Half;

	phydev->advertising = phydev->supported;

	bp->link = 0;
	bp->speed = 0;
	bp->duplex = -1;

	return 0;
}

static int macb_mii_init(struct macb *bp)
{
	struct macb_platform_data *pdata;
	struct device_node *np;
	int err = -ENXIO, i;

	/* Enable management port */
	macb_writel(bp, NCR, MACB_BIT(MPE));

	bp->mii_bus = mdiobus_alloc();
	if (!bp->mii_bus) {
		err = -ENOMEM;
		goto err_out;
	}

	bp->mii_bus->name = "MACB_mii_bus";
	bp->mii_bus->read = &macb_mdio_read;
	bp->mii_bus->write = &macb_mdio_write;
	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 bp->pdev->name, bp->pdev->id);
	bp->mii_bus->priv = bp;
	bp->mii_bus->parent = &bp->pdev->dev;
	pdata = dev_get_platdata(&bp->pdev->dev);

	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);

	np = bp->pdev->dev.of_node;
	if (np) {
		if (of_phy_is_fixed_link(np)) {
			if (of_phy_register_fixed_link(np) < 0) {
				dev_err(&bp->pdev->dev,
					"broken fixed-link specification\n");
				goto err_out_unregister_bus;
			}
			bp->phy_node = of_node_get(np);

			err = mdiobus_register(bp->mii_bus);
		} else {
			/* try dt phy registration */
			err = of_mdiobus_register(bp->mii_bus, np);

			/* fallback to standard phy registration if no phy was
			 * found during dt phy registration
			 */
			if (!err && !phy_find_first(bp->mii_bus)) {
				for (i = 0; i < PHY_MAX_ADDR; i++) {
					struct phy_device *phydev;

					phydev = mdiobus_scan(bp->mii_bus, i);
					if (IS_ERR(phydev) &&
					    PTR_ERR(phydev) != -ENODEV) {
						err = PTR_ERR(phydev);
						break;
					}
				}

				if (err)
					goto err_out_unregister_bus;
			}
		}
	} else {
		for (i = 0; i < PHY_MAX_ADDR; i++)
			bp->mii_bus->irq[i] = PHY_POLL;

		if (pdata)
			bp->mii_bus->phy_mask = pdata->phy_mask;

		err = mdiobus_register(bp->mii_bus);
	}

	if (err)
		goto err_out_free_mdiobus;

	err = macb_mii_probe(bp->dev);
	if (err)
		goto err_out_unregister_bus;

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(bp->mii_bus);
err_out_free_mdiobus:
	of_node_put(bp->phy_node);
	if (np && of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	mdiobus_free(bp->mii_bus);
err_out:
	return err;
}

static void macb_update_stats(struct macb *bp)
{
	u32 *p = &bp->hw_stats.macb.rx_pause_frames;
	u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
	int offset = MACB_PFR;

	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);

	for (; p < end; p++, offset += 4)
		*p += bp->macb_reg_readl(bp, offset);
}

static int macb_halt_tx(struct macb *bp)
{
	unsigned long	halt_time, timeout;
	u32		status;

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));

	timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
	do {
		halt_time = jiffies;
		status = macb_readl(bp, TSR);
		if (!(status & MACB_BIT(TGO)))
			return 0;

		usleep_range(10, 250);
	} while (time_before(halt_time, timeout));

	return -ETIMEDOUT;
}

static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
{
	if (tx_skb->mapping) {
		if (tx_skb->mapped_as_page)
			dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
				       tx_skb->size, DMA_TO_DEVICE);
		else
			dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
					 tx_skb->size, DMA_TO_DEVICE);
		tx_skb->mapping = 0;
	}

	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
	}
}

static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	struct macb_dma_desc_64 *desc_64;

	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
		desc_64 = macb_64b_desc(bp, desc);
		desc_64->addrh = upper_32_bits(addr);
	}
#endif
	desc->addr = lower_32_bits(addr);
}

static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
{
	dma_addr_t addr = 0;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	struct macb_dma_desc_64 *desc_64;

	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
		desc_64 = macb_64b_desc(bp, desc);
		addr = ((u64)(desc_64->addrh) << 32);
	}
#endif
	addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
	return addr;
}

static void macb_tx_error_task(struct work_struct *work)
{
	struct macb_queue	*queue = container_of(work, struct macb_queue,
						      tx_error_task);
	struct macb		*bp = queue->bp;
	struct macb_tx_skb	*tx_skb;
	struct macb_dma_desc	*desc;
	struct sk_buff		*skb;
	unsigned int		tail;
	unsigned long		flags;

	netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
		    (unsigned int)(queue - bp->queues),
		    queue->tx_tail, queue->tx_head);

	/* Prevent the queue IRQ handlers from running: each of them may call
	 * macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
	 * As explained below, we have to halt the transmission before updating
	 * TBQP registers so we call netif_tx_stop_all_queues() to notify the
	 * network engine about the macb/gem being halted.
	 */
	spin_lock_irqsave(&bp->lock, flags);

	/* Make sure nobody is trying to queue up new packets */
	netif_tx_stop_all_queues(bp->dev);

	/* Stop transmission now
	 * (in case we have just queued new packets)
	 * macb/gem must be halted to write TBQP register
	 */
	if (macb_halt_tx(bp))
		/* Just complain for now, reinitializing TX path can be good */
		netdev_err(bp->dev, "BUG: halt tx timed out\n");

	/* Treat frames in TX queue including the ones that caused the error.
	 * Free transmit buffers in upper layer.
	 */
	for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
		u32	ctrl;

		desc = macb_tx_desc(queue, tail);
		ctrl = desc->ctrl;
		tx_skb = macb_tx_skb(queue, tail);
		skb = tx_skb->skb;

		if (ctrl & MACB_BIT(TX_USED)) {
			/* skb is set for the last buffer of the frame */
			while (!skb) {
				macb_tx_unmap(bp, tx_skb);
				tail++;
				tx_skb = macb_tx_skb(queue, tail);
				skb = tx_skb->skb;
			}

			/* ctrl still refers to the first buffer descriptor
			 * since it's the only one written back by the hardware
			 */
			if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
				netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(bp, tail),
					    skb->data);
				bp->dev->stats.tx_packets++;
				queue->stats.tx_packets++;
				bp->dev->stats.tx_bytes += skb->len;
				queue->stats.tx_bytes += skb->len;
			}
		} else {
			/* "Buffers exhausted mid-frame" errors may only happen
			 * if the driver is buggy, so complain loudly about
			 * those. Statistics are updated by hardware.
			 */
			if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
				netdev_err(bp->dev,
					   "BUG: TX buffers exhausted mid-frame\n");

			desc->ctrl = ctrl | MACB_BIT(TX_USED);
		}

		macb_tx_unmap(bp, tx_skb);
	}

	/* Set end of TX queue */
	desc = macb_tx_desc(queue, 0);
	macb_set_addr(bp, desc, 0);
	desc->ctrl = MACB_BIT(TX_USED);

	/* Make descriptor updates visible to hardware */
	wmb();

	/* Reinitialize the TX desc queue */
	queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
		queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
#endif
	/* Make TX ring reflect state of hardware */
	queue->tx_head = 0;
	queue->tx_tail = 0;

	/* Housework before enabling TX IRQ */
	macb_writel(bp, TSR, macb_readl(bp, TSR));
	queue_writel(queue, IER, MACB_TX_INT_FLAGS);

	/* Now we are ready to start transmission again */
	netif_tx_start_all_queues(bp->dev);
	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	spin_unlock_irqrestore(&bp->lock, flags);
}
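
/* Summary of the recovery above: TX is halted, every pending frame is
 * unmapped and accounted, the first descriptor is rewritten as an empty
 * "used" end-of-queue marker, TBQP is reset to the ring base, and only
 * then are TX interrupts and transmission re-enabled.
 */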

static void macb_tx_interrupt(struct macb_queue *queue)
{
	unsigned int tail;
	unsigned int head;
	u32 status;
	struct macb *bp = queue->bp;
	u16 queue_index = queue - bp->queues;

	status = macb_readl(bp, TSR);
	macb_writel(bp, TSR, status);

	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
		queue_writel(queue, ISR, MACB_BIT(TCOMP));

	netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
		    (unsigned long)status);

	head = queue->tx_head;
	for (tail = queue->tx_tail; tail != head; tail++) {
		struct macb_tx_skb	*tx_skb;
		struct sk_buff		*skb;
		struct macb_dma_desc	*desc;
		u32			ctrl;

		desc = macb_tx_desc(queue, tail);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		ctrl = desc->ctrl;

		/* TX_USED bit is only set by hardware on the very first buffer
		 * descriptor of the transmitted frame.
		 */
		if (!(ctrl & MACB_BIT(TX_USED)))
			break;

		/* Process all buffers of the current transmitted frame */
		for (;; tail++) {
			tx_skb = macb_tx_skb(queue, tail);
			skb = tx_skb->skb;

			/* First, update TX stats if needed */
			if (skb) {
				if (gem_ptp_do_txstamp(queue, skb, desc) == 0) {
					/* skb now belongs to timestamp buffer
					 * and will be removed later
					 */
					tx_skb->skb = NULL;
				}
				netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(bp, tail),
					    skb->data);
				bp->dev->stats.tx_packets++;
				queue->stats.tx_packets++;
				bp->dev->stats.tx_bytes += skb->len;
				queue->stats.tx_bytes += skb->len;
			}

			/* Now we can safely release resources */
			macb_tx_unmap(bp, tx_skb);

			/* skb is set only for the last buffer of the frame.
			 * WARNING: at this point skb has been freed by
			 * macb_tx_unmap().
			 */
			if (skb)
				break;
		}
	}

	queue->tx_tail = tail;
	if (__netif_subqueue_stopped(bp->dev, queue_index) &&
	    CIRC_CNT(queue->tx_head, queue->tx_tail,
		     bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
		netif_wake_subqueue(bp->dev, queue_index);
}
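
/* Note: the wake-up check above uses MACB_TX_WAKEUP_THRESH, i.e. 3/4 of
 * the ring: with the default 512-entry TX ring a stopped subqueue is
 * woken once at most 384 descriptors are still in use.
 */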

static void gem_rx_refill(struct macb_queue *queue)
{
	unsigned int		entry;
	struct sk_buff		*skb;
	dma_addr_t		paddr;
	struct macb *bp = queue->bp;
	struct macb_dma_desc *desc;

	while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
			bp->rx_ring_size) > 0) {
		entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		queue->rx_prepared_head++;
		desc = macb_rx_desc(queue, entry);

		if (!queue->rx_skbuff[entry]) {
			/* allocate sk_buff for this free entry in ring */
			skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
			if (unlikely(!skb)) {
				netdev_err(bp->dev,
					   "Unable to allocate sk_buff\n");
				break;
			}

			/* now fill corresponding descriptor entry */
			paddr = dma_map_single(&bp->pdev->dev, skb->data,
					       bp->rx_buffer_size,
					       DMA_FROM_DEVICE);
			if (dma_mapping_error(&bp->pdev->dev, paddr)) {
				dev_kfree_skb(skb);
				break;
			}

			queue->rx_skbuff[entry] = skb;

			if (entry == bp->rx_ring_size - 1)
				paddr |= MACB_BIT(RX_WRAP);
			macb_set_addr(bp, desc, paddr);
			desc->ctrl = 0;

			/* properly align Ethernet header */
			skb_reserve(skb, NET_IP_ALIGN);
		} else {
			desc->addr &= ~MACB_BIT(RX_USED);
			desc->ctrl = 0;
		}
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
			queue, queue->rx_prepared_head, queue->rx_tail);
}

/* Mark DMA descriptors from begin up to and not including end as unused */
static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
				  unsigned int end)
{
	unsigned int frag;

	for (frag = begin; frag != end; frag++) {
		struct macb_dma_desc *desc = macb_rx_desc(queue, frag);

		desc->addr &= ~MACB_BIT(RX_USED);
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	/* When this happens, the hardware stats registers for
	 * whatever caused this are updated, so we don't have to record
	 * anything.
	 */
}

static int gem_rx(struct macb_queue *queue, int budget)
{
	struct macb *bp = queue->bp;
	unsigned int		len;
	unsigned int		entry;
	struct sk_buff		*skb;
	struct macb_dma_desc	*desc;
	int			count = 0;

	while (count < budget) {
		u32 ctrl;
		dma_addr_t addr;
		bool rxused;

		entry = macb_rx_ring_wrap(bp, queue->rx_tail);
		desc = macb_rx_desc(queue, entry);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
		addr = macb_get_addr(bp, desc);
		ctrl = desc->ctrl;

		if (!rxused)
			break;

		queue->rx_tail++;
		count++;

		if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
			netdev_err(bp->dev,
				   "not whole frame pointed by descriptor\n");
			bp->dev->stats.rx_dropped++;
			queue->stats.rx_dropped++;
			break;
		}
		skb = queue->rx_skbuff[entry];
		if (unlikely(!skb)) {
			netdev_err(bp->dev,
				   "inconsistent Rx descriptor chain\n");
			bp->dev->stats.rx_dropped++;
			queue->stats.rx_dropped++;
			break;
		}
		/* now everything is ready for receiving packet */
		queue->rx_skbuff[entry] = NULL;
		len = ctrl & bp->rx_frm_len_mask;

		netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);

		skb_put(skb, len);
		dma_unmap_single(&bp->pdev->dev, addr,
				 bp->rx_buffer_size, DMA_FROM_DEVICE);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb_checksum_none_assert(skb);
		if (bp->dev->features & NETIF_F_RXCSUM &&
		    !(bp->dev->flags & IFF_PROMISC) &&
		    GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		bp->dev->stats.rx_packets++;
		queue->stats.rx_packets++;
		bp->dev->stats.rx_bytes += skb->len;
		queue->stats.rx_bytes += skb->len;

		gem_ptp_do_rxstamp(bp, skb, desc);

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
		netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
			    skb->len, skb->csum);
		print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb_mac_header(skb), 16, true);
		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb->data, 32, true);
#endif

		netif_receive_skb(skb);
	}

	gem_rx_refill(queue);

	return count;
}

static int macb_rx_frame(struct macb_queue *queue, unsigned int first_frag,
			 unsigned int last_frag)
{
	unsigned int len;
	unsigned int frag;
	unsigned int offset;
	struct sk_buff *skb;
	struct macb_dma_desc *desc;
	struct macb *bp = queue->bp;

	desc = macb_rx_desc(queue, last_frag);
	len = desc->ctrl & bp->rx_frm_len_mask;

	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
		macb_rx_ring_wrap(bp, first_frag),
		macb_rx_ring_wrap(bp, last_frag), len);

	/* The ethernet header starts NET_IP_ALIGN bytes into the
	 * first buffer. Since the header is 14 bytes, this makes the
	 * payload word-aligned.
	 *
	 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
	 * the two padding bytes into the skb so that we avoid hitting
	 * the slowpath in memcpy(), and pull them off afterwards.
	 */
	skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
	if (!skb) {
		bp->dev->stats.rx_dropped++;
		for (frag = first_frag; ; frag++) {
			desc = macb_rx_desc(queue, frag);
			desc->addr &= ~MACB_BIT(RX_USED);
			if (frag == last_frag)
				break;
		}

		/* Make descriptor updates visible to hardware */
		wmb();

		return 1;
	}

	offset = 0;
	len += NET_IP_ALIGN;
	skb_checksum_none_assert(skb);
	skb_put(skb, len);

	for (frag = first_frag; ; frag++) {
		unsigned int frag_len = bp->rx_buffer_size;

		if (offset + frag_len > len) {
			if (unlikely(frag != last_frag)) {
				dev_kfree_skb_any(skb);
				return -1;
			}
			frag_len = len - offset;
		}
		skb_copy_to_linear_data_offset(skb, offset,
					       macb_rx_buffer(queue, frag),
					       frag_len);
		offset += bp->rx_buffer_size;
		desc = macb_rx_desc(queue, frag);
		desc->addr &= ~MACB_BIT(RX_USED);

		if (frag == last_frag)
			break;
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	__skb_pull(skb, NET_IP_ALIGN);
	skb->protocol = eth_type_trans(skb, bp->dev);

	bp->dev->stats.rx_packets++;
	bp->dev->stats.rx_bytes += skb->len;
	netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
		    skb->len, skb->csum);
	netif_receive_skb(skb);

	return 0;
}

static inline void macb_init_rx_ring(struct macb_queue *queue)
{
	struct macb *bp = queue->bp;
	dma_addr_t addr;
	struct macb_dma_desc *desc = NULL;
	int i;

	addr = queue->rx_buffers_dma;
	for (i = 0; i < bp->rx_ring_size; i++) {
		desc = macb_rx_desc(queue, i);
		macb_set_addr(bp, desc, addr);
		desc->ctrl = 0;
		addr += bp->rx_buffer_size;
	}
	desc->addr |= MACB_BIT(RX_WRAP);
	queue->rx_tail = 0;
}

static int macb_rx(struct macb_queue *queue, int budget)
{
	struct macb *bp = queue->bp;
	bool reset_rx_queue = false;
	int received = 0;
	unsigned int tail;
	int first_frag = -1;

	for (tail = queue->rx_tail; budget > 0; tail++) {
		struct macb_dma_desc *desc = macb_rx_desc(queue, tail);
		u32 ctrl;

		/* Make hw descriptor updates visible to CPU */
		rmb();

		ctrl = desc->ctrl;

		if (!(desc->addr & MACB_BIT(RX_USED)))
			break;

		if (ctrl & MACB_BIT(RX_SOF)) {
			if (first_frag != -1)
				discard_partial_frame(queue, first_frag, tail);
			first_frag = tail;
		}

		if (ctrl & MACB_BIT(RX_EOF)) {
			int dropped;

			if (unlikely(first_frag == -1)) {
				reset_rx_queue = true;
				continue;
			}

			dropped = macb_rx_frame(queue, first_frag, tail);
			first_frag = -1;
			if (unlikely(dropped < 0)) {
				reset_rx_queue = true;
				continue;
			}
			if (!dropped) {
				received++;
				budget--;
			}
		}
	}

	if (unlikely(reset_rx_queue)) {
		unsigned long flags;
		u32 ctrl;

		netdev_err(bp->dev, "RX queue corruption: reset it\n");

		spin_lock_irqsave(&bp->lock, flags);

		ctrl = macb_readl(bp, NCR);
		macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));

		macb_init_rx_ring(queue);
		queue_writel(queue, RBQP, queue->rx_ring_dma);

		macb_writel(bp, NCR, ctrl | MACB_BIT(RE));

		spin_unlock_irqrestore(&bp->lock, flags);
		return received;
	}

	if (first_frag != -1)
		queue->rx_tail = first_frag;
	else
		queue->rx_tail = tail;

	return received;
}

static int macb_poll(struct napi_struct *napi, int budget)
{
	struct macb_queue *queue = container_of(napi, struct macb_queue, napi);
	struct macb *bp = queue->bp;
	int work_done;
	u32 status;

	status = macb_readl(bp, RSR);
	macb_writel(bp, RSR, status);

	netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
		    (unsigned long)status, budget);

	work_done = bp->macbgem_ops.mog_rx(queue, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		/* Packets received while interrupts were disabled */
		status = macb_readl(bp, RSR);
		if (status) {
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(RCOMP));
			napi_reschedule(napi);
		} else {
			queue_writel(queue, IER, MACB_RX_INT_FLAGS);
		}
	}

	/* TODO: Handle errors */

	return work_done;
}
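
/* Note: macb_poll() re-reads RSR after napi_complete_done() to close the
 * race where a frame arrives between the last mog_rx() pass and RX
 * interrupts being re-enabled; if so it reschedules NAPI instead of
 * re-enabling MACB_RX_INT_FLAGS.
 */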

static void macb_hresp_error_task(unsigned long data)
{
	struct macb *bp = (struct macb *)data;
	struct net_device *dev = bp->dev;
	struct macb_queue *queue = bp->queues;
	unsigned int q;
	u32 ctrl;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, IDR, MACB_RX_INT_FLAGS |
					 MACB_TX_INT_FLAGS |
					 MACB_BIT(HRESP));
	}
	ctrl = macb_readl(bp, NCR);
	ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
	macb_writel(bp, NCR, ctrl);

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);

	bp->macbgem_ops.mog_init_rings(bp);

	/* Initialize TX and RX buffers */
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, RBQPH,
				     upper_32_bits(queue->rx_ring_dma));
#endif
		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, TBQPH,
				     upper_32_bits(queue->tx_ring_dma));
#endif

		/* Enable interrupts */
		queue_writel(queue, IER,
			     MACB_RX_INT_FLAGS |
			     MACB_TX_INT_FLAGS |
			     MACB_BIT(HRESP));
	}

	ctrl |= MACB_BIT(RE) | MACB_BIT(TE);
	macb_writel(bp, NCR, ctrl);

	netif_carrier_on(dev);
	netif_tx_start_all_queues(dev);
}
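
/* Note: macb_hresp_error_task() recovers from an AHB/AXI bus error
 * (HRESP) by masking interrupts, disabling RX/TX, reinitializing both
 * rings, and then re-enabling the controller and its interrupts.
 */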

static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
	struct macb_queue *queue = dev_id;
	struct macb *bp = queue->bp;
	struct net_device *dev = bp->dev;
	u32 status, ctrl;

	status = queue_readl(queue, ISR);

	if (unlikely(!status))
		return IRQ_NONE;

	spin_lock(&bp->lock);

	while (status) {
		/* close possible race with dev_close */
		if (unlikely(!netif_running(dev))) {
			queue_writel(queue, IDR, -1);
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, -1);
			break;
		}

		netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
			    (unsigned int)(queue - bp->queues),
			    (unsigned long)status);

		if (status & MACB_RX_INT_FLAGS) {
			/* There's no point taking any more interrupts
			 * until we have processed the buffers. The
			 * scheduling call may fail if the poll routine
			 * is already scheduled, so disable interrupts
			 * now.
			 */
			queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(RCOMP));

			if (napi_schedule_prep(&queue->napi)) {
				netdev_vdbg(bp->dev, "scheduling RX softirq\n");
				__napi_schedule(&queue->napi);
			}
		}

		if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
			queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
			schedule_work(&queue->tx_error_task);

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);

			break;
		}

		if (status & MACB_BIT(TCOMP))
			macb_tx_interrupt(queue);

		/* Link change detection isn't possible with RMII, so we'll
		 * add that if/when we get our hands on a full-blown MII PHY.
		 */

		/* There is a hardware issue under heavy load where DMA can
		 * stop, this causes endless "used buffer descriptor read"
		 * interrupts but it can be cleared by re-enabling RX. See
		 * the at91 manual, section 41.3.1 or the Zynq manual
		 * section 16.7.4 for details.
		 */
		if (status & MACB_BIT(RXUBR)) {
			ctrl = macb_readl(bp, NCR);
			macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
			wmb();
			macb_writel(bp, NCR, ctrl | MACB_BIT(RE));

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(RXUBR));
		}

		if (status & MACB_BIT(ISR_ROVR)) {
			/* We missed at least one packet */
			if (macb_is_gem(bp))
				bp->hw_stats.gem.rx_overruns++;
			else
				bp->hw_stats.macb.rx_overruns++;

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
		}

		if (status & MACB_BIT(HRESP)) {
			tasklet_schedule(&bp->hresp_err_tasklet);
			netdev_err(dev, "DMA bus error: HRESP not OK\n");

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(HRESP));
		}
		status = queue_readl(queue, ISR);
	}

	spin_unlock(&bp->lock);

	return IRQ_HANDLED;
}
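
/* Note: on controllers with MACB_CAPS_ISR_CLEAR_ON_WRITE, ISR bits are
 * not cleared by reading, so each condition handled above is explicitly
 * acknowledged by writing its bit back to ISR.
 */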

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void macb_poll_controller(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned long flags;
	unsigned int q;

	local_irq_save(flags);
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		macb_interrupt(dev->irq, queue);
	local_irq_restore(flags);
}
#endif

static unsigned int macb_tx_map(struct macb *bp,
				struct macb_queue *queue,
				struct sk_buff *skb,
				unsigned int hdrlen)
{
	dma_addr_t mapping;
	unsigned int len, entry, i, tx_head = queue->tx_head;
	struct macb_tx_skb *tx_skb = NULL;
	struct macb_dma_desc *desc;
	unsigned int offset, size, count = 0;
	unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int eof = 1, mss_mfs = 0;
	u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;

	/* LSO */
	if (skb_shinfo(skb)->gso_size != 0) {
		if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			/* UDP - UFO */
			lso_ctrl = MACB_LSO_UFO_ENABLE;
		else
			/* TCP - TSO */
			lso_ctrl = MACB_LSO_TSO_ENABLE;
	}

	/* First, map non-paged data */
	len = skb_headlen(skb);

	/* first buffer length */
	size = hdrlen;

	offset = 0;
	while (len) {
		entry = macb_tx_ring_wrap(bp, tx_head);
		tx_skb = &queue->tx_skb[entry];

		mapping = dma_map_single(&bp->pdev->dev,
					 skb->data + offset,
					 size, DMA_TO_DEVICE);
		if (dma_mapping_error(&bp->pdev->dev, mapping))
			goto dma_error;

		/* Save info to properly release resources */
		tx_skb->skb = NULL;
		tx_skb->mapping = mapping;
		tx_skb->size = size;
		tx_skb->mapped_as_page = false;

		len -= size;
		offset += size;
		count++;
		tx_head++;

		size = min(len, bp->max_tx_length);
	}

	/* Then, map paged data from fragments */
	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		len = skb_frag_size(frag);
		offset = 0;
		while (len) {
			size = min(len, bp->max_tx_length);
			entry = macb_tx_ring_wrap(bp, tx_head);
			tx_skb = &queue->tx_skb[entry];

			mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
						   offset, size, DMA_TO_DEVICE);
			if (dma_mapping_error(&bp->pdev->dev, mapping))
				goto dma_error;

			/* Save info to properly release resources */
			tx_skb->skb = NULL;
			tx_skb->mapping = mapping;
			tx_skb->size = size;
			tx_skb->mapped_as_page = true;

			len -= size;
			offset += size;
			count++;
			tx_head++;
		}
	}

	/* Should never happen */
	if (unlikely(!tx_skb)) {
		netdev_err(bp->dev, "BUG! empty skb!\n");
		return 0;
	}

	/* This is the last buffer of the frame: save socket buffer */
	tx_skb->skb = skb;

	/* Update TX ring: update buffer descriptors in reverse order
	 * to avoid race condition
	 */

	/* Set 'TX_USED' bit in buffer descriptor at tx_head position
	 * to set the end of TX queue
	 */
	i = tx_head;
	entry = macb_tx_ring_wrap(bp, i);
	ctrl = MACB_BIT(TX_USED);
	desc = macb_tx_desc(queue, entry);
	desc->ctrl = ctrl;

	if (lso_ctrl) {
		if (lso_ctrl == MACB_LSO_UFO_ENABLE)
			/* include header and FCS in value given to h/w */
			mss_mfs = skb_shinfo(skb)->gso_size +
					skb_transport_offset(skb) +
					ETH_FCS_LEN;
		else /* TSO */ {
			mss_mfs = skb_shinfo(skb)->gso_size;
			/* TCP Sequence Number Source Select
			 * can be set only for TSO
			 */
			seq_ctrl = 0;
		}
	}

	do {
		i--;
		entry = macb_tx_ring_wrap(bp, i);
		tx_skb = &queue->tx_skb[entry];
		desc = macb_tx_desc(queue, entry);

		ctrl = (u32)tx_skb->size;
		if (eof) {
			ctrl |= MACB_BIT(TX_LAST);
			eof = 0;
		}
		if (unlikely(entry == (bp->tx_ring_size - 1)))
			ctrl |= MACB_BIT(TX_WRAP);

		/* First descriptor is header descriptor */
		if (i == queue->tx_head) {
			ctrl |= MACB_BF(TX_LSO, lso_ctrl);
			ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
		} else
			/* Only set MSS/MFS on payload descriptors
			 * (second or later descriptor)
			 */
			ctrl |= MACB_BF(MSS_MFS, mss_mfs);

		/* Set TX buffer descriptor */
		macb_set_addr(bp, desc, tx_skb->mapping);
		/* desc->addr must be visible to hardware before clearing
		 * 'TX_USED' bit in desc->ctrl.
		 */
		wmb();
		desc->ctrl = ctrl;
	} while (i != queue->tx_head);

	queue->tx_head = tx_head;

	return count;

dma_error:
	netdev_err(bp->dev, "TX DMA map failed\n");

	for (i = queue->tx_head; i != tx_head; i++) {
		tx_skb = macb_tx_skb(queue, i);

		macb_tx_unmap(bp, tx_skb);
	}

	return 0;
}

static netdev_features_t macb_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	unsigned int nr_frags, f;
	unsigned int hdrlen;

	/* Validate LSO compatibility */

	/* there is only one buffer */
	if (!skb_is_nonlinear(skb))
		return features;

	/* length of header */
	hdrlen = skb_transport_offset(skb);
	if (ip_hdr(skb)->protocol == IPPROTO_TCP)
		hdrlen += tcp_hdrlen(skb);

	/* For LSO:
	 * When software supplies two or more payload buffers all payload buffers
	 * apart from the last must be a multiple of 8 bytes in size.
	 */
	if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN))
		return features & ~MACB_NETIF_LSO;

	nr_frags = skb_shinfo(skb)->nr_frags;
	/* No need to check last fragment */
	nr_frags--;
	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN))
			return features & ~MACB_NETIF_LSO;
	}
	return features;
}
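
/* Example: a TSO skb with page fragments whose linear area holds the
 * 54-byte Ethernet/IP/TCP headers plus 1460 payload bytes fails the
 * IS_ALIGNED() check above (1460 is not a multiple of 8), so TSO is
 * cleared for that skb and the stack segments it in software.
 */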

static inline int macb_clear_csum(struct sk_buff *skb)
{
	/* no change for packets without checksum offloading */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	/* make sure we can modify the header */
	if (unlikely(skb_cow_head(skb, 0)))
		return -1;

	/* initialize checksum field
	 * This is required - at least for Zynq, which otherwise calculates
	 * wrong UDP header checksums for UDP packets with UDP data len <=2
	 */
	*(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
	return 0;
}

static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue = &bp->queues[queue_index];
	unsigned long flags;
	unsigned int desc_cnt, nr_frags, frag_size, f;
	unsigned int hdrlen;
	bool is_lso, is_udp = 0;

	is_lso = (skb_shinfo(skb)->gso_size != 0);

	if (is_lso) {
		is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP);

		/* length of headers */
		if (is_udp)
			/* only queue eth + ip headers separately for UDP */
			hdrlen = skb_transport_offset(skb);
		else
			hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
		if (skb_headlen(skb) < hdrlen) {
			netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
			/* if this is required, would need to copy to single buffer */
			return NETDEV_TX_BUSY;
		}
	} else
		hdrlen = min(skb_headlen(skb), bp->max_tx_length);

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
	netdev_vdbg(bp->dev,
		    "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
		    queue_index, skb->len, skb->head, skb->data,
		    skb_tail_pointer(skb), skb_end_pointer(skb));
	print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
		       skb->data, 16, true);
#endif

	/* Count how many TX buffer descriptors are needed to send this
	 * socket buffer: skb fragments of jumbo frames may need to be
	 * split into many buffer descriptors.
	 */
	if (is_lso && (skb_headlen(skb) > hdrlen))
		/* extra header descriptor if also payload in first buffer */
		desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1;
	else
		desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++) {
		frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
		desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
	}

	spin_lock_irqsave(&bp->lock, flags);

	/* This is a hard error, log it. */
	if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
		       bp->tx_ring_size) < desc_cnt) {
		netif_stop_subqueue(dev, queue_index);
		spin_unlock_irqrestore(&bp->lock, flags);
		netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
			   queue->tx_head, queue->tx_tail);
		return NETDEV_TX_BUSY;
	}

	if (macb_clear_csum(skb)) {
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	/* Map socket buffer for DMA transfer */
	if (!macb_tx_map(bp, queue, skb, hdrlen)) {
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	/* Make newly initialized descriptor visible to hardware */
	wmb();
	skb_tx_timestamp(skb);

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
		netif_stop_subqueue(dev, queue_index);

unlock:
	spin_unlock_irqrestore(&bp->lock, flags);

	return NETDEV_TX_OK;
}
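
/* Example of the desc_cnt computation above, assuming a hypothetical
 * max_tx_length of 2048: a non-LSO skb with a 1000-byte linear area and
 * one 3000-byte fragment needs DIV_ROUND_UP(1000, 2048) +
 * DIV_ROUND_UP(3000, 2048) = 1 + 2 = 3 buffer descriptors.
 */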

static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
{
	if (!macb_is_gem(bp)) {
		bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
	} else {
		bp->rx_buffer_size = size;

		if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
			netdev_dbg(bp->dev,
				   "RX buffer must be multiple of %d bytes, expanding\n",
				   RX_BUFFER_MULTIPLE);
			bp->rx_buffer_size =
				roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
		}
	}

	netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
		   bp->dev->mtu, bp->rx_buffer_size);
}
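
/* Example: on GEM, a requested RX buffer size of 1522 bytes (a
 * VLAN-tagged frame) is rounded up to the next RX_BUFFER_MULTIPLE
 * boundary, i.e. 1536 bytes.
 */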

static void gem_free_rx_buffers(struct macb *bp)
{
	struct sk_buff		*skb;
	struct macb_dma_desc	*desc;
	struct macb_queue *queue;
	dma_addr_t		addr;
	unsigned int q;
	int i;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		if (!queue->rx_skbuff)
			continue;

		for (i = 0; i < bp->rx_ring_size; i++) {
			skb = queue->rx_skbuff[i];

			if (!skb)
				continue;

			desc = macb_rx_desc(queue, i);
			addr = macb_get_addr(bp, desc);

			dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
					DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			skb = NULL;
		}

		kfree(queue->rx_skbuff);
		queue->rx_skbuff = NULL;
	}
}

static void macb_free_rx_buffers(struct macb *bp)
{
	struct macb_queue *queue = &bp->queues[0];

	if (queue->rx_buffers) {
		dma_free_coherent(&bp->pdev->dev,
				  bp->rx_ring_size * bp->rx_buffer_size,
				  queue->rx_buffers, queue->rx_buffers_dma);
		queue->rx_buffers = NULL;
	}
}

static void macb_free_consistent(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;

	queue = &bp->queues[0];
	bp->macbgem_ops.mog_free_rx_buffers(bp);
	if (queue->rx_ring) {
		dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES(bp),
				queue->rx_ring, queue->rx_ring_dma);
		queue->rx_ring = NULL;
	}

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		kfree(queue->tx_skb);
		queue->tx_skb = NULL;
		if (queue->tx_ring) {
			dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES(bp),
					  queue->tx_ring, queue->tx_ring_dma);
			queue->tx_ring = NULL;
		}
	}
}

static int gem_alloc_rx_buffers(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	int size;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		size = bp->rx_ring_size * sizeof(struct sk_buff *);
		queue->rx_skbuff = kzalloc(size, GFP_KERNEL);
		if (!queue->rx_skbuff)
			return -ENOMEM;
		else
			netdev_dbg(bp->dev,
				   "Allocated %d RX struct sk_buff entries at %p\n",
				   bp->rx_ring_size, queue->rx_skbuff);
	}
	return 0;
}

static int macb_alloc_rx_buffers(struct macb *bp)
{
	struct macb_queue *queue = &bp->queues[0];
	int size;

	size = bp->rx_ring_size * bp->rx_buffer_size;
	queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
					    &queue->rx_buffers_dma, GFP_KERNEL);
	if (!queue->rx_buffers)
		return -ENOMEM;

	netdev_dbg(bp->dev,
		   "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
		   size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers);
	return 0;
}

static int macb_alloc_consistent(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	int size;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		size = TX_RING_BYTES(bp);
		queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
						    &queue->tx_ring_dma,
						    GFP_KERNEL);
		if (!queue->tx_ring)
			goto out_err;
		netdev_dbg(bp->dev,
			   "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
			   q, size, (unsigned long)queue->tx_ring_dma,
			   queue->tx_ring);

		size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
		queue->tx_skb = kmalloc(size, GFP_KERNEL);
		if (!queue->tx_skb)
			goto out_err;

		size = RX_RING_BYTES(bp);
		queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
						 &queue->rx_ring_dma, GFP_KERNEL);
		if (!queue->rx_ring)
			goto out_err;
		netdev_dbg(bp->dev,
			   "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
			   size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
	}
	if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
		goto out_err;

	return 0;

out_err:
	macb_free_consistent(bp);
	return -ENOMEM;
}

static void gem_init_rings(struct macb *bp)
{
	struct macb_queue *queue;
	struct macb_dma_desc *desc = NULL;
	unsigned int q;
	int i;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		for (i = 0; i < bp->tx_ring_size; i++) {
			desc = macb_tx_desc(queue, i);
			macb_set_addr(bp, desc, 0);
			desc->ctrl = MACB_BIT(TX_USED);
		}
		desc->ctrl |= MACB_BIT(TX_WRAP);
		queue->tx_head = 0;
		queue->tx_tail = 0;

		queue->rx_tail = 0;
		queue->rx_prepared_head = 0;

		gem_rx_refill(queue);
	}
}

static void macb_init_rings(struct macb *bp)
{
	int i;
	struct macb_dma_desc *desc = NULL;

	macb_init_rx_ring(&bp->queues[0]);

	for (i = 0; i < bp->tx_ring_size; i++) {
		desc = macb_tx_desc(&bp->queues[0], i);
		macb_set_addr(bp, desc, 0);
		desc->ctrl = MACB_BIT(TX_USED);
	}
	bp->queues[0].tx_head = 0;
	bp->queues[0].tx_tail = 0;
	desc->ctrl |= MACB_BIT(TX_WRAP);
}

static void macb_reset_hw(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;

	/* Disable RX and TX (XXX: Should we halt the transmission
	 * more gracefully?)
	 */
	macb_writel(bp, NCR, 0);

	/* Clear the stats registers (XXX: Update stats first?) */
	macb_writel(bp, NCR, MACB_BIT(CLRSTAT));

	/* Clear all status flags */
	macb_writel(bp, TSR, -1);
	macb_writel(bp, RSR, -1);

	/* Disable all interrupts */
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, IDR, -1);
		queue_readl(queue, ISR);
		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
			queue_writel(queue, ISR, -1);
	}
}

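/* Pick the MDC divider from the pclk rate so that the resulting MDIO
 * management clock stays within spec (nominally at most 2.5 MHz per
 * IEEE 802.3); e.g. a 133 MHz pclk falls in the <= 160 MHz bracket and
 * selects GEM_CLK_DIV64, giving roughly 2.1 MHz.
 */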
static u32 gem_mdc_clk_div(struct macb *bp)
{
	u32 config;
	unsigned long pclk_hz = clk_get_rate(bp->pclk);

	if (pclk_hz <= 20000000)
		config = GEM_BF(CLK, GEM_CLK_DIV8);
	else if (pclk_hz <= 40000000)
		config = GEM_BF(CLK, GEM_CLK_DIV16);
	else if (pclk_hz <= 80000000)
		config = GEM_BF(CLK, GEM_CLK_DIV32);
	else if (pclk_hz <= 120000000)
		config = GEM_BF(CLK, GEM_CLK_DIV48);
	else if (pclk_hz <= 160000000)
		config = GEM_BF(CLK, GEM_CLK_DIV64);
	else
		config = GEM_BF(CLK, GEM_CLK_DIV96);

	return config;
}

static u32 macb_mdc_clk_div(struct macb *bp)
{
	u32 config;
	unsigned long pclk_hz;

	if (macb_is_gem(bp))
		return gem_mdc_clk_div(bp);

	pclk_hz = clk_get_rate(bp->pclk);
	if (pclk_hz <= 20000000)
		config = MACB_BF(CLK, MACB_CLK_DIV8);
	else if (pclk_hz <= 40000000)
		config = MACB_BF(CLK, MACB_CLK_DIV16);
	else if (pclk_hz <= 80000000)
		config = MACB_BF(CLK, MACB_CLK_DIV32);
	else
		config = MACB_BF(CLK, MACB_CLK_DIV64);

	return config;
}

/* Get the DMA bus width field of the network configuration register that we
 * should program.  We find the width from decoding the design configuration
 * register to find the maximum supported data bus width.
 */
static u32 macb_dbw(struct macb *bp)
{
	if (!macb_is_gem(bp))
		return 0;

	switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
	case 4:
		return GEM_BF(DBW, GEM_DBW128);
	case 2:
		return GEM_BF(DBW, GEM_DBW64);
	case 1:
	default:
		return GEM_BF(DBW, GEM_DBW32);
	}
}

/* Configure the receive DMA engine
 * - use the correct receive buffer size
 * - set best burst length for DMA operations
 *   (if not supported by FIFO, it will fallback to default)
 * - set both rx/tx packet buffers to full memory size
 * These are configurable parameters for GEM.
 */
static void macb_configure_dma(struct macb *bp)
{
	struct macb_queue *queue;
	u32 buffer_size;
	unsigned int q;
	u32 dmacfg;

	buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE;
	if (macb_is_gem(bp)) {
		dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
			if (q)
				queue_writel(queue, RBQS, buffer_size);
			else
				dmacfg |= GEM_BF(RXBS, buffer_size);
		}
		if (bp->dma_burst_length)
			dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
		dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
		dmacfg &= ~GEM_BIT(ENDIA_PKT);

		if (bp->native_io)
			dmacfg &= ~GEM_BIT(ENDIA_DESC);
		else
			dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */

		if (bp->dev->features & NETIF_F_HW_CSUM)
			dmacfg |= GEM_BIT(TXCOEN);
		else
			dmacfg &= ~GEM_BIT(TXCOEN);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			dmacfg |= GEM_BIT(ADDR64);
#endif
#ifdef CONFIG_MACB_USE_HWSTAMP
		if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
			dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT);
#endif
		netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
			   dmacfg);
		gem_writel(bp, DMACFG, dmacfg);
	}
}

static void macb_init_hw(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;

	u32 config;

	macb_reset_hw(bp);
	macb_set_hwaddr(bp);

	config = macb_mdc_clk_div(bp);
	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
		config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
	config |= MACB_BF(RBOF, NET_IP_ALIGN);	/* Make eth data aligned */
	config |= MACB_BIT(PAE);		/* PAuse Enable */
	config |= MACB_BIT(DRFCS);		/* Discard Rx FCS */
	if (bp->caps & MACB_CAPS_JUMBO)
		config |= MACB_BIT(JFRAME);	/* Enable jumbo frames */
	else
		config |= MACB_BIT(BIG);	/* Receive oversized frames */
	if (bp->dev->flags & IFF_PROMISC)
		config |= MACB_BIT(CAF);	/* Copy All Frames */
	else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
		config |= GEM_BIT(RXCOEN);
	if (!(bp->dev->flags & IFF_BROADCAST))
		config |= MACB_BIT(NBC);	/* No BroadCast */
	config |= macb_dbw(bp);
	macb_writel(bp, NCFGR, config);
	if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
		gem_writel(bp, JML, bp->jumbo_max_len);
	bp->speed = SPEED_10;
	bp->duplex = DUPLEX_HALF;
	bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
	if (bp->caps & MACB_CAPS_JUMBO)
		bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;

	macb_configure_dma(bp);

	/* Initialize TX and RX buffers */
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, RBQPH, upper_32_bits(queue->rx_ring_dma));
#endif
		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
#endif

		/* Enable interrupts */
		queue_writel(queue, IER,
			     MACB_RX_INT_FLAGS |
			     MACB_TX_INT_FLAGS |
			     MACB_BIT(HRESP));
	}

	/* Enable TX and RX */
	macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
}

/* The hash address register is 64 bits long and takes up two
 * locations in the memory map.  The least significant bits are stored
 * in EMAC_HSL and the most significant bits in EMAC_HSH.
 *
 * The unicast hash enable and the multicast hash enable bits in the
 * network configuration register enable the reception of hash matched
 * frames. The destination address is reduced to a 6 bit index into
 * the 64 bit hash register using the following hash function.  The
 * hash function is an exclusive or of every sixth bit of the
 * destination address.
 *
 * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
 * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
 * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
 * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
 * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
 * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
 *
 * da[0] represents the least significant bit of the first byte
 * received, that is, the multicast/unicast indicator, and da[47]
 * represents the most significant bit of the last byte received.  If
 * the hash index, hi[n], points to a bit that is set in the hash
 * register then the frame will be matched according to whether the
 * frame is multicast or unicast.  A multicast match will be signalled
 * if the multicast hash enable bit is set, da[0] is 1 and the hash
 * index points to a bit set in the hash register.  A unicast match
 * will be signalled if the unicast hash enable bit is set, da[0] is 0
 * and the hash index points to a bit set in the hash register.  To
 * receive all multicast frames, the hash register should be set with
 * all ones and the multicast hash enable bit should be set in the
 * network configuration register.
 */
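
/* Worked example: for the broadcast address ff:ff:ff:ff:ff:ff every da[n]
 * bit is one, so each index bit hi[j] is the XOR of eight ones, i.e. zero;
 * that address therefore hashes to index 0, bit 0 of HRB.
 */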

static inline int hash_bit_value(int bitnr, __u8 *addr)
{
	if (addr[bitnr / 8] & (1 << (bitnr % 8)))
		return 1;
	return 0;
}

/* Return the hash index value for the specified address. */
static int hash_get_index(__u8 *addr)
{
	int i, j, bitval;
	int hash_index = 0;

	for (j = 0; j < 6; j++) {
		for (i = 0, bitval = 0; i < 8; i++)
			bitval ^= hash_bit_value(i * 6 + j, addr);

		hash_index |= (bitval << j);
	}

	return hash_index;
}

/* Add multicast addresses to the internal multicast-hash table. */
static void macb_sethashtable(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	unsigned long mc_filter[2];
	unsigned int bitnr;
	struct macb *bp = netdev_priv(dev);

	mc_filter[0] = 0;
	mc_filter[1] = 0;

	netdev_for_each_mc_addr(ha, dev) {
		bitnr = hash_get_index(ha->addr);
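		/* bitnr is 0..63: bits 31..0 land in HRB, bits 63..32 in HRT */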
		mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
	}

	macb_or_gem_writel(bp, HRB, mc_filter[0]);
	macb_or_gem_writel(bp, HRT, mc_filter[1]);
}

/* Enable/Disable promiscuous and multicast modes. */
static void macb_set_rx_mode(struct net_device *dev)
{
	unsigned long cfg;
	struct macb *bp = netdev_priv(dev);

	cfg = macb_readl(bp, NCFGR);

	if (dev->flags & IFF_PROMISC) {
		/* Enable promiscuous mode */
		cfg |= MACB_BIT(CAF);

		/* Disable RX checksum offload */
		if (macb_is_gem(bp))
			cfg &= ~GEM_BIT(RXCOEN);
	} else {
		/* Disable promiscuous mode */
		cfg &= ~MACB_BIT(CAF);

		/* Enable RX checksum offload only if requested */
		if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
			cfg |= GEM_BIT(RXCOEN);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Enable all multicast mode */
		macb_or_gem_writel(bp, HRB, -1);
		macb_or_gem_writel(bp, HRT, -1);
		cfg |= MACB_BIT(NCFGR_MTI);
	} else if (!netdev_mc_empty(dev)) {
		/* Enable specific multicasts */
		macb_sethashtable(dev);
		cfg |= MACB_BIT(NCFGR_MTI);
	} else if (dev->flags & (~IFF_ALLMULTI)) {
		/* Disable all multicast mode */
		macb_or_gem_writel(bp, HRB, 0);
		macb_or_gem_writel(bp, HRT, 0);
		cfg &= ~MACB_BIT(NCFGR_MTI);
	}

	macb_writel(bp, NCFGR, cfg);
}

static int macb_open(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
	struct macb_queue *queue;
	unsigned int q;
	int err;

	netdev_dbg(bp->dev, "open\n");

	/* carrier starts down */
	netif_carrier_off(dev);

	/* if the phy is not yet registered, retry later */
	if (!dev->phydev)
		return -EAGAIN;

	/* RX buffers initialization */
	macb_init_rx_buffer_size(bp, bufsz);

	err = macb_alloc_consistent(bp);
	if (err) {
		netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
			   err);
		return err;
	}

	bp->macbgem_ops.mog_init_rings(bp);
	macb_init_hw(bp);

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		napi_enable(&queue->napi);

	/* schedule a link state check */
	phy_start(dev->phydev);

	netif_tx_start_all_queues(dev);

	if (bp->ptp_info)
		bp->ptp_info->ptp_init(dev);

	return 0;
}

static int macb_close(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned long flags;
	unsigned int q;

	netif_tx_stop_all_queues(dev);

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		napi_disable(&queue->napi);

	if (dev->phydev)
		phy_stop(dev->phydev);

	spin_lock_irqsave(&bp->lock, flags);
	macb_reset_hw(bp);
	netif_carrier_off(dev);
	spin_unlock_irqrestore(&bp->lock, flags);

	macb_free_consistent(bp);

	if (bp->ptp_info)
		bp->ptp_info->ptp_remove(dev);

	return 0;
}

static int macb_change_mtu(struct net_device *dev, int new_mtu)
{
	if (netif_running(dev))
		return -EBUSY;

	dev->mtu = new_mtu;

	return 0;
}

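/* Snapshot the hardware statistics registers and accumulate them into the
 * 64-bit software counters; the octet counters are 64 bits wide in hardware
 * and are read as a low/high register pair.
 */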
static void gem_update_stats(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int i, q, idx;
	unsigned long *stat;

	u32 *p = &bp->hw_stats.gem.tx_octets_31_0;

	for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
		u32 offset = gem_statistics[i].offset;
		u64 val = bp->macb_reg_readl(bp, offset);

		bp->ethtool_stats[i] += val;
		*p += val;

		if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
			/* Add GEM_OCTTXH, GEM_OCTRXH */
			val = bp->macb_reg_readl(bp, offset + 4);
			bp->ethtool_stats[i] += ((u64)val) << 32;
			*(++p) += val;
		}
	}

	idx = GEM_STATS_LEN;
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat)
			bp->ethtool_stats[idx++] = *stat;
}

static struct net_device_stats *gem_get_stats(struct macb *bp)
{
	struct gem_stats *hwstat = &bp->hw_stats.gem;
	struct net_device_stats *nstat = &bp->dev->stats;

	gem_update_stats(bp);

	nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
			    hwstat->rx_alignment_errors +
			    hwstat->rx_resource_errors +
			    hwstat->rx_overruns +
			    hwstat->rx_oversize_frames +
			    hwstat->rx_jabbers +
			    hwstat->rx_undersized_frames +
			    hwstat->rx_length_field_frame_errors);
	nstat->tx_errors = (hwstat->tx_late_collisions +
			    hwstat->tx_excessive_collisions +
			    hwstat->tx_underrun +
			    hwstat->tx_carrier_sense_errors);
	nstat->multicast = hwstat->rx_multicast_frames;
	nstat->collisions = (hwstat->tx_single_collision_frames +
			     hwstat->tx_multiple_collision_frames +
			     hwstat->tx_excessive_collisions);
	nstat->rx_length_errors = (hwstat->rx_oversize_frames +
				   hwstat->rx_jabbers +
				   hwstat->rx_undersized_frames +
				   hwstat->rx_length_field_frame_errors);
	nstat->rx_over_errors = hwstat->rx_resource_errors;
	nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
	nstat->rx_frame_errors = hwstat->rx_alignment_errors;
	nstat->rx_fifo_errors = hwstat->rx_overruns;
	nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
	nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
	nstat->tx_fifo_errors = hwstat->tx_underrun;

	return nstat;
}

static void gem_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct macb *bp;

	bp = netdev_priv(dev);
	gem_update_stats(bp);
	memcpy(data, &bp->ethtool_stats, sizeof(u64)
			* (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
}

static int gem_get_sset_count(struct net_device *dev, int sset)
{
	struct macb *bp = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
{
	char stat_string[ETH_GSTRING_LEN];
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned int i;
	unsigned int q;

	switch (sset) {
	case ETH_SS_STATS:
		for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
			memcpy(p, gem_statistics[i].stat_string,
			       ETH_GSTRING_LEN);

		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
			for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) {
				snprintf(stat_string, ETH_GSTRING_LEN, "q%d_%s",
						q, queue_statistics[i].stat_string);
				memcpy(p, stat_string, ETH_GSTRING_LEN);
			}
		}
		break;
	}
}

static struct net_device_stats *macb_get_stats(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &bp->dev->stats;
	struct macb_stats *hwstat = &bp->hw_stats.macb;

	if (macb_is_gem(bp))
		return gem_get_stats(bp);

	/* read stats from hardware */
	macb_update_stats(bp);

	/* Convert HW stats into netdevice stats */
	nstat->rx_errors = (hwstat->rx_fcs_errors +
			    hwstat->rx_align_errors +
			    hwstat->rx_resource_errors +
			    hwstat->rx_overruns +
			    hwstat->rx_oversize_pkts +
			    hwstat->rx_jabbers +
			    hwstat->rx_undersize_pkts +
			    hwstat->rx_length_mismatch);
	nstat->tx_errors = (hwstat->tx_late_cols +
			    hwstat->tx_excessive_cols +
			    hwstat->tx_underruns +
			    hwstat->tx_carrier_errors +
			    hwstat->sqe_test_errors);
	nstat->collisions = (hwstat->tx_single_cols +
			     hwstat->tx_multiple_cols +
			     hwstat->tx_excessive_cols);
	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
				   hwstat->rx_jabbers +
				   hwstat->rx_undersize_pkts +
				   hwstat->rx_length_mismatch);
	nstat->rx_over_errors = hwstat->rx_resource_errors +
				   hwstat->rx_overruns;
	nstat->rx_crc_errors = hwstat->rx_fcs_errors;
	nstat->rx_frame_errors = hwstat->rx_align_errors;
	nstat->rx_fifo_errors = hwstat->rx_overruns;
	/* XXX: What does "missed" mean? */
	nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
	nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
	nstat->tx_fifo_errors = hwstat->tx_underruns;
	/* Don't know about heartbeat or window errors... */

	return nstat;
}

static int macb_get_regs_len(struct net_device *netdev)
{
	return MACB_GREGS_NBR * sizeof(u32);
}

static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *p)
{
	struct macb *bp = netdev_priv(dev);
	unsigned int tail, head;
	u32 *regs_buff = p;

	regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
			| MACB_GREGS_VERSION;

	tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail);
	head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head);

	regs_buff[0]  = macb_readl(bp, NCR);
	regs_buff[1]  = macb_or_gem_readl(bp, NCFGR);
	regs_buff[2]  = macb_readl(bp, NSR);
	regs_buff[3]  = macb_readl(bp, TSR);
	regs_buff[4]  = macb_readl(bp, RBQP);
	regs_buff[5]  = macb_readl(bp, TBQP);
	regs_buff[6]  = macb_readl(bp, RSR);
	regs_buff[7]  = macb_readl(bp, IMR);

	regs_buff[8]  = tail;
	regs_buff[9]  = head;
	regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
	regs_buff[11] = macb_tx_dma(&bp->queues[0], head);

	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
		regs_buff[12] = macb_or_gem_readl(bp, USRIO);
	if (macb_is_gem(bp))
		regs_buff[13] = gem_readl(bp, DMACFG);
}

static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct macb *bp = netdev_priv(netdev);

	wol->supported = 0;
	wol->wolopts = 0;

	if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
		wol->supported = WAKE_MAGIC;

		if (bp->wol & MACB_WOL_ENABLED)
			wol->wolopts |= WAKE_MAGIC;
	}
}

static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct macb *bp = netdev_priv(netdev);

	if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
	    (wol->wolopts & ~WAKE_MAGIC))
		return -EOPNOTSUPP;

	if (wol->wolopts & WAKE_MAGIC)
		bp->wol |= MACB_WOL_ENABLED;
	else
		bp->wol &= ~MACB_WOL_ENABLED;

	device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);

	return 0;
}

static void macb_get_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct macb *bp = netdev_priv(netdev);

	ring->rx_max_pending = MAX_RX_RING_SIZE;
	ring->tx_max_pending = MAX_TX_RING_SIZE;

	ring->rx_pending = bp->rx_ring_size;
	ring->tx_pending = bp->tx_ring_size;
}
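
/* Note on macb_set_ringparam() below: requested sizes are clamped to the
 * supported range and rounded up to the next power of two, so the rings
 * actually installed can be larger than requested.
 */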

static int macb_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring)
{
	struct macb *bp = netdev_priv(netdev);
	u32 new_rx_size, new_tx_size;
	unsigned int reset = 0;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_rx_size = clamp_t(u32, ring->rx_pending,
			      MIN_RX_RING_SIZE, MAX_RX_RING_SIZE);
	new_rx_size = roundup_pow_of_two(new_rx_size);

	new_tx_size = clamp_t(u32, ring->tx_pending,
			      MIN_TX_RING_SIZE, MAX_TX_RING_SIZE);
	new_tx_size = roundup_pow_of_two(new_tx_size);

	if ((new_tx_size == bp->tx_ring_size) &&
	    (new_rx_size == bp->rx_ring_size)) {
		/* nothing to do */
		return 0;
	}

	if (netif_running(bp->dev)) {
		reset = 1;
		macb_close(bp->dev);
	}

	bp->rx_ring_size = new_rx_size;
	bp->tx_ring_size = new_tx_size;

	if (reset)
		macb_open(bp->dev);

	return 0;
}

#ifdef CONFIG_MACB_USE_HWSTAMP
static unsigned int gem_get_tsu_rate(struct macb *bp)
{
	struct clk *tsu_clk;
	unsigned int tsu_rate;

	tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk");
	if (!IS_ERR(tsu_clk)) {
		tsu_rate = clk_get_rate(tsu_clk);
	} else if (!IS_ERR(bp->pclk)) {
		/* try pclk instead */
		tsu_clk = bp->pclk;
		tsu_rate = clk_get_rate(tsu_clk);
	} else {
		return -ENOTSUPP;
	}
	return tsu_rate;
}

static s32 gem_get_ptp_max_adj(void)
{
	return 64000000;
}

static int gem_get_ts_info(struct net_device *dev,
			   struct ethtool_ts_info *info)
{
	struct macb *bp = netdev_priv(dev);

	if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) {
		ethtool_op_get_ts_info(dev, info);
		return 0;
	}

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE |
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types =
		(1 << HWTSTAMP_TX_ONESTEP_SYNC) |
		(1 << HWTSTAMP_TX_OFF) |
		(1 << HWTSTAMP_TX_ON);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_ALL);

	info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1;

	return 0;
}

static struct macb_ptp_info gem_ptp_info = {
	.ptp_init	 = gem_ptp_init,
	.ptp_remove	 = gem_ptp_remove,
	.get_ptp_max_adj = gem_get_ptp_max_adj,
	.get_tsu_rate	 = gem_get_tsu_rate,
	.get_ts_info	 = gem_get_ts_info,
	.get_hwtst	 = gem_get_hwtst,
	.set_hwtst	 = gem_set_hwtst,
};
#endif

static int macb_get_ts_info(struct net_device *netdev,
			    struct ethtool_ts_info *info)
{
	struct macb *bp = netdev_priv(netdev);

	if (bp->ptp_info)
		return bp->ptp_info->get_ts_info(netdev, info);

	return ethtool_op_get_ts_info(netdev, info);
}

static void gem_enable_flow_filters(struct macb *bp, bool enable)
{
	struct ethtool_rx_fs_item *item;
	u32 t2_scr;
	int num_t2_scr;

	num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8));

	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
		struct ethtool_rx_flow_spec *fs = &item->fs;
		struct ethtool_tcpip4_spec *tp4sp_m;

		if (fs->location >= num_t2_scr)
			continue;

		t2_scr = gem_readl_n(bp, SCRT2, fs->location);

		/* enable/disable screener regs for the flow entry */
		t2_scr = GEM_BFINS(ETHTEN, enable, t2_scr);

		/* only enable fields with no masking */
		tp4sp_m = &(fs->m_u.tcp_ip4_spec);

		if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF))
			t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr);
		else
			t2_scr = GEM_BFINS(CMPAEN, 0, t2_scr);

		if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF))
			t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr);
		else
			t2_scr = GEM_BFINS(CMPBEN, 0, t2_scr);

		if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)))
			t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr);
		else
			t2_scr = GEM_BFINS(CMPCEN, 0, t2_scr);

		gem_writel_n(bp, SCRT2, fs->location, t2_scr);
	}
}
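
/* Program the type-2 screener compare registers for one flow rule: a 4-tuple
 * rule can use up to three compare register pairs (IPv4 source, IPv4
 * destination, ports), and a field is only compared when its mask is all-ones.
 */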

static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m;
	uint16_t index = fs->location;
	u32 w0, w1, t2_scr;
	bool cmp_a = false;
	bool cmp_b = false;
	bool cmp_c = false;

	tp4sp_v = &(fs->h_u.tcp_ip4_spec);
	tp4sp_m = &(fs->m_u.tcp_ip4_spec);

	/* ignore field if any masking set */
	if (tp4sp_m->ip4src == 0xFFFFFFFF) {
		/* 1st compare reg - IP source address */
		w0 = 0;
		w1 = 0;
		w0 = tp4sp_v->ip4src;
		w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
		w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
		w1 = GEM_BFINS(T2OFST, ETYPE_SRCIP_OFFSET, w1);
		gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0);
		gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1);
		cmp_a = true;
	}

	/* ignore field if any masking set */
	if (tp4sp_m->ip4dst == 0xFFFFFFFF) {
		/* 2nd compare reg - IP destination address */
		w0 = 0;
		w1 = 0;
		w0 = tp4sp_v->ip4dst;
		w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
		w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
		w1 = GEM_BFINS(T2OFST, ETYPE_DSTIP_OFFSET, w1);
		gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0);
		gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1);
		cmp_b = true;
	}

	/* ignore both port fields if masking set in both */
	if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) {
		/* 3rd compare reg - source port, destination port */
		w0 = 0;
		w1 = 0;
		w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1);
		if (tp4sp_m->psrc == tp4sp_m->pdst) {
			w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0);
			w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
			w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
			w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
		} else {
			/* only one port definition */
			w1 = GEM_BFINS(T2DISMSK, 0, w1); /* 16-bit compare */
			w0 = GEM_BFINS(T2MASK, 0xFFFF, w0);
			if (tp4sp_m->psrc == 0xFFFF) { /* src port */
				w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0);
				w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
			} else { /* dst port */
				w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
				w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1);
			}
		}
		gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0);
		gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1);
		cmp_c = true;
	}

	t2_scr = 0;
	t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr);
	t2_scr = GEM_BFINS(ETHT2IDX, SCRT2_ETHT, t2_scr);
	if (cmp_a)
		t2_scr = GEM_BFINS(CMPA, GEM_IP4SRC_CMP(index), t2_scr);
	if (cmp_b)
		t2_scr = GEM_BFINS(CMPB, GEM_IP4DST_CMP(index), t2_scr);
	if (cmp_c)
		t2_scr = GEM_BFINS(CMPC, GEM_PORT_CMP(index), t2_scr);
	gem_writel_n(bp, SCRT2, index, t2_scr);
}

static int gem_add_flow_filter(struct net_device *netdev,
		struct ethtool_rxnfc *cmd)
{
	struct macb *bp = netdev_priv(netdev);
	struct ethtool_rx_flow_spec *fs = &cmd->fs;
	struct ethtool_rx_fs_item *item, *newfs;
	unsigned long flags;
	int ret = -EINVAL;
	bool added = false;

	newfs = kmalloc(sizeof(*newfs), GFP_KERNEL);
	if (newfs == NULL)
		return -ENOMEM;
	memcpy(&newfs->fs, fs, sizeof(newfs->fs));

	netdev_dbg(netdev,
			"Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
			fs->flow_type, (int)fs->ring_cookie, fs->location,
			htonl(fs->h_u.tcp_ip4_spec.ip4src),
			htonl(fs->h_u.tcp_ip4_spec.ip4dst),
			htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst));

	spin_lock_irqsave(&bp->rx_fs_lock, flags);

	/* find correct place to add in list */
	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
		if (item->fs.location > newfs->fs.location) {
			list_add_tail(&newfs->list, &item->list);
			added = true;
			break;
		} else if (item->fs.location == fs->location) {
			netdev_err(netdev, "Rule not added: location %d not free!\n",
					fs->location);
			ret = -EBUSY;
			goto err;
		}
	}
	if (!added)
		list_add_tail(&newfs->list, &bp->rx_fs_list.list);

	gem_prog_cmp_regs(bp, fs);
	bp->rx_fs_list.count++;
	/* enable filtering if NTUPLE on */
	if (netdev->features & NETIF_F_NTUPLE)
		gem_enable_flow_filters(bp, 1);

	spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
	return 0;

err:
	spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
	kfree(newfs);
	return ret;
}

static int gem_del_flow_filter(struct net_device *netdev,
		struct ethtool_rxnfc *cmd)
{
	struct macb *bp = netdev_priv(netdev);
	struct ethtool_rx_fs_item *item;
	struct ethtool_rx_flow_spec *fs;
	unsigned long flags;

	spin_lock_irqsave(&bp->rx_fs_lock, flags);

	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
		if (item->fs.location == cmd->fs.location) {
			/* disable screener regs for the flow entry */
			fs = &(item->fs);
			netdev_dbg(netdev,
					"Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
					fs->flow_type, (int)fs->ring_cookie, fs->location,
					htonl(fs->h_u.tcp_ip4_spec.ip4src),
					htonl(fs->h_u.tcp_ip4_spec.ip4dst),
					htons(fs->h_u.tcp_ip4_spec.psrc),
					htons(fs->h_u.tcp_ip4_spec.pdst));

			gem_writel_n(bp, SCRT2, fs->location, 0);

			list_del(&item->list);
			bp->rx_fs_list.count--;
			spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
			kfree(item);
			return 0;
		}
	}

	spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
	return -EINVAL;
}

static int gem_get_flow_entry(struct net_device *netdev,
		struct ethtool_rxnfc *cmd)
{
	struct macb *bp = netdev_priv(netdev);
	struct ethtool_rx_fs_item *item;

	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
		if (item->fs.location == cmd->fs.location) {
			memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs));
			return 0;
		}
	}
	return -EINVAL;
}

static int gem_get_all_flow_entries(struct net_device *netdev,
		struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct macb *bp = netdev_priv(netdev);
	struct ethtool_rx_fs_item *item;
	uint32_t cnt = 0;

	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
		if (cnt == cmd->rule_cnt)
			return -EMSGSIZE;
		rule_locs[cnt] = item->fs.location;
		cnt++;
	}
	cmd->data = bp->max_tuples;
	cmd->rule_cnt = cnt;

	return 0;
}

static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
		u32 *rule_locs)
{
	struct macb *bp = netdev_priv(netdev);
	int ret = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = bp->num_queues;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = bp->rx_fs_list.count;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = gem_get_flow_entry(netdev, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = gem_get_all_flow_entries(netdev, cmd, rule_locs);
		break;
	default:
		netdev_err(netdev,
			  "Command parameter %d is not supported\n", cmd->cmd);
		ret = -EOPNOTSUPP;
	}

	return ret;
}

static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
	struct macb *bp = netdev_priv(netdev);
	int ret;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		if ((cmd->fs.location >= bp->max_tuples)
				|| (cmd->fs.ring_cookie >= bp->num_queues)) {
			ret = -EINVAL;
			break;
		}
		ret = gem_add_flow_filter(netdev, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = gem_del_flow_filter(netdev, cmd);
		break;
	default:
		netdev_err(netdev,
			  "Command parameter %d is not supported\n", cmd->cmd);
		ret = -EOPNOTSUPP;
	}

	return ret;
}

static const struct ethtool_ops macb_ethtool_ops = {
	.get_regs_len		= macb_get_regs_len,
	.get_regs		= macb_get_regs,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_wol		= macb_get_wol,
	.set_wol		= macb_set_wol,
	.get_link_ksettings     = phy_ethtool_get_link_ksettings,
	.set_link_ksettings     = phy_ethtool_set_link_ksettings,
	.get_ringparam		= macb_get_ringparam,
	.set_ringparam		= macb_set_ringparam,
};

static const struct ethtool_ops gem_ethtool_ops = {
	.get_regs_len		= macb_get_regs_len,
	.get_regs		= macb_get_regs,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= macb_get_ts_info,
	.get_ethtool_stats	= gem_get_ethtool_stats,
	.get_strings		= gem_get_ethtool_strings,
	.get_sset_count		= gem_get_sset_count,
	.get_link_ksettings     = phy_ethtool_get_link_ksettings,
	.set_link_ksettings     = phy_ethtool_set_link_ksettings,
	.get_ringparam		= macb_get_ringparam,
	.set_ringparam		= macb_set_ringparam,
	.get_rxnfc			= gem_get_rxnfc,
	.set_rxnfc			= gem_set_rxnfc,
};

static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct phy_device *phydev = dev->phydev;
	struct macb *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

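	/* Without PTP support, all ioctls (including the hardware timestamp
	 * requests) are passed through to the PHY.
	 */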
	if (!bp->ptp_info)
		return phy_mii_ioctl(phydev, rq, cmd);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return bp->ptp_info->set_hwtst(dev, rq, cmd);
	case SIOCGHWTSTAMP:
		return bp->ptp_info->get_hwtst(dev, rq);
	default:
		return phy_mii_ioctl(phydev, rq, cmd);
	}
}

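/* ndo_set_features handler: propagate TX/RX checksum-offload changes to the
 * GEM DMACFG/NCFGR bits and toggle the type-2 RX flow filters for NTUPLE.
 */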
static int macb_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct macb *bp = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	/* TX checksum offload */
	if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) {
		u32 dmacfg;

		dmacfg = gem_readl(bp, DMACFG);
		if (features & NETIF_F_HW_CSUM)
			dmacfg |= GEM_BIT(TXCOEN);
		else
			dmacfg &= ~GEM_BIT(TXCOEN);
		gem_writel(bp, DMACFG, dmacfg);
	}

	/* RX checksum offload */
	if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) {
		u32 netcfg;

		netcfg = gem_readl(bp, NCFGR);
		if (features & NETIF_F_RXCSUM &&
		    !(netdev->flags & IFF_PROMISC))
			netcfg |= GEM_BIT(RXCOEN);
		else
			netcfg &= ~GEM_BIT(RXCOEN);
		gem_writel(bp, NCFGR, netcfg);
	}

	/* RX Flow Filters */
	if ((changed & NETIF_F_NTUPLE) && macb_is_gem(bp)) {
		bool turn_on = features & NETIF_F_NTUPLE;

		gem_enable_flow_filters(bp, turn_on);
	}
	return 0;
}

static const struct net_device_ops macb_netdev_ops = {
	.ndo_open		= macb_open,
	.ndo_stop		= macb_close,
	.ndo_start_xmit		= macb_start_xmit,
	.ndo_set_rx_mode	= macb_set_rx_mode,
	.ndo_get_stats		= macb_get_stats,
	.ndo_do_ioctl		= macb_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= macb_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= macb_poll_controller,
#endif
	.ndo_set_features	= macb_set_features,
	.ndo_features_check	= macb_features_check,
};

/* Configure peripheral capabilities according to device tree
 * and integration options used
 */
static void macb_configure_caps(struct macb *bp,
				const struct macb_config *dt_conf)
{
	u32 dcfg;

	if (dt_conf)
		bp->caps = dt_conf->caps;

	if (hw_is_gem(bp->regs, bp->native_io)) {
		bp->caps |= MACB_CAPS_MACB_IS_GEM;

		dcfg = gem_readl(bp, DCFG1);
		if (GEM_BFEXT(IRQCOR, dcfg) == 0)
			bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
		dcfg = gem_readl(bp, DCFG2);
		if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
			bp->caps |= MACB_CAPS_FIFO_MODE;
#ifdef CONFIG_MACB_USE_HWSTAMP
		if (gem_has_ptp(bp)) {
			if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5))) {
				pr_err("GEM doesn't support hardware ptp.\n");
			} else {
				bp->hw_dma_cap |= HW_DMA_CAP_PTP;
				bp->ptp_info = &gem_ptp_info;
			}
		}
#endif
	}

	dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
}

static void macb_probe_queues(void __iomem *mem,
			      bool native_io,
			      unsigned int *queue_mask,
			      unsigned int *num_queues)
{
	unsigned int hw_q;

	*queue_mask = 0x1;
	*num_queues = 1;

	/* is it macb or gem?
	 *
	 * We need to read directly from the hardware here because
	 * we are early in the probe process and don't have the
	 * MACB_CAPS_MACB_IS_GEM flag positioned
	 */
	if (!hw_is_gem(mem, native_io))
		return;

	/* bit 0 is never set but queue 0 always exists */
	*queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff;

	*queue_mask |= 0x1;

	for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
		if (*queue_mask & (1 << hw_q))
			(*num_queues)++;
}

static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
			 struct clk **hclk, struct clk **tx_clk,
			 struct clk **rx_clk)
{
	struct macb_platform_data *pdata;
	int err;

	pdata = dev_get_platdata(&pdev->dev);
	if (pdata) {
		*pclk = pdata->pclk;
		*hclk = pdata->hclk;
	} else {
		*pclk = devm_clk_get(&pdev->dev, "pclk");
		*hclk = devm_clk_get(&pdev->dev, "hclk");
	}

	if (IS_ERR(*pclk)) {
		err = PTR_ERR(*pclk);
		dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
		return err;
	}

	if (IS_ERR(*hclk)) {
		err = PTR_ERR(*hclk);
		dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "rx_clk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	err = clk_prepare_enable(*pclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
		return err;
	}

	err = clk_prepare_enable(*hclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err);
		goto err_disable_pclk;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
		goto err_disable_hclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err);
		goto err_disable_txclk;
	}

	return 0;

err_disable_txclk:
	clk_disable_unprepare(*tx_clk);

err_disable_hclk:
	clk_disable_unprepare(*hclk);

err_disable_pclk:
	clk_disable_unprepare(*pclk);

	return err;
}

static int macb_init(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	unsigned int hw_q, q;
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	int err;
	u32 val, reg;

	bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
	bp->rx_ring_size = DEFAULT_RX_RING_SIZE;

	/* set the queue register mapping once for all: queue0 has a special
	 * register mapping but we don't want to test the queue index then
	 * compute the corresponding register offset at run time.
	 */
	for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
		if (!(bp->queue_mask & (1 << hw_q)))
			continue;

		queue = &bp->queues[q];
		queue->bp = bp;
		netif_napi_add(dev, &queue->napi, macb_poll, 64);
		if (hw_q) {
			queue->ISR  = GEM_ISR(hw_q - 1);
			queue->IER  = GEM_IER(hw_q - 1);
			queue->IDR  = GEM_IDR(hw_q - 1);
			queue->IMR  = GEM_IMR(hw_q - 1);
			queue->TBQP = GEM_TBQP(hw_q - 1);
			queue->RBQP = GEM_RBQP(hw_q - 1);
			queue->RBQS = GEM_RBQS(hw_q - 1);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
			if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
				queue->TBQPH = GEM_TBQPH(hw_q - 1);
				queue->RBQPH = GEM_RBQPH(hw_q - 1);
			}
#endif
		} else {
			/* queue0 uses legacy registers */
			queue->ISR  = MACB_ISR;
			queue->IER  = MACB_IER;
			queue->IDR  = MACB_IDR;
			queue->IMR  = MACB_IMR;
			queue->TBQP = MACB_TBQP;
			queue->RBQP = MACB_RBQP;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
			if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
				queue->TBQPH = MACB_TBQPH;
				queue->RBQPH = MACB_RBQPH;
			}
#endif
		}

		/* get irq: here we use the linux queue index, not the hardware
		 * queue index. the queue irq definitions in the device tree
		 * must remove the optional gaps that could exist in the
		 * hardware queue mask.
		 */
		queue->irq = platform_get_irq(pdev, q);
		err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
				       IRQF_SHARED, dev->name, queue);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to request IRQ %d (error %d)\n",
				queue->irq, err);
			return err;
		}

		INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
		q++;
	}

	dev->netdev_ops = &macb_netdev_ops;

	/* set up the appropriate routines according to the adapter type */
	if (macb_is_gem(bp)) {
		bp->max_tx_length = GEM_MAX_TX_LEN;
		bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
		bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
		bp->macbgem_ops.mog_init_rings = gem_init_rings;
		bp->macbgem_ops.mog_rx = gem_rx;
		dev->ethtool_ops = &gem_ethtool_ops;
	} else {
		bp->max_tx_length = MACB_MAX_TX_LEN;
		bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
		bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
		bp->macbgem_ops.mog_init_rings = macb_init_rings;
		bp->macbgem_ops.mog_rx = macb_rx;
		dev->ethtool_ops = &macb_ethtool_ops;
	}

	/* Set features */
	dev->hw_features = NETIF_F_SG;

	/* Check LSO capability */
	if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6)))
		dev->hw_features |= MACB_NETIF_LSO;

	/* Checksum offload is only available on gem with packet buffer */
	if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	if (bp->caps & MACB_CAPS_SG_DISABLED)
		dev->hw_features &= ~NETIF_F_SG;
	dev->features = dev->hw_features;

	/* Check RX Flow Filters support.
	 * Max Rx flows set by availability of screeners & compare regs:
	 * each 4-tuple define requires 1 T2 screener reg + 3 compare regs
	 */
	reg = gem_readl(bp, DCFG8);
	bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
			GEM_BFEXT(T2SCR, reg));
	if (bp->max_tuples > 0) {
		/* also needs one ethtype match to check IPv4 */
		if (GEM_BFEXT(SCR2ETH, reg) > 0) {
			/* program this reg now */
			reg = 0;
			reg = GEM_BFINS(ETHTCMP, (uint16_t)ETH_P_IP, reg);
			gem_writel_n(bp, ETHT, SCRT2_ETHT, reg);
			/* Filtering is supported in hw but don't enable it in the kernel for now */
			dev->hw_features |= NETIF_F_NTUPLE;
			/* init Rx flow definitions */
			INIT_LIST_HEAD(&bp->rx_fs_list.list);
			bp->rx_fs_list.count = 0;
			spin_lock_init(&bp->rx_fs_lock);
		} else
			bp->max_tuples = 0;
	}

	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
		val = 0;
		if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
			val = GEM_BIT(RGMII);
		else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
			 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
			val = MACB_BIT(RMII);
		else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
			val = MACB_BIT(MII);

		if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
			val |= MACB_BIT(CLKEN);

		macb_or_gem_writel(bp, USRIO, val);
	}

	/* Set MII management clock divider */
	val = macb_mdc_clk_div(bp);
	val |= macb_dbw(bp);
	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
	macb_writel(bp, NCFGR, val);

	return 0;
}

#if defined(CONFIG_OF)
/* 1518 rounded up */
#define AT91ETHER_MAX_RBUFF_SZ	0x600
/* max number of receive buffers */
#define AT91ETHER_MAX_RX_DESCR	9

/* Initialize and start the Receiver and Transmit subsystems */
static int at91ether_start(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	struct macb_queue *q = &lp->queues[0];
	struct macb_dma_desc *desc;
	dma_addr_t addr;
	u32 ctl;
	int i;

	q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
					 (AT91ETHER_MAX_RX_DESCR *
					  macb_dma_desc_get_size(lp)),
					 &q->rx_ring_dma, GFP_KERNEL);
	if (!q->rx_ring)
		return -ENOMEM;

	q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
					    AT91ETHER_MAX_RX_DESCR *
					    AT91ETHER_MAX_RBUFF_SZ,
					    &q->rx_buffers_dma, GFP_KERNEL);
	if (!q->rx_buffers) {
		dma_free_coherent(&lp->pdev->dev,
				  AT91ETHER_MAX_RX_DESCR *
				  macb_dma_desc_get_size(lp),
				  q->rx_ring, q->rx_ring_dma);
		q->rx_ring = NULL;
		return -ENOMEM;
	}

	addr = q->rx_buffers_dma;
	for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
		desc = macb_rx_desc(q, i);
		macb_set_addr(lp, desc, addr);
		desc->ctrl = 0;
		addr += AT91ETHER_MAX_RBUFF_SZ;
	}

	/* Set the Wrap bit on the last descriptor */
	desc->addr |= MACB_BIT(RX_WRAP);

	/* Reset buffer index */
	q->rx_tail = 0;

	/* Program address of descriptor list in Rx Buffer Queue register */
	macb_writel(lp, RBQP, q->rx_ring_dma);

	/* Enable Receive and Transmit */
	ctl = macb_readl(lp, NCR);
	macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));

	return 0;
}

/* Open the ethernet interface */
static int at91ether_open(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	u32 ctl;
	int ret;

	/* Clear internal statistics */
	ctl = macb_readl(lp, NCR);
	macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));

	macb_set_hwaddr(lp);

	ret = at91ether_start(dev);
	if (ret)
		return ret;

	/* Enable MAC interrupts */
	macb_writel(lp, IER, MACB_BIT(RCOMP)	|
			     MACB_BIT(RXUBR)	|
			     MACB_BIT(ISR_TUND)	|
			     MACB_BIT(ISR_RLE)	|
			     MACB_BIT(TCOMP)	|
			     MACB_BIT(ISR_ROVR)	|
			     MACB_BIT(HRESP));

	/* schedule a link state check */
	phy_start(dev->phydev);

	netif_start_queue(dev);

	return 0;
}

/* Close the interface */
static int at91ether_close(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	struct macb_queue *q = &lp->queues[0];
	u32 ctl;

	/* Disable Receiver and Transmitter */
	ctl = macb_readl(lp, NCR);
	macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));

	/* Disable MAC interrupts */
	macb_writel(lp, IDR, MACB_BIT(RCOMP)	|
			     MACB_BIT(RXUBR)	|
			     MACB_BIT(ISR_TUND)	|
			     MACB_BIT(ISR_RLE)	|
			     MACB_BIT(TCOMP)	|
			     MACB_BIT(ISR_ROVR) |
			     MACB_BIT(HRESP));

	netif_stop_queue(dev);

	dma_free_coherent(&lp->pdev->dev,
			  AT91ETHER_MAX_RX_DESCR *
			  macb_dma_desc_get_size(lp),
			  q->rx_ring, q->rx_ring_dma);
	q->rx_ring = NULL;

	dma_free_coherent(&lp->pdev->dev,
			  AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
			  q->rx_buffers, q->rx_buffers_dma);
	q->rx_buffers = NULL;

	return 0;
}

/* Transmit packet */
static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);

	if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
		netif_stop_queue(dev);

		/* Store packet information (to free when Tx completed) */
		lp->skb = skb;
		lp->skb_length = skb->len;
		lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
							DMA_TO_DEVICE);
		if (dma_mapping_error(NULL, lp->skb_physaddr)) {
			dev_kfree_skb_any(skb);
			dev->stats.tx_dropped++;
			netdev_err(dev, "%s: DMA mapping error\n", __func__);
			return NETDEV_TX_OK;
		}

		/* Set address of the data in the Transmit Address register */
		macb_writel(lp, TAR, lp->skb_physaddr);
		/* Set length of the packet in the Transmit Control register */
		macb_writel(lp, TCR, skb->len);

	} else {
		netdev_err(dev, "%s called, but device is busy!\n", __func__);
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}

/* Extract received frames from the buffer descriptors and pass them up.
 * (Called from interrupt context.)
 */
static void at91ether_rx(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	struct macb_queue *q = &lp->queues[0];
	struct macb_dma_desc *desc;
	unsigned char *p_recv;
	struct sk_buff *skb;
	unsigned int pktlen;

	desc = macb_rx_desc(q, q->rx_tail);
	while (desc->addr & MACB_BIT(RX_USED)) {
		p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
		pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
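		/* +2 so skb_reserve() below can align the IP header on a
		 * 32-bit boundary (the usual NET_IP_ALIGN trick)
		 */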
		skb = netdev_alloc_skb(dev, pktlen + 2);
		if (skb) {
			skb_reserve(skb, 2);
			skb_put_data(skb, p_recv, pktlen);

			skb->protocol = eth_type_trans(skb, dev);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pktlen;
			netif_rx(skb);
		} else {
			dev->stats.rx_dropped++;
		}

		if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
			dev->stats.multicast++;

		/* reset ownership bit */
		desc->addr &= ~MACB_BIT(RX_USED);

		/* wrap after last buffer */
		if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
			q->rx_tail = 0;
		else
			q->rx_tail++;

		desc = macb_rx_desc(q, q->rx_tail);
	}
}

/* MAC interrupt handler */
static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct macb *lp = netdev_priv(dev);
	u32 intstatus, ctl;

	/* MAC Interrupt Status register indicates what interrupts are pending.
	 * It is automatically cleared once read.
	 */
	intstatus = macb_readl(lp, ISR);

	/* Receive complete */
	if (intstatus & MACB_BIT(RCOMP))
		at91ether_rx(dev);

	/* Transmit complete */
	if (intstatus & MACB_BIT(TCOMP)) {
		/* The TCOMP bit is set even if the transmission failed */
		if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
			dev->stats.tx_errors++;

		if (lp->skb) {
			dev_kfree_skb_irq(lp->skb);
			lp->skb = NULL;
			dma_unmap_single(&lp->pdev->dev, lp->skb_physaddr,
					 lp->skb_length, DMA_TO_DEVICE);
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += lp->skb_length;
		}
		netif_wake_queue(dev);
	}

	/* Work-around for EMAC Errata section 41.3.1 */
	if (intstatus & MACB_BIT(RXUBR)) {
		ctl = macb_readl(lp, NCR);
		macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
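		/* make sure the receiver-disable write has reached the
		 * controller before the receiver is turned back on
		 */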
		wmb();
		macb_writel(lp, NCR, ctl | MACB_BIT(RE));
	}

	if (intstatus & MACB_BIT(ISR_ROVR))
		netdev_err(dev, "ROVR error\n");

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void at91ether_poll_controller(struct net_device *dev)
{
	unsigned long flags;

	local_irq_save(flags);
	at91ether_interrupt(dev->irq, dev);
	local_irq_restore(flags);
}
#endif

static const struct net_device_ops at91ether_netdev_ops = {
	.ndo_open		= at91ether_open,
	.ndo_stop		= at91ether_close,
	.ndo_start_xmit		= at91ether_start_xmit,
	.ndo_get_stats		= macb_get_stats,
	.ndo_set_rx_mode	= macb_set_rx_mode,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_do_ioctl		= macb_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= at91ether_poll_controller,
#endif
};

static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
			      struct clk **hclk, struct clk **tx_clk,
			      struct clk **rx_clk)
{
	int err;

	/* The RM9200 EMAC runs from a single peripheral clock ("ether_clk");
	 * the remaining clock handles exist only to satisfy the common
	 * clk_init() interface and stay NULL.
	 */
	*hclk = NULL;
	*tx_clk = NULL;
	*rx_clk = NULL;

	*pclk = devm_clk_get(&pdev->dev, "ether_clk");
	if (IS_ERR(*pclk))
		return PTR_ERR(*pclk);

	err = clk_prepare_enable(*pclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
		return err;
	}

	return 0;
}

static int at91ether_init(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(dev);
	int err;
	u32 reg;

	dev->netdev_ops = &at91ether_netdev_ops;
	dev->ethtool_ops = &macb_ethtool_ops;

	err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
			       0, dev->name, dev);
	if (err)
		return err;

	macb_writel(bp, NCR, 0);

	/* MDC clock = pclk / 32; BIG enables reception of 1536-byte frames */
	reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
	if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
		reg |= MACB_BIT(RM9200_RMII);

	macb_writel(bp, NCFGR, reg);

	return 0;
}

static const struct macb_config at91sam9260_config = {
	.caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config pc302gem_config = {
	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d2_config = {
	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d3_config = {
	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
	      | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
};

static const struct macb_config sama5d4_config = {
	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.dma_burst_length = 4,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config emac_config = {
	.clk_init = at91ether_clk_init,
	.init = at91ether_init,
};

static const struct macb_config np4_config = {
	.caps = MACB_CAPS_USRIO_DISABLED,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config zynqmp_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
			MACB_CAPS_JUMBO |
			MACB_CAPS_GEM_HAS_PTP,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
};

static const struct macb_config zynq_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct of_device_id macb_dt_ids[] = {
	{ .compatible = "cdns,at32ap7000-macb" },
	{ .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
	{ .compatible = "cdns,macb" },
	{ .compatible = "cdns,np4-macb", .data = &np4_config },
	{ .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
	{ .compatible = "cdns,gem", .data = &pc302gem_config },
	{ .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
	{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
	{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
	{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
	{ .compatible = "cdns,emac", .data = &emac_config },
	{ .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config },
	{ .compatible = "cdns,zynq-gem", .data = &zynq_config },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, macb_dt_ids);
#endif /* CONFIG_OF */
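
/* A minimal sketch of a device-tree node matched by the table above; the
 * unit address, register window, interrupt number and clock phandles are
 * illustrative assumptions, not taken from any particular board:
 *
 *	ethernet@f0028000 {
 *		compatible = "cdns,macb";
 *		reg = <0xf0028000 0x100>;
 *		interrupts = <34>;
 *		phy-mode = "rmii";
 *		clock-names = "pclk", "hclk", "tx_clk";
 *		clocks = <&emac_clk>, <&emac_clk>, <&emac_clk>;
 *	};
 */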

static const struct macb_config default_gem_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
			MACB_CAPS_JUMBO |
			MACB_CAPS_GEM_HAS_PTP,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
};

static int macb_probe(struct platform_device *pdev)
{
	const struct macb_config *macb_config = &default_gem_config;
	int (*clk_init)(struct platform_device *, struct clk **,
			struct clk **, struct clk **, struct clk **)
					      = macb_config->clk_init;
	int (*init)(struct platform_device *) = macb_config->init;
	struct device_node *np = pdev->dev.of_node;
	struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
	unsigned int queue_mask, num_queues;
	struct macb_platform_data *pdata;
	bool native_io;
	struct phy_device *phydev;
	struct net_device *dev;
	struct resource *regs;
	void __iomem *mem;
	const char *mac;
	struct macb *bp;
	int err;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mem = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	if (np) {
		const struct of_device_id *match;

		match = of_match_node(macb_dt_ids, np);
		if (match && match->data) {
			macb_config = match->data;
			clk_init = macb_config->clk_init;
			init = macb_config->init;
		}
	}

	err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk);
	if (err)
		return err;

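	/* Probe whether CPU and MAC agree on register endianness, so the
	 * matching (native or byte-swapping) register accessors can be
	 * installed below.
	 */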
	native_io = hw_is_native_io(mem);

	macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
	dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
	if (!dev) {
		err = -ENOMEM;
		goto err_disable_clocks;
	}

	dev->base_addr = regs->start;

	SET_NETDEV_DEV(dev, &pdev->dev);

	bp = netdev_priv(dev);
	bp->pdev = pdev;
	bp->dev = dev;
	bp->regs = mem;
	bp->native_io = native_io;
	if (native_io) {
		bp->macb_reg_readl = hw_readl_native;
		bp->macb_reg_writel = hw_writel_native;
	} else {
		bp->macb_reg_readl = hw_readl;
		bp->macb_reg_writel = hw_writel;
	}
	bp->num_queues = num_queues;
	bp->queue_mask = queue_mask;
	if (macb_config)
		bp->dma_burst_length = macb_config->dma_burst_length;
	bp->pclk = pclk;
	bp->hclk = hclk;
	bp->tx_clk = tx_clk;
	bp->rx_clk = rx_clk;
	if (macb_config)
		bp->jumbo_max_len = macb_config->jumbo_max_len;

	bp->wol = 0;
	if (of_get_property(np, "magic-packet", NULL))
		bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
	device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);

	spin_lock_init(&bp->lock);

	/* setup capabilities */
	macb_configure_caps(bp, macb_config);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
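	/* GEM revisions that report DAW64 in DCFG6 can carry up to 44-bit
	 * DMA addresses in the extended (two address words) descriptor
	 * layout, hence the wider DMA mask below.
	 */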
	if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
		dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
		bp->hw_dma_cap |= HW_DMA_CAP_64B;
	}
#endif
	platform_set_drvdata(pdev, dev);

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq < 0) {
		err = dev->irq;
		goto err_out_free_netdev;
	}

	/* MTU range: 68 - 1500 or 10240 */
	dev->min_mtu = GEM_MTU_MIN_SIZE;
	if (bp->caps & MACB_CAPS_JUMBO)
		dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
	else
		dev->max_mtu = ETH_DATA_LEN;

	mac = of_get_mac_address(np);
	if (mac)
		ether_addr_copy(bp->dev->dev_addr, mac);
	else
		macb_get_hwaddr(bp);

	err = of_get_phy_mode(np);
	if (err < 0) {
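		/* Without a usable "phy-mode" property, fall back to board
		 * platform data, e.g. a board-file sketch like this (values
		 * assumed for illustration):
		 *
		 *	static struct macb_platform_data macb_pdata = {
		 *		.is_rmii = 1,
		 *	};
		 */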
		pdata = dev_get_platdata(&pdev->dev);
		if (pdata && pdata->is_rmii)
			bp->phy_interface = PHY_INTERFACE_MODE_RMII;
		else
			bp->phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		bp->phy_interface = err;
	}

	/* IP specific init */
	err = init(pdev);
	if (err)
		goto err_out_free_netdev;

	err = macb_mii_init(bp);
	if (err)
		goto err_out_free_netdev;

	phydev = dev->phydev;

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_unregister_mdio;
	}

	tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task,
		     (unsigned long)bp);

	phy_attached_info(phydev);

	netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
		    macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
		    dev->base_addr, dev->irq, dev->dev_addr);

	return 0;

err_out_unregister_mdio:
	phy_disconnect(dev->phydev);
	mdiobus_unregister(bp->mii_bus);
	of_node_put(bp->phy_node);
	if (np && of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	mdiobus_free(bp->mii_bus);

err_out_free_netdev:
	free_netdev(dev);

err_disable_clocks:
	clk_disable_unprepare(tx_clk);
	clk_disable_unprepare(hclk);
	clk_disable_unprepare(pclk);
	clk_disable_unprepare(rx_clk);

	return err;
}

static int macb_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct macb *bp;
	struct device_node *np = pdev->dev.of_node;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		if (dev->phydev)
			phy_disconnect(dev->phydev);
		mdiobus_unregister(bp->mii_bus);
		if (np && of_phy_is_fixed_link(np))
			of_phy_deregister_fixed_link(np);
		dev->phydev = NULL;
		mdiobus_free(bp->mii_bus);

		unregister_netdev(dev);
		clk_disable_unprepare(bp->tx_clk);
		clk_disable_unprepare(bp->hclk);
		clk_disable_unprepare(bp->pclk);
		clk_disable_unprepare(bp->rx_clk);
		of_node_put(bp->phy_node);
		free_netdev(dev);
	}

	return 0;
}

static int __maybe_unused macb_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

	netif_carrier_off(netdev);
	netif_device_detach(netdev);

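	/* With WoL armed the MAC must stay powered to receive the magic
	 * packet, so only the wake IRQ is set up; otherwise all clocks are
	 * gated to save power.
	 */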
	if (bp->wol & MACB_WOL_ENABLED) {
		macb_writel(bp, IER, MACB_BIT(WOL));
		macb_writel(bp, WOL, MACB_BIT(MAG));
		enable_irq_wake(bp->queues[0].irq);
	} else {
		clk_disable_unprepare(bp->tx_clk);
		clk_disable_unprepare(bp->hclk);
		clk_disable_unprepare(bp->pclk);
		clk_disable_unprepare(bp->rx_clk);
	}

	return 0;
}

static int __maybe_unused macb_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

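	/* Mirror macb_suspend(): disarm wake-on-LAN, or ungate the clocks
	 * that were stopped on the way down.
	 */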
	if (bp->wol & MACB_WOL_ENABLED) {
		macb_writel(bp, IDR, MACB_BIT(WOL));
		macb_writel(bp, WOL, 0);
		disable_irq_wake(bp->queues[0].irq);
	} else {
		clk_prepare_enable(bp->pclk);
		clk_prepare_enable(bp->hclk);
		clk_prepare_enable(bp->tx_clk);
		clk_prepare_enable(bp->rx_clk);
	}

	netif_device_attach(netdev);

	return 0;
}

static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);

static struct platform_driver macb_driver = {
	.probe		= macb_probe,
	.remove		= macb_remove,
	.driver		= {
		.name		= "macb",
		.of_match_table	= of_match_ptr(macb_dt_ids),
		.pm		= &macb_pm_ops,
	},
};

module_platform_driver(macb_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_ALIAS("platform:macb");