/*
 * Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include "macb.h"

#define MACB_RX_BUFFER_SIZE	128
#define RX_BUFFER_MULTIPLE	64  /* bytes */

#define DEFAULT_RX_RING_SIZE	512 /* must be power of 2 */
#define MIN_RX_RING_SIZE	64
#define MAX_RX_RING_SIZE	8192
#define RX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->rx_ring_size)

#define DEFAULT_TX_RING_SIZE	512 /* must be power of 2 */
#define MIN_TX_RING_SIZE	64
#define MAX_TX_RING_SIZE	4096
#define TX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->tx_ring_size)

/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)
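/* Example: with the default 512-entry TX ring, a stopped subqueue is woken
 * again once no more than 3 * 512 / 4 = 384 descriptors are still in flight.
 */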

#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
				 | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
					| MACB_BIT(ISR_RLE)		\
					| MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))

/* Max length of transmit frame must be a multiple of 8 bytes */
#define MACB_TX_LEN_ALIGN	8
#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
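/* Assuming the 11-bit MACB and 14-bit GEM TX frame-length field widths from
 * macb.h, these evaluate to 2040 and 16376 bytes respectively (the largest
 * field value, rounded down to a multiple of MACB_TX_LEN_ALIGN).
 */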

#define GEM_MTU_MIN_SIZE	ETH_MIN_MTU
#define MACB_NETIF_LSO		NETIF_F_TSO

#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define MACB_WOL_ENABLED		(0x1 << 1)

/* Graceful stop timeouts in us. We should allow up to
 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 */
#define MACB_HALT_TIMEOUT	1230
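/* A 1518-byte frame plus preamble and SFD is 12208 bits, i.e. roughly
 * 1221 us at 10 Mbit/s, so 1230 us covers one worst-case frame time.
 */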

/* DMA buffer descriptor might be different size
 * depends on hardware configuration:
 *
 * 1. dma address width 32 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *
 * 2. dma address width 64 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *
 * 3. dma address width 32 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: timestamp word 1
 *    word 4: timestamp word 2
 *
 * 4. dma address width 64 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *    word 5: timestamp word 1
 *    word 6: timestamp word 2
 */
static unsigned int macb_dma_desc_get_size(struct macb *bp)
{
#ifdef MACB_EXT_DESC
	unsigned int desc_size;

	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64);
		break;
	case HW_DMA_CAP_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	default:
		desc_size = sizeof(struct macb_dma_desc);
	}
	return desc_size;
#endif
	return sizeof(struct macb_dma_desc);
}

static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx)
{
#ifdef MACB_EXT_DESC
	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
	case HW_DMA_CAP_PTP:
		desc_idx <<= 1;
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_idx *= 3;
		break;
	default:
		break;
	}
#endif
	return desc_idx;
}
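/* A sketch of the index math, assuming the two-word (8-byte) layouts of the
 * base and extension descriptor structs: with HW_DMA_CAP_64B_PTP a hardware
 * descriptor is 8 + 8 + 8 = 24 bytes (three base slots), hence the "* 3";
 * the single-extension capabilities use 16 bytes, hence the "<< 1".
 */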

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
{
	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
		return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc));
	return NULL;
}
#endif

/* Ring buffer accessors */
static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->tx_ring_size - 1);
}
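/* The ring sizes must be powers of two (see DEFAULT_RX/TX_RING_SIZE), so the
 * "index & (size - 1)" masking used by these wrap helpers is an exact,
 * branch-free modulo.
 */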

static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
					  unsigned int index)
{
	index = macb_tx_ring_wrap(queue->bp, index);
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->tx_ring[index];
}

static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
				       unsigned int index)
{
	return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
}

static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
{
	dma_addr_t offset;

	offset = macb_tx_ring_wrap(queue->bp, index) *
			macb_dma_desc_get_size(queue->bp);

	return queue->tx_ring_dma + offset;
}

static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->rx_ring_size - 1);
}

static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
{
	index = macb_rx_ring_wrap(bp, index);
	index = macb_adj_dma_desc_idx(bp, index);
	return &bp->rx_ring[index];
}

static void *macb_rx_buffer(struct macb *bp, unsigned int index)
{
	return bp->rx_buffers + bp->rx_buffer_size *
	       macb_rx_ring_wrap(bp, index);
}

/* I/O accessors */
static u32 hw_readl_native(struct macb *bp, int offset)
{
	return __raw_readl(bp->regs + offset);
}

static void hw_writel_native(struct macb *bp, int offset, u32 value)
{
	__raw_writel(value, bp->regs + offset);
}

static u32 hw_readl(struct macb *bp, int offset)
{
	return readl_relaxed(bp->regs + offset);
}

static void hw_writel(struct macb *bp, int offset, u32 value)
{
	writel_relaxed(value, bp->regs + offset);
}
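/* The __raw_* accessors above do no byte swapping; they are selected when
 * hw_is_native_io() below reports that the controller registers match the
 * CPU endianness, otherwise the little-endian *_relaxed pair is used.
 */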

/* Find the CPU endianness by using the loopback bit of the NCR register. When
 * the CPU is big endian we need to program swapped mode for management
 * descriptor access.
 */
static bool hw_is_native_io(void __iomem *addr)
{
	u32 value = MACB_BIT(LLB);

	__raw_writel(value, addr + MACB_NCR);
	value = __raw_readl(addr + MACB_NCR);

	/* Write 0 back to disable everything */
	__raw_writel(0, addr + MACB_NCR);

	return value == MACB_BIT(LLB);
}

static bool hw_is_gem(void __iomem *addr, bool native_io)
{
	u32 id;

	if (native_io)
		id = __raw_readl(addr + MACB_MID);
	else
		id = readl_relaxed(addr + MACB_MID);

	return MACB_BFEXT(IDNUM, id) >= 0x2;
}

static void macb_set_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;

	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
	macb_or_gem_writel(bp, SA1B, bottom);
	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
	macb_or_gem_writel(bp, SA1T, top);

	/* Clear unused address register sets */
	macb_or_gem_writel(bp, SA2B, 0);
	macb_or_gem_writel(bp, SA2T, 0);
	macb_or_gem_writel(bp, SA3B, 0);
	macb_or_gem_writel(bp, SA3T, 0);
	macb_or_gem_writel(bp, SA4B, 0);
	macb_or_gem_writel(bp, SA4T, 0);
}

static void macb_get_hwaddr(struct macb *bp)
{
	struct macb_platform_data *pdata;
	u32 bottom;
	u16 top;
	u8 addr[6];
	int i;

	pdata = dev_get_platdata(&bp->pdev->dev);

	/* Check all 4 address registers for a valid address */
	for (i = 0; i < 4; i++) {
		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
		top = macb_or_gem_readl(bp, SA1T + i * 8);

		if (pdata && pdata->rev_eth_addr) {
			addr[5] = bottom & 0xff;
			addr[4] = (bottom >> 8) & 0xff;
			addr[3] = (bottom >> 16) & 0xff;
			addr[2] = (bottom >> 24) & 0xff;
			addr[1] = top & 0xff;
			addr[0] = (top & 0xff00) >> 8;
		} else {
			addr[0] = bottom & 0xff;
			addr[1] = (bottom >> 8) & 0xff;
			addr[2] = (bottom >> 16) & 0xff;
			addr[3] = (bottom >> 24) & 0xff;
			addr[4] = top & 0xff;
			addr[5] = (top >> 8) & 0xff;
		}

		if (is_valid_ether_addr(addr)) {
			memcpy(bp->dev->dev_addr, addr, sizeof(addr));
			return;
		}
	}

	dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
	eth_hw_addr_random(bp->dev);
}

static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct macb *bp = bus->priv;
	int value;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_READ)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)));

	/* wait for end of transfer */
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	value = MACB_BFEXT(DATA, macb_readl(bp, MAN));

	return value;
}

static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct macb *bp = bus->priv;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_WRITE)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)
			      | MACB_BF(DATA, value)));

	/* wait for end of transfer */
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	return 0;
}
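/* Both MDIO helpers above build an IEEE 802.3 Clause 22 management frame in
 * the MAN register (start-of-frame, read/write op, PHY and register address,
 * code bits, data) and busy-wait on NSR.IDLE for the shift to finish.
 */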

/**
 * macb_set_tx_clk() - Set the TX clock rate to match the link speed
 * @clk: Pointer to the clock to change
 * @speed: New link speed (SPEED_10, SPEED_100 or SPEED_1000)
 * @dev: Pointer to the struct net_device
 */
static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
{
	long ferr, rate, rate_rounded;

	if (!clk)
		return;

	switch (speed) {
	case SPEED_10:
		rate = 2500000;
		break;
	case SPEED_100:
		rate = 25000000;
		break;
	case SPEED_1000:
		rate = 125000000;
		break;
	default:
		return;
	}

	rate_rounded = clk_round_rate(clk, rate);
	if (rate_rounded < 0)
		return;

	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
	 * is not satisfied.
	 */
	ferr = abs(rate_rounded - rate);
	ferr = DIV_ROUND_UP(ferr, rate / 100000);
	if (ferr > 5)
		netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
			    rate);

	if (clk_set_rate(clk, rate_rounded))
		netdev_err(dev, "adjusting tx_clk failed.\n");
}
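/* Worked example for the 50 ppm check above: at SPEED_1000 the target rate is
 * 125 MHz, so one "ferr" unit is 125000000 / 100000 = 1250 Hz, i.e. 10 ppm.
 * A rounded rate that is 7 kHz off gives DIV_ROUND_UP(7000, 1250) = 6 units
 * (~56 ppm) and triggers the warning.
 */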

static void macb_handle_link_change(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned long flags;
	int status_change = 0;

	spin_lock_irqsave(&bp->lock, flags);

	if (phydev->link) {
		if ((bp->speed != phydev->speed) ||
		    (bp->duplex != phydev->duplex)) {
			u32 reg;

			reg = macb_readl(bp, NCFGR);
			reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
			if (macb_is_gem(bp))
				reg &= ~GEM_BIT(GBE);

			if (phydev->duplex)
				reg |= MACB_BIT(FD);
			if (phydev->speed == SPEED_100)
				reg |= MACB_BIT(SPD);
			if (phydev->speed == SPEED_1000 &&
			    bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
				reg |= GEM_BIT(GBE);

			macb_or_gem_writel(bp, NCFGR, reg);

			bp->speed = phydev->speed;
			bp->duplex = phydev->duplex;
			status_change = 1;
		}
	}

	if (phydev->link != bp->link) {
		if (!phydev->link) {
			bp->speed = 0;
			bp->duplex = -1;
		}
		bp->link = phydev->link;

		status_change = 1;
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (status_change) {
		if (phydev->link) {
			/* Update the TX clock rate if and only if the link is
			 * up and there has been a link change.
			 */
			macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);

			netif_carrier_on(dev);
			netdev_info(dev, "link up (%d/%s)\n",
				    phydev->speed,
				    phydev->duplex == DUPLEX_FULL ?
				    "Full" : "Half");
		} else {
			netif_carrier_off(dev);
			netdev_info(dev, "link down\n");
		}
	}
}

/* based on au1000_eth.c */
static int macb_mii_probe(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_platform_data *pdata;
	struct phy_device *phydev;
	int phy_irq;
	int ret;

	if (bp->phy_node) {
		phydev = of_phy_connect(dev, bp->phy_node,
					&macb_handle_link_change, 0,
					bp->phy_interface);
		if (!phydev)
			return -ENODEV;
	} else {
		phydev = phy_find_first(bp->mii_bus);
		if (!phydev) {
			netdev_err(dev, "no PHY found\n");
			return -ENXIO;
		}

		pdata = dev_get_platdata(&bp->pdev->dev);
		if (pdata) {
			if (gpio_is_valid(pdata->phy_irq_pin)) {
				ret = devm_gpio_request(&bp->pdev->dev,
							pdata->phy_irq_pin, "phy int");
				if (!ret) {
					phy_irq = gpio_to_irq(pdata->phy_irq_pin);
					phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
				}
			} else {
				phydev->irq = PHY_POLL;
			}
		}

		/* attach the mac to the phy */
		ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
					 bp->phy_interface);
		if (ret) {
			netdev_err(dev, "Could not attach to PHY\n");
			return ret;
		}
	}

	/* mask with MAC supported features */
	if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
		phydev->supported &= PHY_GBIT_FEATURES;
	else
		phydev->supported &= PHY_BASIC_FEATURES;

	if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF)
		phydev->supported &= ~SUPPORTED_1000baseT_Half;

	phydev->advertising = phydev->supported;

	bp->link = 0;
	bp->speed = 0;
	bp->duplex = -1;

	return 0;
}

static int macb_mii_init(struct macb *bp)
{
	struct macb_platform_data *pdata;
	struct device_node *np;
	int err = -ENXIO, i;

	/* Enable management port */
	macb_writel(bp, NCR, MACB_BIT(MPE));

	bp->mii_bus = mdiobus_alloc();
	if (!bp->mii_bus) {
		err = -ENOMEM;
		goto err_out;
	}

	bp->mii_bus->name = "MACB_mii_bus";
	bp->mii_bus->read = &macb_mdio_read;
	bp->mii_bus->write = &macb_mdio_write;
	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 bp->pdev->name, bp->pdev->id);
	bp->mii_bus->priv = bp;
	bp->mii_bus->parent = &bp->pdev->dev;
	pdata = dev_get_platdata(&bp->pdev->dev);

	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);

	np = bp->pdev->dev.of_node;
	if (np) {
		if (of_phy_is_fixed_link(np)) {
			if (of_phy_register_fixed_link(np) < 0) {
				dev_err(&bp->pdev->dev,
					"broken fixed-link specification\n");
				goto err_out_unregister_bus;
			}
			bp->phy_node = of_node_get(np);

			err = mdiobus_register(bp->mii_bus);
		} else {
			/* try dt phy registration */
			err = of_mdiobus_register(bp->mii_bus, np);

			/* fallback to standard phy registration if no phy were
			 * found during dt phy registration
			 */
			if (!err && !phy_find_first(bp->mii_bus)) {
				for (i = 0; i < PHY_MAX_ADDR; i++) {
					struct phy_device *phydev;

					phydev = mdiobus_scan(bp->mii_bus, i);
					if (IS_ERR(phydev) &&
					    PTR_ERR(phydev) != -ENODEV) {
						err = PTR_ERR(phydev);
						break;
					}
				}

				if (err)
					goto err_out_unregister_bus;
			}
		}
	} else {
		for (i = 0; i < PHY_MAX_ADDR; i++)
			bp->mii_bus->irq[i] = PHY_POLL;

		if (pdata)
			bp->mii_bus->phy_mask = pdata->phy_mask;

		err = mdiobus_register(bp->mii_bus);
	}

	if (err)
		goto err_out_free_mdiobus;

	err = macb_mii_probe(bp->dev);
	if (err)
		goto err_out_unregister_bus;

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(bp->mii_bus);
err_out_free_mdiobus:
	mdiobus_free(bp->mii_bus);
err_out:
	return err;
}

static void macb_update_stats(struct macb *bp)
{
	u32 *p = &bp->hw_stats.macb.rx_pause_frames;
	u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
	int offset = MACB_PFR;

	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);

	for (; p < end; p++, offset += 4)
		*p += bp->macb_reg_readl(bp, offset);
}

static int macb_halt_tx(struct macb *bp)
{
	unsigned long	halt_time, timeout;
	u32		status;

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));

	timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
	do {
		halt_time = jiffies;
		status = macb_readl(bp, TSR);
		if (!(status & MACB_BIT(TGO)))
			return 0;

		usleep_range(10, 250);
	} while (time_before(halt_time, timeout));

	return -ETIMEDOUT;
}

static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
{
	if (tx_skb->mapping) {
		if (tx_skb->mapped_as_page)
			dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
				       tx_skb->size, DMA_TO_DEVICE);
		else
			dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
					 tx_skb->size, DMA_TO_DEVICE);
		tx_skb->mapping = 0;
	}

	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
	}
}

static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	struct macb_dma_desc_64 *desc_64;

	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
		desc_64 = macb_64b_desc(bp, desc);
		desc_64->addrh = upper_32_bits(addr);
	}
#endif
	desc->addr = lower_32_bits(addr);
}

static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
{
	dma_addr_t addr = 0;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	struct macb_dma_desc_64 *desc_64;

	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
		desc_64 = macb_64b_desc(bp, desc);
		addr = ((u64)(desc_64->addrh) << 32);
	}
#endif
	addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
	return addr;
}

static void macb_tx_error_task(struct work_struct *work)
{
	struct macb_queue	*queue = container_of(work, struct macb_queue,
						      tx_error_task);
	struct macb		*bp = queue->bp;
	struct macb_tx_skb	*tx_skb;
	struct macb_dma_desc	*desc;
	struct sk_buff		*skb;
	unsigned int		tail;
	unsigned long		flags;

	netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
		    (unsigned int)(queue - bp->queues),
		    queue->tx_tail, queue->tx_head);

	/* Prevent the queue IRQ handlers from running: each of them may call
	 * macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
	 * As explained below, we have to halt the transmission before updating
	 * TBQP registers so we call netif_tx_stop_all_queues() to notify the
	 * network engine about the macb/gem being halted.
	 */
	spin_lock_irqsave(&bp->lock, flags);

	/* Make sure nobody is trying to queue up new packets */
	netif_tx_stop_all_queues(bp->dev);

	/* Stop transmission now
	 * (in case we have just queued new packets)
	 * macb/gem must be halted to write TBQP register
	 */
	if (macb_halt_tx(bp))
		/* Just complain for now, reinitializing TX path can be good */
		netdev_err(bp->dev, "BUG: halt tx timed out\n");

	/* Treat frames in TX queue including the ones that caused the error.
	 * Free transmit buffers in upper layer.
	 */
	for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
		u32	ctrl;

		desc = macb_tx_desc(queue, tail);
		ctrl = desc->ctrl;
		tx_skb = macb_tx_skb(queue, tail);
		skb = tx_skb->skb;

		if (ctrl & MACB_BIT(TX_USED)) {
			/* skb is set for the last buffer of the frame */
			while (!skb) {
				macb_tx_unmap(bp, tx_skb);
				tail++;
				tx_skb = macb_tx_skb(queue, tail);
				skb = tx_skb->skb;
			}

			/* ctrl still refers to the first buffer descriptor
			 * since it's the only one written back by the hardware
			 */
			if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
				netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(bp, tail),
					    skb->data);
				bp->dev->stats.tx_packets++;
				bp->dev->stats.tx_bytes += skb->len;
			}
		} else {
			/* "Buffers exhausted mid-frame" errors may only happen
			 * if the driver is buggy, so complain loudly about
			 * those. Statistics are updated by hardware.
			 */
			if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
				netdev_err(bp->dev,
					   "BUG: TX buffers exhausted mid-frame\n");

			desc->ctrl = ctrl | MACB_BIT(TX_USED);
		}

		macb_tx_unmap(bp, tx_skb);
	}

	/* Set end of TX queue */
	desc = macb_tx_desc(queue, 0);
	macb_set_addr(bp, desc, 0);
	desc->ctrl = MACB_BIT(TX_USED);

	/* Make descriptor updates visible to hardware */
	wmb();

	/* Reinitialize the TX desc queue */
	queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
		queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
#endif
	/* Make TX ring reflect state of hardware */
	queue->tx_head = 0;
	queue->tx_tail = 0;

	/* Housework before enabling TX IRQ */
	macb_writel(bp, TSR, macb_readl(bp, TSR));
	queue_writel(queue, IER, MACB_TX_INT_FLAGS);

	/* Now we are ready to start transmission again */
	netif_tx_start_all_queues(bp->dev);
	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	spin_unlock_irqrestore(&bp->lock, flags);
}

static void macb_tx_interrupt(struct macb_queue *queue)
{
	unsigned int tail;
	unsigned int head;
	u32 status;
	struct macb *bp = queue->bp;
	u16 queue_index = queue - bp->queues;

	status = macb_readl(bp, TSR);
	macb_writel(bp, TSR, status);

	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
		queue_writel(queue, ISR, MACB_BIT(TCOMP));

	netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
		    (unsigned long)status);

	head = queue->tx_head;
	for (tail = queue->tx_tail; tail != head; tail++) {
		struct macb_tx_skb	*tx_skb;
		struct sk_buff		*skb;
		struct macb_dma_desc	*desc;
		u32			ctrl;

		desc = macb_tx_desc(queue, tail);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		ctrl = desc->ctrl;

		/* TX_USED bit is only set by hardware on the very first buffer
		 * descriptor of the transmitted frame.
		 */
		if (!(ctrl & MACB_BIT(TX_USED)))
			break;

		/* Process all buffers of the current transmitted frame */
		for (;; tail++) {
			tx_skb = macb_tx_skb(queue, tail);
			skb = tx_skb->skb;

			/* First, update TX stats if needed */
			if (skb) {
				if (gem_ptp_do_txstamp(queue, skb, desc) == 0) {
					/* skb now belongs to timestamp buffer
					 * and will be removed later
					 */
					tx_skb->skb = NULL;
				}
				netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(bp, tail),
					    skb->data);
				bp->dev->stats.tx_packets++;
				bp->dev->stats.tx_bytes += skb->len;
			}

			/* Now we can safely release resources */
			macb_tx_unmap(bp, tx_skb);

			/* skb is set only for the last buffer of the frame.
			 * WARNING: at this point skb has been freed by
			 * macb_tx_unmap().
			 */
			if (skb)
				break;
		}
	}

	queue->tx_tail = tail;
	if (__netif_subqueue_stopped(bp->dev, queue_index) &&
	    CIRC_CNT(queue->tx_head, queue->tx_tail,
		     bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
		netif_wake_subqueue(bp->dev, queue_index);
}

static void gem_rx_refill(struct macb *bp)
{
	unsigned int		entry;
	struct sk_buff		*skb;
	dma_addr_t		paddr;
	struct macb_dma_desc *desc;

	while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
			  bp->rx_ring_size) > 0) {
		entry = macb_rx_ring_wrap(bp, bp->rx_prepared_head);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		bp->rx_prepared_head++;
		desc = macb_rx_desc(bp, entry);

		if (!bp->rx_skbuff[entry]) {
			/* allocate sk_buff for this free entry in ring */
			skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
			if (unlikely(!skb)) {
				netdev_err(bp->dev,
					   "Unable to allocate sk_buff\n");
				break;
			}

			/* now fill corresponding descriptor entry */
			paddr = dma_map_single(&bp->pdev->dev, skb->data,
					       bp->rx_buffer_size,
					       DMA_FROM_DEVICE);
			if (dma_mapping_error(&bp->pdev->dev, paddr)) {
				dev_kfree_skb(skb);
				break;
			}

			bp->rx_skbuff[entry] = skb;

			if (entry == bp->rx_ring_size - 1)
				paddr |= MACB_BIT(RX_WRAP);
			macb_set_addr(bp, desc, paddr);
			desc->ctrl = 0;

			/* properly align Ethernet header */
			skb_reserve(skb, NET_IP_ALIGN);
		} else {
			desc->addr &= ~MACB_BIT(RX_USED);
			desc->ctrl = 0;
		}
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
		    bp->rx_prepared_head, bp->rx_tail);
}

/* Mark DMA descriptors from begin up to and not including end as unused */
static void discard_partial_frame(struct macb *bp, unsigned int begin,
				  unsigned int end)
{
	unsigned int frag;

	for (frag = begin; frag != end; frag++) {
		struct macb_dma_desc *desc = macb_rx_desc(bp, frag);

		desc->addr &= ~MACB_BIT(RX_USED);
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	/* When this happens, the hardware stats registers for
	 * whatever caused this is updated, so we don't have to record
	 * anything.
	 */
}

static int gem_rx(struct macb *bp, int budget)
{
	unsigned int		len;
	unsigned int		entry;
	struct sk_buff		*skb;
	struct macb_dma_desc	*desc;
	int			count = 0;

	while (count < budget) {
		u32 ctrl;
		dma_addr_t addr;
		bool rxused;

		entry = macb_rx_ring_wrap(bp, bp->rx_tail);
		desc = macb_rx_desc(bp, entry);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
		addr = macb_get_addr(bp, desc);
		ctrl = desc->ctrl;

		if (!rxused)
			break;

		bp->rx_tail++;
		count++;

		if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
			netdev_err(bp->dev,
				   "not whole frame pointed by descriptor\n");
			bp->dev->stats.rx_dropped++;
			break;
		}
		skb = bp->rx_skbuff[entry];
		if (unlikely(!skb)) {
			netdev_err(bp->dev,
				   "inconsistent Rx descriptor chain\n");
			bp->dev->stats.rx_dropped++;
			break;
		}
		/* now everything is ready for receiving packet */
		bp->rx_skbuff[entry] = NULL;
		len = ctrl & bp->rx_frm_len_mask;

		netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);

		skb_put(skb, len);
		dma_unmap_single(&bp->pdev->dev, addr,
				 bp->rx_buffer_size, DMA_FROM_DEVICE);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb_checksum_none_assert(skb);
		if (bp->dev->features & NETIF_F_RXCSUM &&
		    !(bp->dev->flags & IFF_PROMISC) &&
		    GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		bp->dev->stats.rx_packets++;
		bp->dev->stats.rx_bytes += skb->len;

		gem_ptp_do_rxstamp(bp, skb, desc);

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
		netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
			    skb->len, skb->csum);
		print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb_mac_header(skb), 16, true);
		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb->data, 32, true);
#endif

		netif_receive_skb(skb);
	}

	gem_rx_refill(bp);

	return count;
}

static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
			 unsigned int last_frag)
{
	unsigned int len;
	unsigned int frag;
	unsigned int offset;
	struct sk_buff *skb;
	struct macb_dma_desc *desc;

	desc = macb_rx_desc(bp, last_frag);
	len = desc->ctrl & bp->rx_frm_len_mask;

	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
		macb_rx_ring_wrap(bp, first_frag),
		macb_rx_ring_wrap(bp, last_frag), len);

	/* The ethernet header starts NET_IP_ALIGN bytes into the
	 * first buffer. Since the header is 14 bytes, this makes the
	 * payload word-aligned.
	 *
	 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
	 * the two padding bytes into the skb so that we avoid hitting
	 * the slowpath in memcpy(), and pull them off afterwards.
	 */
	skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
	if (!skb) {
		bp->dev->stats.rx_dropped++;
		for (frag = first_frag; ; frag++) {
			desc = macb_rx_desc(bp, frag);
			desc->addr &= ~MACB_BIT(RX_USED);
			if (frag == last_frag)
				break;
		}

		/* Make descriptor updates visible to hardware */
		wmb();

		return 1;
	}

	offset = 0;
	len += NET_IP_ALIGN;
	skb_checksum_none_assert(skb);
	skb_put(skb, len);

	for (frag = first_frag; ; frag++) {
		unsigned int frag_len = bp->rx_buffer_size;

		if (offset + frag_len > len) {
			if (unlikely(frag != last_frag)) {
				dev_kfree_skb_any(skb);
				return -1;
			}
			frag_len = len - offset;
		}
		skb_copy_to_linear_data_offset(skb, offset,
					       macb_rx_buffer(bp, frag),
					       frag_len);
		offset += bp->rx_buffer_size;
		desc = macb_rx_desc(bp, frag);
		desc->addr &= ~MACB_BIT(RX_USED);

		if (frag == last_frag)
			break;
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	__skb_pull(skb, NET_IP_ALIGN);
	skb->protocol = eth_type_trans(skb, bp->dev);

	bp->dev->stats.rx_packets++;
	bp->dev->stats.rx_bytes += skb->len;
	netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
		    skb->len, skb->csum);
	netif_receive_skb(skb);

	return 0;
}

static inline void macb_init_rx_ring(struct macb *bp)
{
	dma_addr_t addr;
	struct macb_dma_desc *desc = NULL;
	int i;

	addr = bp->rx_buffers_dma;
	for (i = 0; i < bp->rx_ring_size; i++) {
		desc = macb_rx_desc(bp, i);
		macb_set_addr(bp, desc, addr);
		desc->ctrl = 0;
		addr += bp->rx_buffer_size;
	}
	desc->addr |= MACB_BIT(RX_WRAP);
	bp->rx_tail = 0;
}

static int macb_rx(struct macb *bp, int budget)
{
	bool reset_rx_queue = false;
	int received = 0;
	unsigned int tail;
	int first_frag = -1;

	for (tail = bp->rx_tail; budget > 0; tail++) {
		struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
		u32 ctrl;

		/* Make hw descriptor updates visible to CPU */
		rmb();

		ctrl = desc->ctrl;

		if (!(desc->addr & MACB_BIT(RX_USED)))
			break;

		if (ctrl & MACB_BIT(RX_SOF)) {
			if (first_frag != -1)
				discard_partial_frame(bp, first_frag, tail);
			first_frag = tail;
		}

		if (ctrl & MACB_BIT(RX_EOF)) {
			int dropped;

			if (unlikely(first_frag == -1)) {
				reset_rx_queue = true;
				continue;
			}

			dropped = macb_rx_frame(bp, first_frag, tail);
			first_frag = -1;
			if (unlikely(dropped < 0)) {
				reset_rx_queue = true;
				continue;
			}
			if (!dropped) {
				received++;
				budget--;
			}
		}
	}

	if (unlikely(reset_rx_queue)) {
		unsigned long flags;
		u32 ctrl;

		netdev_err(bp->dev, "RX queue corruption: reset it\n");

		spin_lock_irqsave(&bp->lock, flags);

		ctrl = macb_readl(bp, NCR);
		macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));

		macb_init_rx_ring(bp);
		macb_writel(bp, RBQP, bp->rx_ring_dma);

		macb_writel(bp, NCR, ctrl | MACB_BIT(RE));

		spin_unlock_irqrestore(&bp->lock, flags);
		return received;
	}

	if (first_frag != -1)
		bp->rx_tail = first_frag;
	else
		bp->rx_tail = tail;

	return received;
}

static int macb_poll(struct napi_struct *napi, int budget)
{
	struct macb *bp = container_of(napi, struct macb, napi);
	int work_done;
	u32 status;

	status = macb_readl(bp, RSR);
	macb_writel(bp, RSR, status);

	netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
		    (unsigned long)status, budget);

	work_done = bp->macbgem_ops.mog_rx(bp, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		/* Packets received while interrupts were disabled */
		status = macb_readl(bp, RSR);
		if (status) {
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				macb_writel(bp, ISR, MACB_BIT(RCOMP));
			napi_reschedule(napi);
		} else {
			macb_writel(bp, IER, MACB_RX_INT_FLAGS);
		}
	}

	/* TODO: Handle errors */

	return work_done;
}

static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
	struct macb_queue *queue = dev_id;
	struct macb *bp = queue->bp;
	struct net_device *dev = bp->dev;
	u32 status, ctrl;

	status = queue_readl(queue, ISR);

	if (unlikely(!status))
		return IRQ_NONE;

	spin_lock(&bp->lock);

	while (status) {
		/* close possible race with dev_close */
		if (unlikely(!netif_running(dev))) {
			queue_writel(queue, IDR, -1);
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, -1);
			break;
		}

		netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
			    (unsigned int)(queue - bp->queues),
			    (unsigned long)status);

		if (status & MACB_RX_INT_FLAGS) {
			/* There's no point taking any more interrupts
			 * until we have processed the buffers. The
			 * scheduling call may fail if the poll routine
			 * is already scheduled, so disable interrupts
			 * now.
			 */
			queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(RCOMP));

			if (napi_schedule_prep(&bp->napi)) {
				netdev_vdbg(bp->dev, "scheduling RX softirq\n");
				__napi_schedule(&bp->napi);
			}
		}

		if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
			queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
			schedule_work(&queue->tx_error_task);

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);

			break;
		}

		if (status & MACB_BIT(TCOMP))
			macb_tx_interrupt(queue);

		/* Link change detection isn't possible with RMII, so we'll
		 * add that if/when we get our hands on a full-blown MII PHY.
		 */

		/* There is a hardware issue under heavy load where DMA can
		 * stop; this causes endless "used buffer descriptor read"
		 * interrupts but it can be cleared by re-enabling RX. See
		 * the at91 manual, section 41.3.1 or the Zynq manual
		 * section 16.7.4 for details.
		 */
		if (status & MACB_BIT(RXUBR)) {
			ctrl = macb_readl(bp, NCR);
			macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
			wmb();
			macb_writel(bp, NCR, ctrl | MACB_BIT(RE));

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(RXUBR));
		}

		if (status & MACB_BIT(ISR_ROVR)) {
			/* We missed at least one packet */
			if (macb_is_gem(bp))
				bp->hw_stats.gem.rx_overruns++;
			else
				bp->hw_stats.macb.rx_overruns++;

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
		}

		if (status & MACB_BIT(HRESP)) {
			/* TODO: Reset the hardware, and maybe move the
			 * netdev_err to a lower-priority context as well
			 * (work queue?)
			 */
			netdev_err(dev, "DMA bus error: HRESP not OK\n");

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(HRESP));
		}
		status = queue_readl(queue, ISR);
	}

	spin_unlock(&bp->lock);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void macb_poll_controller(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned long flags;
	unsigned int q;

	local_irq_save(flags);
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		macb_interrupt(dev->irq, queue);
	local_irq_restore(flags);
}
#endif

static unsigned int macb_tx_map(struct macb *bp,
				struct macb_queue *queue,
				struct sk_buff *skb,
				unsigned int hdrlen)
{
	dma_addr_t mapping;
	unsigned int len, entry, i, tx_head = queue->tx_head;
	struct macb_tx_skb *tx_skb = NULL;
	struct macb_dma_desc *desc;
	unsigned int offset, size, count = 0;
	unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int eof = 1, mss_mfs = 0;
	u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;

	/* LSO */
	if (skb_shinfo(skb)->gso_size != 0) {
		if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			/* UDP - UFO */
			lso_ctrl = MACB_LSO_UFO_ENABLE;
		else
			/* TCP - TSO */
			lso_ctrl = MACB_LSO_TSO_ENABLE;
	}

	/* First, map non-paged data */
	len = skb_headlen(skb);

	/* first buffer length */
	size = hdrlen;

	offset = 0;
	while (len) {
		entry = macb_tx_ring_wrap(bp, tx_head);
		tx_skb = &queue->tx_skb[entry];

		mapping = dma_map_single(&bp->pdev->dev,
					 skb->data + offset,
					 size, DMA_TO_DEVICE);
		if (dma_mapping_error(&bp->pdev->dev, mapping))
			goto dma_error;

		/* Save info to properly release resources */
		tx_skb->skb = NULL;
		tx_skb->mapping = mapping;
		tx_skb->size = size;
		tx_skb->mapped_as_page = false;

		len -= size;
		offset += size;
		count++;
		tx_head++;

		size = min(len, bp->max_tx_length);
	}

	/* Then, map paged data from fragments */
	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		len = skb_frag_size(frag);
		offset = 0;
		while (len) {
			size = min(len, bp->max_tx_length);
			entry = macb_tx_ring_wrap(bp, tx_head);
			tx_skb = &queue->tx_skb[entry];

			mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
						   offset, size, DMA_TO_DEVICE);
			if (dma_mapping_error(&bp->pdev->dev, mapping))
				goto dma_error;

			/* Save info to properly release resources */
			tx_skb->skb = NULL;
			tx_skb->mapping = mapping;
			tx_skb->size = size;
			tx_skb->mapped_as_page = true;

			len -= size;
			offset += size;
			count++;
			tx_head++;
		}
	}

	/* Should never happen */
	if (unlikely(!tx_skb)) {
		netdev_err(bp->dev, "BUG! empty skb!\n");
		return 0;
	}

	/* This is the last buffer of the frame: save socket buffer */
	tx_skb->skb = skb;

	/* Update TX ring: update buffer descriptors in reverse order
	 * to avoid race condition
	 */

	/* Set 'TX_USED' bit in buffer descriptor at tx_head position
	 * to set the end of TX queue
	 */
	i = tx_head;
	entry = macb_tx_ring_wrap(bp, i);
	ctrl = MACB_BIT(TX_USED);
	desc = macb_tx_desc(queue, entry);
	desc->ctrl = ctrl;

	if (lso_ctrl) {
		if (lso_ctrl == MACB_LSO_UFO_ENABLE)
			/* include header and FCS in value given to h/w */
			mss_mfs = skb_shinfo(skb)->gso_size +
					skb_transport_offset(skb) +
					ETH_FCS_LEN;
		else /* TSO */ {
			mss_mfs = skb_shinfo(skb)->gso_size;
			/* TCP Sequence Number Source Select
			 * can be set only for TSO
			 */
			seq_ctrl = 0;
		}
	}

	do {
		i--;
		entry = macb_tx_ring_wrap(bp, i);
		tx_skb = &queue->tx_skb[entry];
		desc = macb_tx_desc(queue, entry);

		ctrl = (u32)tx_skb->size;
		if (eof) {
			ctrl |= MACB_BIT(TX_LAST);
			eof = 0;
		}
		if (unlikely(entry == (bp->tx_ring_size - 1)))
			ctrl |= MACB_BIT(TX_WRAP);

		/* First descriptor is header descriptor */
		if (i == queue->tx_head) {
			ctrl |= MACB_BF(TX_LSO, lso_ctrl);
			ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
		} else
			/* Only set MSS/MFS on payload descriptors
			 * (second or later descriptor)
			 */
			ctrl |= MACB_BF(MSS_MFS, mss_mfs);

		/* Set TX buffer descriptor */
		macb_set_addr(bp, desc, tx_skb->mapping);
		/* desc->addr must be visible to hardware before clearing
		 * 'TX_USED' bit in desc->ctrl.
		 */
		wmb();
		desc->ctrl = ctrl;
	} while (i != queue->tx_head);

	queue->tx_head = tx_head;

	return count;

dma_error:
	netdev_err(bp->dev, "TX DMA map failed\n");

	for (i = queue->tx_head; i != tx_head; i++) {
		tx_skb = macb_tx_skb(queue, i);

		macb_tx_unmap(bp, tx_skb);
	}

	return 0;
}

static netdev_features_t macb_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	unsigned int nr_frags, f;
	unsigned int hdrlen;

	/* Validate LSO compatibility */

	/* there is only one buffer */
	if (!skb_is_nonlinear(skb))
		return features;

	/* length of header */
	hdrlen = skb_transport_offset(skb);
	if (ip_hdr(skb)->protocol == IPPROTO_TCP)
		hdrlen += tcp_hdrlen(skb);

	/* For LSO:
	 * When software supplies two or more payload buffers all payload buffers
	 * apart from the last must be a multiple of 8 bytes in size.
	 */
	if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN))
		return features & ~MACB_NETIF_LSO;

	nr_frags = skb_shinfo(skb)->nr_frags;
	/* No need to check last fragment */
	nr_frags--;
	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN))
			return features & ~MACB_NETIF_LSO;
	}
	return features;
}
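/* When the check above strips NETIF_F_TSO for a given skb, the core network
 * stack falls back to software GSO for that packet, so payload buffers with
 * odd alignment are still sent correctly, just without hardware LSO.
 */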

static inline int macb_clear_csum(struct sk_buff *skb)
{
	/* no change for packets without checksum offloading */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	/* make sure we can modify the header */
	if (unlikely(skb_cow_head(skb, 0)))
		return -1;

	/* initialize checksum field
	 * This is required - at least for Zynq, which otherwise calculates
	 * wrong UDP header checksums for UDP packets with UDP data len <=2
	 */
	*(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
	return 0;
}

static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue = &bp->queues[queue_index];
	unsigned long flags;
	unsigned int desc_cnt, nr_frags, frag_size, f;
	unsigned int hdrlen;
	bool is_lso, is_udp = 0;

	is_lso = (skb_shinfo(skb)->gso_size != 0);

	if (is_lso) {
		is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP);

		/* length of headers */
		if (is_udp)
			/* only queue eth + ip headers separately for UDP */
			hdrlen = skb_transport_offset(skb);
		else
			hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
		if (skb_headlen(skb) < hdrlen) {
			netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
			/* if this is required, would need to copy to single buffer */
			return NETDEV_TX_BUSY;
		}
	} else
		hdrlen = min(skb_headlen(skb), bp->max_tx_length);

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
	netdev_vdbg(bp->dev,
		    "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
		    queue_index, skb->len, skb->head, skb->data,
		    skb_tail_pointer(skb), skb_end_pointer(skb));
	print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
		       skb->data, 16, true);
#endif

	/* Count how many TX buffer descriptors are needed to send this
	 * socket buffer: skb fragments of jumbo frames may need to be
	 * split into many buffer descriptors.
	 */
	if (is_lso && (skb_headlen(skb) > hdrlen))
		/* extra header descriptor if also payload in first buffer */
		desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1;
	else
		desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++) {
		frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
		desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
	}

	spin_lock_irqsave(&bp->lock, flags);

	/* This is a hard error, log it. */
	if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
		       bp->tx_ring_size) < desc_cnt) {
		netif_stop_subqueue(dev, queue_index);
		spin_unlock_irqrestore(&bp->lock, flags);
		netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
			   queue->tx_head, queue->tx_tail);
		return NETDEV_TX_BUSY;
	}

	if (macb_clear_csum(skb)) {
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	/* Map socket buffer for DMA transfer */
	if (!macb_tx_map(bp, queue, skb, hdrlen)) {
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	/* Make newly initialized descriptor visible to hardware */
	wmb();
	skb_tx_timestamp(skb);

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
		netif_stop_subqueue(dev, queue_index);

unlock:
	spin_unlock_irqrestore(&bp->lock, flags);

	return NETDEV_TX_OK;
}

static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
{
	if (!macb_is_gem(bp)) {
		bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
	} else {
		bp->rx_buffer_size = size;

		if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
			netdev_dbg(bp->dev,
				   "RX buffer must be multiple of %d bytes, expanding\n",
				   RX_BUFFER_MULTIPLE);
			bp->rx_buffer_size =
				roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
		}
	}

	netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
		   bp->dev->mtu, bp->rx_buffer_size);
}
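/* Example: on a GEM, a requested size of 1522 bytes (max VLAN-tagged frame)
 * is rounded up to 1536, the next multiple of RX_BUFFER_MULTIPLE.
 */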

static void gem_free_rx_buffers(struct macb *bp)
{
	struct sk_buff		*skb;
	struct macb_dma_desc	*desc;
	dma_addr_t		addr;
	int i;

	if (!bp->rx_skbuff)
		return;

	for (i = 0; i < bp->rx_ring_size; i++) {
		skb = bp->rx_skbuff[i];

		if (!skb)
			continue;

		desc = macb_rx_desc(bp, i);
		addr = macb_get_addr(bp, desc);

		dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		skb = NULL;
	}

	kfree(bp->rx_skbuff);
	bp->rx_skbuff = NULL;
}

static void macb_free_rx_buffers(struct macb *bp)
{
	if (bp->rx_buffers) {
		dma_free_coherent(&bp->pdev->dev,
				  bp->rx_ring_size * bp->rx_buffer_size,
				  bp->rx_buffers, bp->rx_buffers_dma);
		bp->rx_buffers = NULL;
	}
}

static void macb_free_consistent(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;

	bp->macbgem_ops.mog_free_rx_buffers(bp);
	if (bp->rx_ring) {
		dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES(bp),
				  bp->rx_ring, bp->rx_ring_dma);
		bp->rx_ring = NULL;
	}

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		kfree(queue->tx_skb);
		queue->tx_skb = NULL;
		if (queue->tx_ring) {
			dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES(bp),
					  queue->tx_ring, queue->tx_ring_dma);
			queue->tx_ring = NULL;
		}
	}
}

static int gem_alloc_rx_buffers(struct macb *bp)
{
	int size;

	size = bp->rx_ring_size * sizeof(struct sk_buff *);
	bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
	if (!bp->rx_skbuff)
		return -ENOMEM;
	else
		netdev_dbg(bp->dev,
			   "Allocated %d RX struct sk_buff entries at %p\n",
			   bp->rx_ring_size, bp->rx_skbuff);
	return 0;
}

static int macb_alloc_rx_buffers(struct macb *bp)
{
	int size;

	size = bp->rx_ring_size * bp->rx_buffer_size;
	bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
					    &bp->rx_buffers_dma, GFP_KERNEL);
	if (!bp->rx_buffers)
		return -ENOMEM;

	netdev_dbg(bp->dev,
		   "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
		   size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
	return 0;
}

static int macb_alloc_consistent(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	int size;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		size = TX_RING_BYTES(bp);
		queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
						    &queue->tx_ring_dma,
						    GFP_KERNEL);
		if (!queue->tx_ring)
			goto out_err;
		netdev_dbg(bp->dev,
			   "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
			   q, size, (unsigned long)queue->tx_ring_dma,
			   queue->tx_ring);

		size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
		queue->tx_skb = kmalloc(size, GFP_KERNEL);
		if (!queue->tx_skb)
			goto out_err;
	}

	size = RX_RING_BYTES(bp);
	bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
					 &bp->rx_ring_dma, GFP_KERNEL);
	if (!bp->rx_ring)
		goto out_err;
	netdev_dbg(bp->dev,
		   "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
		   size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);

	if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
		goto out_err;

	return 0;

out_err:
	macb_free_consistent(bp);
	return -ENOMEM;
}

static void gem_init_rings(struct macb *bp)
{
	struct macb_queue *queue;
	struct macb_dma_desc *desc = NULL;
	unsigned int q;
	int i;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		for (i = 0; i < bp->tx_ring_size; i++) {
			desc = macb_tx_desc(queue, i);
			macb_set_addr(bp, desc, 0);
			desc->ctrl = MACB_BIT(TX_USED);
		}
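		/* desc is left pointing at the last descriptor, so the
		 * wrap bit below lands on the final entry of the ring
		 */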
		desc->ctrl |= MACB_BIT(TX_WRAP);
		queue->tx_head = 0;
		queue->tx_tail = 0;
	}

	bp->rx_tail = 0;
	bp->rx_prepared_head = 0;

	gem_rx_refill(bp);
}

static void macb_init_rings(struct macb *bp)
{
	int i;
	struct macb_dma_desc *desc = NULL;

	macb_init_rx_ring(bp);

	for (i = 0; i < bp->tx_ring_size; i++) {
		desc = macb_tx_desc(&bp->queues[0], i);
		macb_set_addr(bp, desc, 0);
		desc->ctrl = MACB_BIT(TX_USED);
	}
	bp->queues[0].tx_head = 0;
	bp->queues[0].tx_tail = 0;
	desc->ctrl |= MACB_BIT(TX_WRAP);
}

static void macb_reset_hw(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;

	/* Disable RX and TX (XXX: Should we halt the transmission
	 * more gracefully?)
	 */
	macb_writel(bp, NCR, 0);

	/* Clear the stats registers (XXX: Update stats first?) */
	macb_writel(bp, NCR, MACB_BIT(CLRSTAT));

	/* Clear all status flags */
	macb_writel(bp, TSR, -1);
	macb_writel(bp, RSR, -1);

	/* Disable all interrupts */
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, IDR, -1);
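		/* the read below clears pending bits on clear-on-read
		 * variants; clear-on-write variants need the explicit
		 * ISR write that follows
		 */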
		queue_readl(queue, ISR);
		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
			queue_writel(queue, ISR, -1);
	}
}

static u32 gem_mdc_clk_div(struct macb *bp)
{
	u32 config;
	unsigned long pclk_hz = clk_get_rate(bp->pclk);

	if (pclk_hz <= 20000000)
		config = GEM_BF(CLK, GEM_CLK_DIV8);
	else if (pclk_hz <= 40000000)
		config = GEM_BF(CLK, GEM_CLK_DIV16);
	else if (pclk_hz <= 80000000)
		config = GEM_BF(CLK, GEM_CLK_DIV32);
	else if (pclk_hz <= 120000000)
		config = GEM_BF(CLK, GEM_CLK_DIV48);
	else if (pclk_hz <= 160000000)
		config = GEM_BF(CLK, GEM_CLK_DIV64);
	else
		config = GEM_BF(CLK, GEM_CLK_DIV96);

	return config;
}

static u32 macb_mdc_clk_div(struct macb *bp)
{
	u32 config;
	unsigned long pclk_hz;

	if (macb_is_gem(bp))
		return gem_mdc_clk_div(bp);

	pclk_hz = clk_get_rate(bp->pclk);
	if (pclk_hz <= 20000000)
		config = MACB_BF(CLK, MACB_CLK_DIV8);
	else if (pclk_hz <= 40000000)
		config = MACB_BF(CLK, MACB_CLK_DIV16);
	else if (pclk_hz <= 80000000)
		config = MACB_BF(CLK, MACB_CLK_DIV32);
	else
		config = MACB_BF(CLK, MACB_CLK_DIV64);

	return config;
}
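
/* Each divisor above targets the 2.5 MHz MDC ceiling of IEEE 802.3
 * clause 22; e.g. a 160 MHz pclk divided by 64 gives exactly 2.5 MHz.
 */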

/* Get the DMA bus width field of the network configuration register that we
 * should program.  We find the width from decoding the design configuration
 * register to find the maximum supported data bus width.
 */
static u32 macb_dbw(struct macb *bp)
{
	if (!macb_is_gem(bp))
		return 0;

	switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
	case 4:
		return GEM_BF(DBW, GEM_DBW128);
	case 2:
		return GEM_BF(DBW, GEM_DBW64);
	case 1:
	default:
		return GEM_BF(DBW, GEM_DBW32);
	}
}

/* Configure the receive DMA engine
 * - use the correct receive buffer size
 * - set best burst length for DMA operations
 *   (if not supported by FIFO, it will fall back to the default)
 * - set both rx/tx packet buffers to full memory size
 * These are configurable parameters for GEM.
 */
static void macb_configure_dma(struct macb *bp)
{
	u32 dmacfg;

	if (macb_is_gem(bp)) {
		dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
		dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
		if (bp->dma_burst_length)
			dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
		dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
		dmacfg &= ~GEM_BIT(ENDIA_PKT);

		if (bp->native_io)
			dmacfg &= ~GEM_BIT(ENDIA_DESC);
		else
			dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */

		if (bp->dev->features & NETIF_F_HW_CSUM)
			dmacfg |= GEM_BIT(TXCOEN);
		else
			dmacfg &= ~GEM_BIT(TXCOEN);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			dmacfg |= GEM_BIT(ADDR64);
#endif
#ifdef CONFIG_MACB_USE_HWSTAMP
		if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
			dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT);
#endif
		netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
			   dmacfg);
		gem_writel(bp, DMACFG, dmacfg);
	}
}

static void macb_init_hw(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;

	u32 config;

	macb_reset_hw(bp);
	macb_set_hwaddr(bp);

	config = macb_mdc_clk_div(bp);
	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
		config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
	config |= MACB_BF(RBOF, NET_IP_ALIGN);	/* Make eth data aligned */
	config |= MACB_BIT(PAE);		/* PAuse Enable */
	config |= MACB_BIT(DRFCS);		/* Discard Rx FCS */
	if (bp->caps & MACB_CAPS_JUMBO)
		config |= MACB_BIT(JFRAME);	/* Enable jumbo frames */
	else
		config |= MACB_BIT(BIG);	/* Receive oversized frames */
	if (bp->dev->flags & IFF_PROMISC)
		config |= MACB_BIT(CAF);	/* Copy All Frames */
	else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
		config |= GEM_BIT(RXCOEN);
	if (!(bp->dev->flags & IFF_BROADCAST))
		config |= MACB_BIT(NBC);	/* No BroadCast */
	config |= macb_dbw(bp);
	macb_writel(bp, NCFGR, config);
	if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
		gem_writel(bp, JML, bp->jumbo_max_len);
	bp->speed = SPEED_10;
	bp->duplex = DUPLEX_HALF;
	bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
	if (bp->caps & MACB_CAPS_JUMBO)
		bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;

	macb_configure_dma(bp);

	/* Initialize TX and RX buffers */
	macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
		macb_writel(bp, RBQPH, upper_32_bits(bp->rx_ring_dma));
#endif
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
#endif

		/* Enable interrupts */
		queue_writel(queue, IER,
			     MACB_RX_INT_FLAGS |
			     MACB_TX_INT_FLAGS |
			     MACB_BIT(HRESP));
	}

	/* Enable TX and RX */
	macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
}

/* The hash address register is 64 bits long and takes up two
 * locations in the memory map.  The least significant bits are stored
 * in EMAC_HSL and the most significant bits in EMAC_HSH.
 *
 * The unicast hash enable and the multicast hash enable bits in the
 * network configuration register enable the reception of hash matched
 * frames. The destination address is reduced to a 6 bit index into
 * the 64 bit hash register using the following hash function.  The
 * hash function is an exclusive or of every sixth bit of the
 * destination address.
 *
 * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
 * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
 * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
 * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
 * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
 * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
 *
 * da[0] represents the least significant bit of the first byte
 * received, that is, the multicast/unicast indicator, and da[47]
 * represents the most significant bit of the last byte received.  If
 * the hash index, hi[n], points to a bit that is set in the hash
 * register then the frame will be matched according to whether the
 * frame is multicast or unicast.  A multicast match will be signalled
 * if the multicast hash enable bit is set, da[0] is 1 and the hash
 * index points to a bit set in the hash register.  A unicast match
 * will be signalled if the unicast hash enable bit is set, da[0] is 0
 * and the hash index points to a bit set in the hash register.  To
 * receive all multicast frames, the hash register should be set with
 * all ones and the multicast hash enable bit should be set in the
 * network configuration register.
 */

static inline int hash_bit_value(int bitnr, __u8 *addr)
{
	if (addr[bitnr / 8] & (1 << (bitnr % 8)))
		return 1;
	return 0;
}

/* Return the hash index value for the specified address. */
static int hash_get_index(__u8 *addr)
{
	int i, j, bitval;
	int hash_index = 0;

	for (j = 0; j < 6; j++) {
		for (i = 0, bitval = 0; i < 8; i++)
			bitval ^= hash_bit_value(i * 6 + j, addr);

		hash_index |= (bitval << j);
	}

	return hash_index;
}
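
/* Worked example: for the broadcast address ff:ff:ff:ff:ff:ff every
 * da[] bit is one, so each hi[j] XORs eight ones and comes out zero;
 * broadcast frames therefore hash to index 0, i.e. bit 0 of HRB.
 */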

/* Add multicast addresses to the internal multicast-hash table. */
static void macb_sethashtable(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	unsigned long mc_filter[2];
	unsigned int bitnr;
	struct macb *bp = netdev_priv(dev);

	mc_filter[0] = 0;
	mc_filter[1] = 0;

	netdev_for_each_mc_addr(ha, dev) {
		bitnr = hash_get_index(ha->addr);
		mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
	}

	macb_or_gem_writel(bp, HRB, mc_filter[0]);
	macb_or_gem_writel(bp, HRT, mc_filter[1]);
}

/* Enable/Disable promiscuous and multicast modes. */
static void macb_set_rx_mode(struct net_device *dev)
{
	unsigned long cfg;
	struct macb *bp = netdev_priv(dev);

	cfg = macb_readl(bp, NCFGR);

	if (dev->flags & IFF_PROMISC) {
		/* Enable promiscuous mode */
		cfg |= MACB_BIT(CAF);

		/* Disable RX checksum offload */
		if (macb_is_gem(bp))
			cfg &= ~GEM_BIT(RXCOEN);
	} else {
		/* Disable promiscuous mode */
		cfg &= ~MACB_BIT(CAF);

		/* Enable RX checksum offload only if requested */
		if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
			cfg |= GEM_BIT(RXCOEN);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Enable all multicast mode */
		macb_or_gem_writel(bp, HRB, -1);
		macb_or_gem_writel(bp, HRT, -1);
		cfg |= MACB_BIT(NCFGR_MTI);
	} else if (!netdev_mc_empty(dev)) {
		/* Enable specific multicasts */
		macb_sethashtable(dev);
		cfg |= MACB_BIT(NCFGR_MTI);
	} else if (dev->flags & (~IFF_ALLMULTI)) {
		/* Disable all multicast mode */
		macb_or_gem_writel(bp, HRB, 0);
		macb_or_gem_writel(bp, HRT, 0);
		cfg &= ~MACB_BIT(NCFGR_MTI);
	}

	macb_writel(bp, NCFGR, cfg);
}

static int macb_open(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
	int err;

	netdev_dbg(bp->dev, "open\n");

	/* carrier starts down */
	netif_carrier_off(dev);

	/* if the PHY is not yet registered, retry later */
	if (!dev->phydev)
		return -EAGAIN;

	/* RX buffers initialization */
	macb_init_rx_buffer_size(bp, bufsz);

	err = macb_alloc_consistent(bp);
	if (err) {
		netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
			   err);
		return err;
	}

	napi_enable(&bp->napi);

	bp->macbgem_ops.mog_init_rings(bp);
	macb_init_hw(bp);

	/* schedule a link state check */
	phy_start(dev->phydev);

	netif_tx_start_all_queues(dev);

	if (bp->ptp_info)
		bp->ptp_info->ptp_init(dev);

	return 0;
}

static int macb_close(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	unsigned long flags;

	netif_tx_stop_all_queues(dev);
	napi_disable(&bp->napi);

	if (dev->phydev)
		phy_stop(dev->phydev);

	spin_lock_irqsave(&bp->lock, flags);
	macb_reset_hw(bp);
	netif_carrier_off(dev);
	spin_unlock_irqrestore(&bp->lock, flags);

	macb_free_consistent(bp);

	if (bp->ptp_info)
		bp->ptp_info->ptp_remove(dev);

	return 0;
}

static int macb_change_mtu(struct net_device *dev, int new_mtu)
{
	if (netif_running(dev))
		return -EBUSY;

	dev->mtu = new_mtu;

	return 0;
}
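
/* Note: the interface must be down before the MTU can be changed, e.g.
 *   ip link set eth0 down
 *   ip link set eth0 mtu 9000   (jumbo sizes only on MACB_CAPS_JUMBO parts)
 */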

static void gem_update_stats(struct macb *bp)
{
	unsigned int i;
	u32 *p = &bp->hw_stats.gem.tx_octets_31_0;

	for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
		u32 offset = gem_statistics[i].offset;
		u64 val = bp->macb_reg_readl(bp, offset);

		bp->ethtool_stats[i] += val;
		*p += val;

		if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
			/* Add GEM_OCTTXH, GEM_OCTRXH */
			val = bp->macb_reg_readl(bp, offset + 4);
			bp->ethtool_stats[i] += ((u64)val) << 32;
			*(++p) += val;
		}
	}
}

static struct net_device_stats *gem_get_stats(struct macb *bp)
{
	struct gem_stats *hwstat = &bp->hw_stats.gem;
	struct net_device_stats *nstat = &bp->dev->stats;

	gem_update_stats(bp);

	nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
			    hwstat->rx_alignment_errors +
			    hwstat->rx_resource_errors +
			    hwstat->rx_overruns +
			    hwstat->rx_oversize_frames +
			    hwstat->rx_jabbers +
			    hwstat->rx_undersized_frames +
			    hwstat->rx_length_field_frame_errors);
	nstat->tx_errors = (hwstat->tx_late_collisions +
			    hwstat->tx_excessive_collisions +
			    hwstat->tx_underrun +
			    hwstat->tx_carrier_sense_errors);
	nstat->multicast = hwstat->rx_multicast_frames;
	nstat->collisions = (hwstat->tx_single_collision_frames +
			     hwstat->tx_multiple_collision_frames +
			     hwstat->tx_excessive_collisions);
	nstat->rx_length_errors = (hwstat->rx_oversize_frames +
				   hwstat->rx_jabbers +
				   hwstat->rx_undersized_frames +
				   hwstat->rx_length_field_frame_errors);
	nstat->rx_over_errors = hwstat->rx_resource_errors;
	nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
	nstat->rx_frame_errors = hwstat->rx_alignment_errors;
	nstat->rx_fifo_errors = hwstat->rx_overruns;
	nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
	nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
	nstat->tx_fifo_errors = hwstat->tx_underrun;

	return nstat;
}

static void gem_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct macb *bp;

	bp = netdev_priv(dev);
	gem_update_stats(bp);
	memcpy(data, &bp->ethtool_stats, sizeof(u64) * GEM_STATS_LEN);
}

static int gem_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return GEM_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
{
	unsigned int i;

	switch (sset) {
	case ETH_SS_STATS:
		for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
			memcpy(p, gem_statistics[i].stat_string,
			       ETH_GSTRING_LEN);
		break;
	}
}

static struct net_device_stats *macb_get_stats(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &bp->dev->stats;
	struct macb_stats *hwstat = &bp->hw_stats.macb;

	if (macb_is_gem(bp))
		return gem_get_stats(bp);

	/* read stats from hardware */
	macb_update_stats(bp);

	/* Convert HW stats into netdevice stats */
	nstat->rx_errors = (hwstat->rx_fcs_errors +
			    hwstat->rx_align_errors +
			    hwstat->rx_resource_errors +
			    hwstat->rx_overruns +
			    hwstat->rx_oversize_pkts +
			    hwstat->rx_jabbers +
			    hwstat->rx_undersize_pkts +
			    hwstat->rx_length_mismatch);
	nstat->tx_errors = (hwstat->tx_late_cols +
			    hwstat->tx_excessive_cols +
			    hwstat->tx_underruns +
			    hwstat->tx_carrier_errors +
			    hwstat->sqe_test_errors);
	nstat->collisions = (hwstat->tx_single_cols +
			     hwstat->tx_multiple_cols +
			     hwstat->tx_excessive_cols);
	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
				   hwstat->rx_jabbers +
				   hwstat->rx_undersize_pkts +
				   hwstat->rx_length_mismatch);
	nstat->rx_over_errors = hwstat->rx_resource_errors +
				   hwstat->rx_overruns;
	nstat->rx_crc_errors = hwstat->rx_fcs_errors;
	nstat->rx_frame_errors = hwstat->rx_align_errors;
	nstat->rx_fifo_errors = hwstat->rx_overruns;
	/* XXX: What does "missed" mean? */
	nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
	nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
	nstat->tx_fifo_errors = hwstat->tx_underruns;
	/* Don't know about heartbeat or window errors... */

	return nstat;
}

static int macb_get_regs_len(struct net_device *netdev)
{
	return MACB_GREGS_NBR * sizeof(u32);
}

static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *p)
{
	struct macb *bp = netdev_priv(dev);
	unsigned int tail, head;
	u32 *regs_buff = p;

	regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
			| MACB_GREGS_VERSION;

	tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail);
	head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head);

	regs_buff[0]  = macb_readl(bp, NCR);
	regs_buff[1]  = macb_or_gem_readl(bp, NCFGR);
	regs_buff[2]  = macb_readl(bp, NSR);
	regs_buff[3]  = macb_readl(bp, TSR);
	regs_buff[4]  = macb_readl(bp, RBQP);
	regs_buff[5]  = macb_readl(bp, TBQP);
	regs_buff[6]  = macb_readl(bp, RSR);
	regs_buff[7]  = macb_readl(bp, IMR);

	regs_buff[8]  = tail;
	regs_buff[9]  = head;
	regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
	regs_buff[11] = macb_tx_dma(&bp->queues[0], head);

	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
		regs_buff[12] = macb_or_gem_readl(bp, USRIO);
	if (macb_is_gem(bp))
		regs_buff[13] = gem_readl(bp, DMACFG);
}

static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct macb *bp = netdev_priv(netdev);

	wol->supported = 0;
	wol->wolopts = 0;

	if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
		wol->supported = WAKE_MAGIC;

		if (bp->wol & MACB_WOL_ENABLED)
			wol->wolopts |= WAKE_MAGIC;
	}
}

static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct macb *bp = netdev_priv(netdev);

	if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
	    (wol->wolopts & ~WAKE_MAGIC))
		return -EOPNOTSUPP;

	if (wol->wolopts & WAKE_MAGIC)
		bp->wol |= MACB_WOL_ENABLED;
	else
		bp->wol &= ~MACB_WOL_ENABLED;

	device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);

	return 0;
}

static void macb_get_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct macb *bp = netdev_priv(netdev);

	ring->rx_max_pending = MAX_RX_RING_SIZE;
	ring->tx_max_pending = MAX_TX_RING_SIZE;

	ring->rx_pending = bp->rx_ring_size;
	ring->tx_pending = bp->tx_ring_size;
}

static int macb_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring)
{
	struct macb *bp = netdev_priv(netdev);
	u32 new_rx_size, new_tx_size;
	unsigned int reset = 0;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_rx_size = clamp_t(u32, ring->rx_pending,
			      MIN_RX_RING_SIZE, MAX_RX_RING_SIZE);
	new_rx_size = roundup_pow_of_two(new_rx_size);

	new_tx_size = clamp_t(u32, ring->tx_pending,
			      MIN_TX_RING_SIZE, MAX_TX_RING_SIZE);
	new_tx_size = roundup_pow_of_two(new_tx_size);

	if ((new_tx_size == bp->tx_ring_size) &&
	    (new_rx_size == bp->rx_ring_size)) {
		/* nothing to do */
		return 0;
	}

	if (netif_running(bp->dev)) {
		reset = 1;
		macb_close(bp->dev);
	}

	bp->rx_ring_size = new_rx_size;
	bp->tx_ring_size = new_tx_size;

	if (reset)
		macb_open(bp->dev);

	return 0;
}
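
/* Reached from userspace via ethtool, e.g.
 *   ethtool -G eth0 rx 1024 tx 1024
 * Requested sizes are clamped to [64, 8192] (RX) and [64, 4096] (TX)
 * and rounded up to a power of two before the rings are rebuilt.
 */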

#ifdef CONFIG_MACB_USE_HWSTAMP
static unsigned int gem_get_tsu_rate(struct macb *bp)
{
	struct clk *tsu_clk;
	unsigned int tsu_rate;

	tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk");
	if (!IS_ERR(tsu_clk))
		tsu_rate = clk_get_rate(tsu_clk);
	/* try pclk instead */
	else if (!IS_ERR(bp->pclk)) {
		tsu_clk = bp->pclk;
		tsu_rate = clk_get_rate(tsu_clk);
	} else
		return -ENOTSUPP;
	return tsu_rate;
}

static s32 gem_get_ptp_max_adj(void)
{
	return 64000000;
}

static int gem_get_ts_info(struct net_device *dev,
			   struct ethtool_ts_info *info)
{
	struct macb *bp = netdev_priv(dev);

	if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) {
		ethtool_op_get_ts_info(dev, info);
		return 0;
	}

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE |
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types =
		(1 << HWTSTAMP_TX_ONESTEP_SYNC) |
		(1 << HWTSTAMP_TX_OFF) |
		(1 << HWTSTAMP_TX_ON);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_ALL);

	info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1;

	return 0;
}

static struct macb_ptp_info gem_ptp_info = {
	.ptp_init	 = gem_ptp_init,
	.ptp_remove	 = gem_ptp_remove,
	.get_ptp_max_adj = gem_get_ptp_max_adj,
	.get_tsu_rate	 = gem_get_tsu_rate,
	.get_ts_info	 = gem_get_ts_info,
	.get_hwtst	 = gem_get_hwtst,
	.set_hwtst	 = gem_set_hwtst,
};
#endif

static int macb_get_ts_info(struct net_device *netdev,
			    struct ethtool_ts_info *info)
{
	struct macb *bp = netdev_priv(netdev);

	if (bp->ptp_info)
		return bp->ptp_info->get_ts_info(netdev, info);

	return ethtool_op_get_ts_info(netdev, info);
}

static const struct ethtool_ops macb_ethtool_ops = {
	.get_regs_len		= macb_get_regs_len,
	.get_regs		= macb_get_regs,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_wol		= macb_get_wol,
	.set_wol		= macb_set_wol,
	.get_link_ksettings     = phy_ethtool_get_link_ksettings,
	.set_link_ksettings     = phy_ethtool_set_link_ksettings,
	.get_ringparam		= macb_get_ringparam,
	.set_ringparam		= macb_set_ringparam,
};

static const struct ethtool_ops gem_ethtool_ops = {
	.get_regs_len		= macb_get_regs_len,
	.get_regs		= macb_get_regs,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= macb_get_ts_info,
	.get_ethtool_stats	= gem_get_ethtool_stats,
	.get_strings		= gem_get_ethtool_strings,
	.get_sset_count		= gem_get_sset_count,
	.get_link_ksettings     = phy_ethtool_get_link_ksettings,
	.set_link_ksettings     = phy_ethtool_set_link_ksettings,
	.get_ringparam		= macb_get_ringparam,
	.set_ringparam		= macb_set_ringparam,
};

static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct phy_device *phydev = dev->phydev;
	struct macb *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	if (!bp->ptp_info)
		return phy_mii_ioctl(phydev, rq, cmd);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return bp->ptp_info->set_hwtst(dev, rq, cmd);
	case SIOCGHWTSTAMP:
		return bp->ptp_info->get_hwtst(dev, rq);
	default:
		return phy_mii_ioctl(phydev, rq, cmd);
	}
}

static int macb_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct macb *bp = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	/* TX checksum offload */
	if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) {
		u32 dmacfg;

		dmacfg = gem_readl(bp, DMACFG);
		if (features & NETIF_F_HW_CSUM)
			dmacfg |= GEM_BIT(TXCOEN);
		else
			dmacfg &= ~GEM_BIT(TXCOEN);
		gem_writel(bp, DMACFG, dmacfg);
	}

	/* RX checksum offload */
	if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) {
		u32 netcfg;

		netcfg = gem_readl(bp, NCFGR);
		if (features & NETIF_F_RXCSUM &&
		    !(netdev->flags & IFF_PROMISC))
			netcfg |= GEM_BIT(RXCOEN);
		else
			netcfg &= ~GEM_BIT(RXCOEN);
		gem_writel(bp, NCFGR, netcfg);
	}

	return 0;
}
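
/* Driven at runtime via ethtool, e.g.
 *   ethtool -K eth0 tx off rx off
 * which toggles the GEM checksum engines without reopening the device.
 */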

static const struct net_device_ops macb_netdev_ops = {
	.ndo_open		= macb_open,
	.ndo_stop		= macb_close,
	.ndo_start_xmit		= macb_start_xmit,
	.ndo_set_rx_mode	= macb_set_rx_mode,
	.ndo_get_stats		= macb_get_stats,
	.ndo_do_ioctl		= macb_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= macb_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= macb_poll_controller,
#endif
	.ndo_set_features	= macb_set_features,
	.ndo_features_check	= macb_features_check,
};

/* Configure peripheral capabilities according to device tree
 * and integration options used
 */
static void macb_configure_caps(struct macb *bp,
				const struct macb_config *dt_conf)
{
	u32 dcfg;

	if (dt_conf)
		bp->caps = dt_conf->caps;

	if (hw_is_gem(bp->regs, bp->native_io)) {
		bp->caps |= MACB_CAPS_MACB_IS_GEM;

		dcfg = gem_readl(bp, DCFG1);
		if (GEM_BFEXT(IRQCOR, dcfg) == 0)
			bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
		dcfg = gem_readl(bp, DCFG2);
		if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
			bp->caps |= MACB_CAPS_FIFO_MODE;
#ifdef CONFIG_MACB_USE_HWSTAMP
		if (gem_has_ptp(bp)) {
			if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5)))
				pr_err("GEM doesn't support hardware ptp.\n");
			else {
				bp->hw_dma_cap |= HW_DMA_CAP_PTP;
				bp->ptp_info = &gem_ptp_info;
			}
		}
#endif
	}

	dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
}

static void macb_probe_queues(void __iomem *mem,
			      bool native_io,
			      unsigned int *queue_mask,
			      unsigned int *num_queues)
{
	unsigned int hw_q;

	*queue_mask = 0x1;
	*num_queues = 1;

	/* is it macb or gem?
	 *
	 * We need to read directly from the hardware here because
	 * we are early in the probe process and don't have the
	 * MACB_CAPS_MACB_IS_GEM flag set yet
	 */
	if (!hw_is_gem(mem, native_io))
		return;

	/* bit 0 is never set but queue 0 always exists */
	*queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff;

	*queue_mask |= 0x1;

	for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
		if (*queue_mask & (1 << hw_q))
			(*num_queues)++;
}
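
/* Example: a GEM whose DCFG6 low byte reads 0x06 ends up with
 * queue_mask = 0x07 (queue 0 is always present) and num_queues = 3.
 */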

static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
			 struct clk **hclk, struct clk **tx_clk,
			 struct clk **rx_clk)
{
	struct macb_platform_data *pdata;
	int err;

	pdata = dev_get_platdata(&pdev->dev);
	if (pdata) {
		*pclk = pdata->pclk;
		*hclk = pdata->hclk;
	} else {
		*pclk = devm_clk_get(&pdev->dev, "pclk");
		*hclk = devm_clk_get(&pdev->dev, "hclk");
	}

	if (IS_ERR(*pclk)) {
		err = PTR_ERR(*pclk);
		dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
		return err;
	}

	if (IS_ERR(*hclk)) {
		err = PTR_ERR(*hclk);
		dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "rx_clk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	err = clk_prepare_enable(*pclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
		return err;
	}

	err = clk_prepare_enable(*hclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err);
		goto err_disable_pclk;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
		goto err_disable_hclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err);
		goto err_disable_txclk;
	}

	return 0;

err_disable_txclk:
	clk_disable_unprepare(*tx_clk);

err_disable_hclk:
	clk_disable_unprepare(*hclk);

err_disable_pclk:
	clk_disable_unprepare(*pclk);

	return err;
}

static int macb_init(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	unsigned int hw_q, q;
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	int err;
	u32 val;

	bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
	bp->rx_ring_size = DEFAULT_RX_RING_SIZE;

	/* set the queue register mapping once for all: queue0 has a special
	 * register mapping but we don't want to test the queue index then
	 * compute the corresponding register offset at run time.
	 */
	for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
		if (!(bp->queue_mask & (1 << hw_q)))
			continue;

		queue = &bp->queues[q];
		queue->bp = bp;
		if (hw_q) {
			queue->ISR  = GEM_ISR(hw_q - 1);
			queue->IER  = GEM_IER(hw_q - 1);
			queue->IDR  = GEM_IDR(hw_q - 1);
			queue->IMR  = GEM_IMR(hw_q - 1);
			queue->TBQP = GEM_TBQP(hw_q - 1);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
			if (bp->hw_dma_cap & HW_DMA_CAP_64B)
				queue->TBQPH = GEM_TBQPH(hw_q - 1);
#endif
		} else {
			/* queue0 uses legacy registers */
			queue->ISR  = MACB_ISR;
			queue->IER  = MACB_IER;
			queue->IDR  = MACB_IDR;
			queue->IMR  = MACB_IMR;
			queue->TBQP = MACB_TBQP;
2882
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2883
			if (bp->hw_dma_cap & HW_DMA_CAP_64B)
2884
				queue->TBQPH = MACB_TBQPH;
2885
#endif
2886 2887 2888 2889 2890 2891 2892
		}

		/* get irq: here we use the linux queue index, not the hardware
		 * queue index. the queue irq definitions in the device tree
		 * must remove the optional gaps that could exist in the
		 * hardware queue mask.
		 */
		queue->irq = platform_get_irq(pdev, q);
		err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
				       IRQF_SHARED, dev->name, queue);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to request IRQ %d (error %d)\n",
				queue->irq, err);
			return err;
		}

		INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
		q++;
	}

	dev->netdev_ops = &macb_netdev_ops;
	netif_napi_add(dev, &bp->napi, macb_poll, 64);

	/* setup appropriated routines according to adapter type */
	if (macb_is_gem(bp)) {
		bp->max_tx_length = GEM_MAX_TX_LEN;
		bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
		bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
		bp->macbgem_ops.mog_init_rings = gem_init_rings;
		bp->macbgem_ops.mog_rx = gem_rx;
		dev->ethtool_ops = &gem_ethtool_ops;
	} else {
		bp->max_tx_length = MACB_MAX_TX_LEN;
		bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
		bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
		bp->macbgem_ops.mog_init_rings = macb_init_rings;
		bp->macbgem_ops.mog_rx = macb_rx;
		dev->ethtool_ops = &macb_ethtool_ops;
	}

	/* Set features */
	dev->hw_features = NETIF_F_SG;

	/* Check LSO capability */
	if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6)))
		dev->hw_features |= MACB_NETIF_LSO;

	/* Checksum offload is only available on gem with packet buffer */
	if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	if (bp->caps & MACB_CAPS_SG_DISABLED)
		dev->hw_features &= ~NETIF_F_SG;
	dev->features = dev->hw_features;

	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
		val = 0;
		if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
			val = GEM_BIT(RGMII);
		else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
			 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
			val = MACB_BIT(RMII);
		else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
			val = MACB_BIT(MII);

		if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
			val |= MACB_BIT(CLKEN);

		macb_or_gem_writel(bp, USRIO, val);
	}

	/* Set MII management clock divider */
	val = macb_mdc_clk_div(bp);
	val |= macb_dbw(bp);
	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
	macb_writel(bp, NCFGR, val);

	return 0;
}

#if defined(CONFIG_OF)
/* 1518 rounded up */
#define AT91ETHER_MAX_RBUFF_SZ	0x600
/* max number of receive buffers */
#define AT91ETHER_MAX_RX_DESCR	9

/* Initialize and start the Receiver and Transmit subsystems */
static int at91ether_start(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	struct macb_dma_desc *desc;
	dma_addr_t addr;
	u32 ctl;
	int i;

	lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
					 (AT91ETHER_MAX_RX_DESCR *
					  macb_dma_desc_get_size(lp)),
					 &lp->rx_ring_dma, GFP_KERNEL);
	if (!lp->rx_ring)
		return -ENOMEM;

	lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
					    AT91ETHER_MAX_RX_DESCR *
					    AT91ETHER_MAX_RBUFF_SZ,
					    &lp->rx_buffers_dma, GFP_KERNEL);
	if (!lp->rx_buffers) {
		dma_free_coherent(&lp->pdev->dev,
				  AT91ETHER_MAX_RX_DESCR *
				  macb_dma_desc_get_size(lp),
				  lp->rx_ring, lp->rx_ring_dma);
		lp->rx_ring = NULL;
		return -ENOMEM;
	}

	addr = lp->rx_buffers_dma;
	for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
		desc = macb_rx_desc(lp, i);
		macb_set_addr(lp, desc, addr);
		desc->ctrl = 0;
		addr += AT91ETHER_MAX_RBUFF_SZ;
	}

	/* Set the Wrap bit on the last descriptor */
	desc->addr |= MACB_BIT(RX_WRAP);

	/* Reset buffer index */
	lp->rx_tail = 0;

	/* Program address of descriptor list in Rx Buffer Queue register */
	macb_writel(lp, RBQP, lp->rx_ring_dma);

	/* Enable Receive and Transmit */
	ctl = macb_readl(lp, NCR);
	macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));

	return 0;
}

/* Open the ethernet interface */
static int at91ether_open(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	u32 ctl;
	int ret;

	/* Clear internal statistics */
	ctl = macb_readl(lp, NCR);
	macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));

	macb_set_hwaddr(lp);

	ret = at91ether_start(dev);
	if (ret)
		return ret;

	/* Enable MAC interrupts */
	macb_writel(lp, IER, MACB_BIT(RCOMP)	|
			     MACB_BIT(RXUBR)	|
			     MACB_BIT(ISR_TUND)	|
			     MACB_BIT(ISR_RLE)	|
			     MACB_BIT(TCOMP)	|
			     MACB_BIT(ISR_ROVR)	|
			     MACB_BIT(HRESP));

	/* schedule a link state check */
	phy_start(dev->phydev);

	netif_start_queue(dev);

	return 0;
}

/* Close the interface */
static int at91ether_close(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	u32 ctl;

	/* Disable Receiver and Transmitter */
	ctl = macb_readl(lp, NCR);
	macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));

	/* Disable MAC interrupts */
	macb_writel(lp, IDR, MACB_BIT(RCOMP)	|
			     MACB_BIT(RXUBR)	|
			     MACB_BIT(ISR_TUND)	|
			     MACB_BIT(ISR_RLE)	|
			     MACB_BIT(TCOMP)	|
			     MACB_BIT(ISR_ROVR) |
			     MACB_BIT(HRESP));

	netif_stop_queue(dev);

	dma_free_coherent(&lp->pdev->dev,
			  AT91ETHER_MAX_RX_DESCR *
			  macb_dma_desc_get_size(lp),
			  lp->rx_ring, lp->rx_ring_dma);
	lp->rx_ring = NULL;

	dma_free_coherent(&lp->pdev->dev,
			  AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
			  lp->rx_buffers, lp->rx_buffers_dma);
	lp->rx_buffers = NULL;

	return 0;
}

/* Transmit packet */
static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);

	if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
		netif_stop_queue(dev);

		/* Store packet information (to free when Tx completed) */
		lp->skb = skb;
		lp->skb_length = skb->len;
		lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
							DMA_TO_DEVICE);
		if (dma_mapping_error(NULL, lp->skb_physaddr)) {
			dev_kfree_skb_any(skb);
			dev->stats.tx_dropped++;
			netdev_err(dev, "%s: DMA mapping error\n", __func__);
			return NETDEV_TX_OK;
		}

		/* Set address of the data in the Transmit Address register */
		macb_writel(lp, TAR, lp->skb_physaddr);
		/* Set length of the packet in the Transmit Control register */
		macb_writel(lp, TCR, skb->len);

	} else {
		netdev_err(dev, "%s called, but device is busy!\n", __func__);
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}

/* Extract received frames from buffer descriptors and send to upper layers.
 * (Called from interrupt context)
 */
static void at91ether_rx(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	struct macb_dma_desc *desc;
	unsigned char *p_recv;
	struct sk_buff *skb;
	unsigned int pktlen;

	desc = macb_rx_desc(lp, lp->rx_tail);
	while (desc->addr & MACB_BIT(RX_USED)) {
		p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
		pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
		skb = netdev_alloc_skb(dev, pktlen + 2);
		if (skb) {
			skb_reserve(skb, 2);
			skb_put_data(skb, p_recv, pktlen);

			skb->protocol = eth_type_trans(skb, dev);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pktlen;
			netif_rx(skb);
		} else {
			dev->stats.rx_dropped++;
		}

		if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
			dev->stats.multicast++;

		/* reset ownership bit */
		desc->addr &= ~MACB_BIT(RX_USED);

		/* wrap after last buffer */
		if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
			lp->rx_tail = 0;
		else
			lp->rx_tail++;

		desc = macb_rx_desc(lp, lp->rx_tail);
	}
}

/* MAC interrupt handler */
static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct macb *lp = netdev_priv(dev);
	u32 intstatus, ctl;

	/* MAC Interrupt Status register indicates what interrupts are pending.
	 * It is automatically cleared once read.
	 */
	intstatus = macb_readl(lp, ISR);

	/* Receive complete */
	if (intstatus & MACB_BIT(RCOMP))
		at91ether_rx(dev);

	/* Transmit complete */
	if (intstatus & MACB_BIT(TCOMP)) {
		/* The TCOM bit is set even if the transmission failed */
		if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
			dev->stats.tx_errors++;

		if (lp->skb) {
			dev_kfree_skb_irq(lp->skb);
			lp->skb = NULL;
			dma_unmap_single(NULL, lp->skb_physaddr,
					 lp->skb_length, DMA_TO_DEVICE);
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += lp->skb_length;
		}
		netif_wake_queue(dev);
	}

	/* Work-around for EMAC Errata section 41.3.1 */
	if (intstatus & MACB_BIT(RXUBR)) {
		ctl = macb_readl(lp, NCR);
		macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
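		/* ensure the receiver is seen disabled before it is
		 * re-enabled below
		 */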
		wmb();
		macb_writel(lp, NCR, ctl | MACB_BIT(RE));
	}

	if (intstatus & MACB_BIT(ISR_ROVR))
		netdev_err(dev, "ROVR error\n");

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void at91ether_poll_controller(struct net_device *dev)
{
	unsigned long flags;

	local_irq_save(flags);
	at91ether_interrupt(dev->irq, dev);
	local_irq_restore(flags);
}
#endif

static const struct net_device_ops at91ether_netdev_ops = {
	.ndo_open		= at91ether_open,
	.ndo_stop		= at91ether_close,
	.ndo_start_xmit		= at91ether_start_xmit,
	.ndo_get_stats		= macb_get_stats,
	.ndo_set_rx_mode	= macb_set_rx_mode,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_do_ioctl		= macb_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= at91ether_poll_controller,
#endif
};

static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
			      struct clk **hclk, struct clk **tx_clk,
			      struct clk **rx_clk)
{
	int err;

	*hclk = NULL;
	*tx_clk = NULL;
	*rx_clk = NULL;

	*pclk = devm_clk_get(&pdev->dev, "ether_clk");
	if (IS_ERR(*pclk))
		return PTR_ERR(*pclk);

	err = clk_prepare_enable(*pclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
		return err;
	}

	return 0;
}

static int at91ether_init(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(dev);
	int err;
	u32 reg;

	dev->netdev_ops = &at91ether_netdev_ops;
	dev->ethtool_ops = &macb_ethtool_ops;

	err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
			       0, dev->name, dev);
	if (err)
		return err;

	macb_writel(bp, NCR, 0);

	reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
	if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
		reg |= MACB_BIT(RM9200_RMII);

	macb_writel(bp, NCFGR, reg);

	return 0;
}

static const struct macb_config at91sam9260_config = {
	.caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config pc302gem_config = {
	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d2_config = {
	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d3_config = {
	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
	      | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
};

static const struct macb_config sama5d4_config = {
	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.dma_burst_length = 4,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config emac_config = {
	.clk_init = at91ether_clk_init,
	.init = at91ether_init,
};

static const struct macb_config np4_config = {
	.caps = MACB_CAPS_USRIO_DISABLED,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config zynqmp_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
			MACB_CAPS_JUMBO |
			MACB_CAPS_GEM_HAS_PTP,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
};

static const struct macb_config zynq_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct of_device_id macb_dt_ids[] = {
	{ .compatible = "cdns,at32ap7000-macb" },
	{ .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
	{ .compatible = "cdns,macb" },
	{ .compatible = "cdns,np4-macb", .data = &np4_config },
	{ .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
	{ .compatible = "cdns,gem", .data = &pc302gem_config },
	{ .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
	{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
	{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
	{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
	{ .compatible = "cdns,emac", .data = &emac_config },
	{ .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
	{ .compatible = "cdns,zynq-gem", .data = &zynq_config },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, macb_dt_ids);
#endif /* CONFIG_OF */

static const struct macb_config default_gem_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
			MACB_CAPS_JUMBO |
			MACB_CAPS_GEM_HAS_PTP,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
};

static int macb_probe(struct platform_device *pdev)
{
	const struct macb_config *macb_config = &default_gem_config;
	int (*clk_init)(struct platform_device *, struct clk **,
			struct clk **, struct clk **,  struct clk **)
					      = macb_config->clk_init;
	int (*init)(struct platform_device *) = macb_config->init;
	struct device_node *np = pdev->dev.of_node;
	struct device_node *phy_node;
	struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
	unsigned int queue_mask, num_queues;
	struct macb_platform_data *pdata;
	bool native_io;
	struct phy_device *phydev;
	struct net_device *dev;
	struct resource *regs;
	void __iomem *mem;
	const char *mac;
	struct macb *bp;
	int err;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mem = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	if (np) {
		const struct of_device_id *match;

		match = of_match_node(macb_dt_ids, np);
		if (match && match->data) {
			macb_config = match->data;
			clk_init = macb_config->clk_init;
			init = macb_config->init;
		}
	}

	err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk);
	if (err)
		return err;

	native_io = hw_is_native_io(mem);

	macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
	dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
	if (!dev) {
		err = -ENOMEM;
		goto err_disable_clocks;
	}

	dev->base_addr = regs->start;

	SET_NETDEV_DEV(dev, &pdev->dev);

	bp = netdev_priv(dev);
	bp->pdev = pdev;
	bp->dev = dev;
	bp->regs = mem;
	bp->native_io = native_io;
	if (native_io) {
		bp->macb_reg_readl = hw_readl_native;
		bp->macb_reg_writel = hw_writel_native;
	} else {
		bp->macb_reg_readl = hw_readl;
		bp->macb_reg_writel = hw_writel;
	}
	bp->num_queues = num_queues;
	bp->queue_mask = queue_mask;
	if (macb_config)
		bp->dma_burst_length = macb_config->dma_burst_length;
	bp->pclk = pclk;
	bp->hclk = hclk;
	bp->tx_clk = tx_clk;
	bp->rx_clk = rx_clk;
	if (macb_config)
		bp->jumbo_max_len = macb_config->jumbo_max_len;

	bp->wol = 0;
	if (of_get_property(np, "magic-packet", NULL))
		bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
	device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);

	spin_lock_init(&bp->lock);

	/* setup capabilities */
	macb_configure_caps(bp, macb_config);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
		dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
		bp->hw_dma_cap |= HW_DMA_CAP_64B;
	}
#endif
	platform_set_drvdata(pdev, dev);

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq < 0) {
		err = dev->irq;
		goto err_out_free_netdev;
	}

	/* MTU range: 68 - 1500 or 10240 */
	dev->min_mtu = GEM_MTU_MIN_SIZE;
	if (bp->caps & MACB_CAPS_JUMBO)
		dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
	else
		dev->max_mtu = ETH_DATA_LEN;

	mac = of_get_mac_address(np);
	if (mac)
		ether_addr_copy(bp->dev->dev_addr, mac);
	else
		macb_get_hwaddr(bp);

	/* Power up the PHY if there is a GPIO reset */
	phy_node = of_get_next_available_child(np, NULL);
	if (phy_node) {
		int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0);

		if (gpio_is_valid(gpio)) {
			bp->reset_gpio = gpio_to_desc(gpio);
			gpiod_direction_output(bp->reset_gpio, 1);
		}
	}
	of_node_put(phy_node);

	err = of_get_phy_mode(np);
	if (err < 0) {
		pdata = dev_get_platdata(&pdev->dev);
		if (pdata && pdata->is_rmii)
			bp->phy_interface = PHY_INTERFACE_MODE_RMII;
		else
			bp->phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		bp->phy_interface = err;
	}

	/* IP specific init */
	err = init(pdev);
	if (err)
		goto err_out_free_netdev;

	err = macb_mii_init(bp);
	if (err)
		goto err_out_free_netdev;

	phydev = dev->phydev;

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_unregister_mdio;
	}

	phy_attached_info(phydev);

	netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
		    macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
		    dev->base_addr, dev->irq, dev->dev_addr);

	return 0;

err_out_unregister_mdio:
	phy_disconnect(dev->phydev);
	mdiobus_unregister(bp->mii_bus);
	mdiobus_free(bp->mii_bus);

	/* Shutdown the PHY if there is a GPIO reset */
	if (bp->reset_gpio)
		gpiod_set_value(bp->reset_gpio, 0);

err_out_free_netdev:
	free_netdev(dev);

err_disable_clocks:
	clk_disable_unprepare(tx_clk);
	clk_disable_unprepare(hclk);
	clk_disable_unprepare(pclk);
	clk_disable_unprepare(rx_clk);

	return err;
}

static int macb_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct macb *bp;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		if (dev->phydev)
			phy_disconnect(dev->phydev);
		mdiobus_unregister(bp->mii_bus);
		dev->phydev = NULL;
		mdiobus_free(bp->mii_bus);

		/* Shutdown the PHY if there is a GPIO reset */
		if (bp->reset_gpio)
			gpiod_set_value(bp->reset_gpio, 0);

		unregister_netdev(dev);
		clk_disable_unprepare(bp->tx_clk);
		clk_disable_unprepare(bp->hclk);
		clk_disable_unprepare(bp->pclk);
		clk_disable_unprepare(bp->rx_clk);
		of_node_put(bp->phy_node);
		free_netdev(dev);
	}

	return 0;
}

static int __maybe_unused macb_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

	netif_carrier_off(netdev);
	netif_device_detach(netdev);

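	/* with WoL armed the clocks stay enabled so the MAC can still
	 * match magic packets; otherwise they are all gated off
	 */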
	if (bp->wol & MACB_WOL_ENABLED) {
		macb_writel(bp, IER, MACB_BIT(WOL));
		macb_writel(bp, WOL, MACB_BIT(MAG));
		enable_irq_wake(bp->queues[0].irq);
	} else {
		clk_disable_unprepare(bp->tx_clk);
		clk_disable_unprepare(bp->hclk);
		clk_disable_unprepare(bp->pclk);
		clk_disable_unprepare(bp->rx_clk);
	}

	return 0;
}

static int __maybe_unused macb_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

	if (bp->wol & MACB_WOL_ENABLED) {
		macb_writel(bp, IDR, MACB_BIT(WOL));
		macb_writel(bp, WOL, 0);
		disable_irq_wake(bp->queues[0].irq);
	} else {
		clk_prepare_enable(bp->pclk);
		clk_prepare_enable(bp->hclk);
		clk_prepare_enable(bp->tx_clk);
		clk_prepare_enable(bp->rx_clk);
	}

	netif_device_attach(netdev);

	return 0;
}

static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);

static struct platform_driver macb_driver = {
	.probe		= macb_probe,
	.remove		= macb_remove,
	.driver		= {
		.name		= "macb",
		.of_match_table	= of_match_ptr(macb_dt_ids),
		.pm	= &macb_pm_ops,
	},
};

module_platform_driver(macb_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_ALIAS("platform:macb");