/*
 * Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "macb.h"

#define MACB_RX_BUFFER_SIZE	128
#define RX_BUFFER_MULTIPLE	64  /* bytes */
#define RX_RING_SIZE		512 /* must be power of 2 */
#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)

#define TX_RING_SIZE		128 /* must be power of 2 */
#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)

/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)

#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
				 | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
					| MACB_BIT(ISR_RLE)		\
					| MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))

#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))

/*
 * Graceful stop timeouts in us. We should allow up to
 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 */
#define MACB_HALT_TIMEOUT	1230
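
/*
 * Derivation of the 1230 us figure above: a maximum-length 1518 byte
 * frame plus 8 bytes of preamble/SFD and a 12 byte inter-frame gap is
 * 1538 bytes = 12304 bits on the wire, i.e. ~1230 us at 10 Mbit/s.
 */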

/* Ring buffer accessors */
static unsigned int macb_tx_ring_wrap(unsigned int index)
{
	return index & (TX_RING_SIZE - 1);
}

static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
					  unsigned int index)
{
	return &queue->tx_ring[macb_tx_ring_wrap(index)];
}

static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
				       unsigned int index)
{
	return &queue->tx_skb[macb_tx_ring_wrap(index)];
}

static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
{
	dma_addr_t offset;

	offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc);

	return queue->tx_ring_dma + offset;
}

static unsigned int macb_rx_ring_wrap(unsigned int index)
{
	return index & (RX_RING_SIZE - 1);
}

static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
{
	return &bp->rx_ring[macb_rx_ring_wrap(index)];
}

static void *macb_rx_buffer(struct macb *bp, unsigned int index)
{
	return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
}
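
/*
 * Note: the "index & (SIZE - 1)" wrap used by the accessors above is
 * equivalent to "index % SIZE" only because RX_RING_SIZE and
 * TX_RING_SIZE are powers of two, as required where they are defined.
 */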

static void macb_set_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;

	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
	macb_or_gem_writel(bp, SA1B, bottom);
	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
	macb_or_gem_writel(bp, SA1T, top);

	/* Clear unused address register sets */
	macb_or_gem_writel(bp, SA2B, 0);
	macb_or_gem_writel(bp, SA2T, 0);
	macb_or_gem_writel(bp, SA3B, 0);
	macb_or_gem_writel(bp, SA3T, 0);
	macb_or_gem_writel(bp, SA4B, 0);
	macb_or_gem_writel(bp, SA4T, 0);
}

static void macb_get_hwaddr(struct macb *bp)
{
	struct macb_platform_data *pdata;
	u32 bottom;
	u16 top;
	u8 addr[6];
	int i;

	pdata = dev_get_platdata(&bp->pdev->dev);

	/* Check all 4 address registers for a valid address */
	for (i = 0; i < 4; i++) {
		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
		top = macb_or_gem_readl(bp, SA1T + i * 8);

		if (pdata && pdata->rev_eth_addr) {
			addr[5] = bottom & 0xff;
			addr[4] = (bottom >> 8) & 0xff;
			addr[3] = (bottom >> 16) & 0xff;
			addr[2] = (bottom >> 24) & 0xff;
			addr[1] = top & 0xff;
			addr[0] = (top & 0xff00) >> 8;
		} else {
			addr[0] = bottom & 0xff;
			addr[1] = (bottom >> 8) & 0xff;
			addr[2] = (bottom >> 16) & 0xff;
			addr[3] = (bottom >> 24) & 0xff;
			addr[4] = top & 0xff;
			addr[5] = (top >> 8) & 0xff;
		}

		if (is_valid_ether_addr(addr)) {
			memcpy(bp->dev->dev_addr, addr, sizeof(addr));
			return;
		}
	}

	netdev_info(bp->dev, "invalid hw address, using random\n");
	eth_hw_addr_random(bp->dev);
}

static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct macb *bp = bus->priv;
	int value;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_READ)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)));

	/* wait for end of transfer */
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	value = MACB_BFEXT(DATA, macb_readl(bp, MAN));

	return value;
}

static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct macb *bp = bus->priv;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_WRITE)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)
			      | MACB_BF(DATA, value)));

	/* wait for end of transfer */
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	return 0;
}

/**
 * macb_set_tx_clk() - Set the TX clock to match a new link speed
 * @clk:	Pointer to the clock to change
 * @speed:	New link speed (SPEED_10, SPEED_100 or SPEED_1000)
 * @dev:	Pointer to the struct net_device
 */
static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
{
	long ferr, rate, rate_rounded;

	if (!clk)
		return;

	switch (speed) {
	case SPEED_10:
		rate = 2500000;
		break;
	case SPEED_100:
		rate = 25000000;
		break;
	case SPEED_1000:
		rate = 125000000;
		break;
	default:
		return;
	}

	rate_rounded = clk_round_rate(clk, rate);
	if (rate_rounded < 0)
		return;

	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
	 * is not satisfied: ferr is computed in units of rate / 100000,
	 * i.e. steps of 10 ppm, so "ferr > 5" means more than 50 ppm off.
	 */
	ferr = abs(rate_rounded - rate);
	ferr = DIV_ROUND_UP(ferr, rate / 100000);
	if (ferr > 5)
		netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
				rate);

	if (clk_set_rate(clk, rate_rounded))
		netdev_err(dev, "adjusting tx_clk failed.\n");
}

static void macb_handle_link_change(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct phy_device *phydev = bp->phy_dev;
	unsigned long flags;

	int status_change = 0;

	spin_lock_irqsave(&bp->lock, flags);

	if (phydev->link) {
		if ((bp->speed != phydev->speed) ||
		    (bp->duplex != phydev->duplex)) {
			u32 reg;

			reg = macb_readl(bp, NCFGR);
			reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
			if (macb_is_gem(bp))
				reg &= ~GEM_BIT(GBE);

			if (phydev->duplex)
				reg |= MACB_BIT(FD);
			if (phydev->speed == SPEED_100)
				reg |= MACB_BIT(SPD);
			if (phydev->speed == SPEED_1000 &&
			    bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
				reg |= GEM_BIT(GBE);

			macb_or_gem_writel(bp, NCFGR, reg);

			bp->speed = phydev->speed;
			bp->duplex = phydev->duplex;
			status_change = 1;
		}
	}

	if (phydev->link != bp->link) {
		if (!phydev->link) {
			bp->speed = 0;
			bp->duplex = -1;
		}
		bp->link = phydev->link;

		status_change = 1;
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (status_change) {
		if (phydev->link) {
			/* Update the TX clock rate if and only if the link is
			 * up and there has been a link change.
			 */
			macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);

			netif_carrier_on(dev);
			netdev_info(dev, "link up (%d/%s)\n",
				    phydev->speed,
				    phydev->duplex == DUPLEX_FULL ?
				    "Full" : "Half");
		} else {
			netif_carrier_off(dev);
			netdev_info(dev, "link down\n");
		}
	}
}

/* based on au1000_eth.c */
static int macb_mii_probe(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_platform_data *pdata;
	struct phy_device *phydev;
	int phy_irq;
	int ret;

	phydev = phy_find_first(bp->mii_bus);
	if (!phydev) {
		netdev_err(dev, "no PHY found\n");
		return -ENXIO;
	}

	pdata = dev_get_platdata(&bp->pdev->dev);
	if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
		ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int");
		if (!ret) {
			phy_irq = gpio_to_irq(pdata->phy_irq_pin);
			phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
		}
	}

	/* attach the mac to the phy */
	ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
				 bp->phy_interface);
	if (ret) {
		netdev_err(dev, "Could not attach to PHY\n");
		return ret;
	}

	/* mask with MAC supported features */
	if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
		phydev->supported &= PHY_GBIT_FEATURES;
	else
		phydev->supported &= PHY_BASIC_FEATURES;

	phydev->advertising = phydev->supported;

	bp->link = 0;
	bp->speed = 0;
	bp->duplex = -1;
	bp->phy_dev = phydev;

	return 0;
}

static int macb_mii_init(struct macb *bp)
{
	struct macb_platform_data *pdata;
	struct device_node *np;
	int err = -ENXIO, i;

	/* Enable management port */
	macb_writel(bp, NCR, MACB_BIT(MPE));

	bp->mii_bus = mdiobus_alloc();
	if (bp->mii_bus == NULL) {
		err = -ENOMEM;
		goto err_out;
	}

	bp->mii_bus->name = "MACB_mii_bus";
	bp->mii_bus->read = &macb_mdio_read;
	bp->mii_bus->write = &macb_mdio_write;
	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		bp->pdev->name, bp->pdev->id);
	bp->mii_bus->priv = bp;
	bp->mii_bus->parent = &bp->dev->dev;
	pdata = dev_get_platdata(&bp->pdev->dev);

	bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
	if (!bp->mii_bus->irq) {
		err = -ENOMEM;
		goto err_out_free_mdiobus;
	}

	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);

	np = bp->pdev->dev.of_node;
	if (np) {
		/* try dt phy registration */
		err = of_mdiobus_register(bp->mii_bus, np);

		/* fall back to standard phy registration if no phy was
		 * found during dt phy registration
		 */
		if (!err && !phy_find_first(bp->mii_bus)) {
			for (i = 0; i < PHY_MAX_ADDR; i++) {
				struct phy_device *phydev;

				phydev = mdiobus_scan(bp->mii_bus, i);
				if (IS_ERR(phydev)) {
					err = PTR_ERR(phydev);
					break;
				}
			}

			if (err)
				goto err_out_unregister_bus;
		}
	} else {
		for (i = 0; i < PHY_MAX_ADDR; i++)
			bp->mii_bus->irq[i] = PHY_POLL;

		if (pdata)
			bp->mii_bus->phy_mask = pdata->phy_mask;

		err = mdiobus_register(bp->mii_bus);
	}

	if (err)
		goto err_out_free_mdio_irq;

	err = macb_mii_probe(bp->dev);
	if (err)
		goto err_out_unregister_bus;

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(bp->mii_bus);
err_out_free_mdio_irq:
	kfree(bp->mii_bus->irq);
err_out_free_mdiobus:
	mdiobus_free(bp->mii_bus);
err_out:
	return err;
}

static void macb_update_stats(struct macb *bp)
{
	u32 __iomem *reg = bp->regs + MACB_PFR;
	u32 *p = &bp->hw_stats.macb.rx_pause_frames;
	u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;

	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);

	for (; p < end; p++, reg++)
		*p += readl_relaxed(reg);
}

static int macb_halt_tx(struct macb *bp)
{
	unsigned long	halt_time, timeout;
	u32		status;

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));

	timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
	do {
		halt_time = jiffies;
		status = macb_readl(bp, TSR);
		if (!(status & MACB_BIT(TGO)))
			return 0;

		usleep_range(10, 250);
	} while (time_before(halt_time, timeout));

	return -ETIMEDOUT;
}

static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
{
	if (tx_skb->mapping) {
		if (tx_skb->mapped_as_page)
			dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
				       tx_skb->size, DMA_TO_DEVICE);
		else
			dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
					 tx_skb->size, DMA_TO_DEVICE);
		tx_skb->mapping = 0;
	}

	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
	}
}

static void macb_tx_error_task(struct work_struct *work)
{
	struct macb_queue	*queue = container_of(work, struct macb_queue,
						      tx_error_task);
	struct macb		*bp = queue->bp;
	struct macb_tx_skb	*tx_skb;
	struct macb_dma_desc	*desc;
	struct sk_buff		*skb;
	unsigned int		tail;
	unsigned long		flags;

	netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
		    (unsigned int)(queue - bp->queues),
		    queue->tx_tail, queue->tx_head);

	/* Prevent the queue IRQ handlers from running: each of them may call
	 * macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
	 * As explained below, we have to halt the transmission before updating
	 * TBQP registers so we call netif_tx_stop_all_queues() to notify the
	 * network engine about the macb/gem being halted.
	 */
	spin_lock_irqsave(&bp->lock, flags);

	/* Make sure nobody is trying to queue up new packets */
	netif_tx_stop_all_queues(bp->dev);

	/*
	 * Stop transmission now
	 * (in case we have just queued new packets)
	 * macb/gem must be halted to write TBQP register
	 */
	if (macb_halt_tx(bp))
		/* Just complain for now, reinitializing TX path can be good */
		netdev_err(bp->dev, "BUG: halt tx timed out\n");

	/*
	 * Treat frames in TX queue including the ones that caused the error.
	 * Free transmit buffers in upper layer.
	 */
	for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
		u32	ctrl;

		desc = macb_tx_desc(queue, tail);
		ctrl = desc->ctrl;
		tx_skb = macb_tx_skb(queue, tail);
		skb = tx_skb->skb;

		if (ctrl & MACB_BIT(TX_USED)) {
			/* skb is set for the last buffer of the frame */
			while (!skb) {
				macb_tx_unmap(bp, tx_skb);
				tail++;
				tx_skb = macb_tx_skb(queue, tail);
				skb = tx_skb->skb;
			}

			/* ctrl still refers to the first buffer descriptor
			 * since it's the only one written back by the hardware
			 */
			if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
				netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(tail), skb->data);
				bp->stats.tx_packets++;
				bp->stats.tx_bytes += skb->len;
			}
		} else {
			/*
			 * "Buffers exhausted mid-frame" errors may only happen
			 * if the driver is buggy, so complain loudly about those.
			 * Statistics are updated by hardware.
			 */
			if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
				netdev_err(bp->dev,
					   "BUG: TX buffers exhausted mid-frame\n");

			desc->ctrl = ctrl | MACB_BIT(TX_USED);
		}

		macb_tx_unmap(bp, tx_skb);
	}

	/* Set end of TX queue */
	desc = macb_tx_desc(queue, 0);
	desc->addr = 0;
	desc->ctrl = MACB_BIT(TX_USED);

	/* Make descriptor updates visible to hardware */
	wmb();

	/* Reinitialize the TX desc queue */
	queue_writel(queue, TBQP, queue->tx_ring_dma);
	/* Make TX ring reflect state of hardware */
	queue->tx_head = 0;
	queue->tx_tail = 0;

	/* Housework before enabling TX IRQ */
	macb_writel(bp, TSR, macb_readl(bp, TSR));
	queue_writel(queue, IER, MACB_TX_INT_FLAGS);

	/* Now we are ready to start transmission again */
	netif_tx_start_all_queues(bp->dev);
	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	spin_unlock_irqrestore(&bp->lock, flags);
}

static void macb_tx_interrupt(struct macb_queue *queue)
{
	unsigned int tail;
	unsigned int head;
	u32 status;
	struct macb *bp = queue->bp;
	u16 queue_index = queue - bp->queues;

	status = macb_readl(bp, TSR);
	macb_writel(bp, TSR, status);

	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
		queue_writel(queue, ISR, MACB_BIT(TCOMP));

	netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
		(unsigned long)status);

	head = queue->tx_head;
	for (tail = queue->tx_tail; tail != head; tail++) {
		struct macb_tx_skb	*tx_skb;
		struct sk_buff		*skb;
		struct macb_dma_desc	*desc;
		u32			ctrl;

		desc = macb_tx_desc(queue, tail);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		ctrl = desc->ctrl;

		/* TX_USED bit is only set by hardware on the very first buffer
		 * descriptor of the transmitted frame.
		 */
		if (!(ctrl & MACB_BIT(TX_USED)))
			break;

		/* Process all buffers of the current transmitted frame */
		for (;; tail++) {
			tx_skb = macb_tx_skb(queue, tail);
			skb = tx_skb->skb;

			/* First, update TX stats if needed */
			if (skb) {
				netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(tail), skb->data);
				bp->stats.tx_packets++;
				bp->stats.tx_bytes += skb->len;
			}

			/* Now we can safely release resources */
			macb_tx_unmap(bp, tx_skb);

			/* skb is set only for the last buffer of the frame.
			 * WARNING: at this point skb has been freed by
			 * macb_tx_unmap().
			 */
			if (skb)
				break;
		}
	}

	queue->tx_tail = tail;
	if (__netif_subqueue_stopped(bp->dev, queue_index) &&
	    CIRC_CNT(queue->tx_head, queue->tx_tail,
		     TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
		netif_wake_subqueue(bp->dev, queue_index);
}

static void gem_rx_refill(struct macb *bp)
{
	unsigned int		entry;
	struct sk_buff		*skb;
	dma_addr_t		paddr;

	while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) {
		entry = macb_rx_ring_wrap(bp->rx_prepared_head);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		bp->rx_prepared_head++;

		if (bp->rx_skbuff[entry] == NULL) {
			/* allocate sk_buff for this free entry in ring */
			skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
			if (unlikely(skb == NULL)) {
				netdev_err(bp->dev,
					   "Unable to allocate sk_buff\n");
				break;
			}

			/* now fill corresponding descriptor entry */
			paddr = dma_map_single(&bp->pdev->dev, skb->data,
					       bp->rx_buffer_size, DMA_FROM_DEVICE);
			if (dma_mapping_error(&bp->pdev->dev, paddr)) {
				dev_kfree_skb(skb);
				break;
			}

			bp->rx_skbuff[entry] = skb;

			if (entry == RX_RING_SIZE - 1)
				paddr |= MACB_BIT(RX_WRAP);
			bp->rx_ring[entry].addr = paddr;
			bp->rx_ring[entry].ctrl = 0;

			/* properly align Ethernet header */
			skb_reserve(skb, NET_IP_ALIGN);
		} else {
			bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED);
			bp->rx_ring[entry].ctrl = 0;
		}
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
		   bp->rx_prepared_head, bp->rx_tail);
}
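
/*
 * Note: CIRC_SPACE() always keeps one ring slot unused, so a completely
 * "full" ring still has head != tail; that is what lets the empty and
 * full states be told apart without a separate counter.
 */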

/* Mark DMA descriptors from begin up to and not including end as unused */
static void discard_partial_frame(struct macb *bp, unsigned int begin,
				  unsigned int end)
{
	unsigned int frag;

	for (frag = begin; frag != end; frag++) {
		struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
		desc->addr &= ~MACB_BIT(RX_USED);
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	/*
	 * When this happens, the hardware stats registers for
	 * whatever caused this are updated, so we don't have to record
	 * anything.
	 */
}

static int gem_rx(struct macb *bp, int budget)
{
	unsigned int		len;
	unsigned int		entry;
	struct sk_buff		*skb;
	struct macb_dma_desc	*desc;
	int			count = 0;

	while (count < budget) {
		u32 addr, ctrl;

		entry = macb_rx_ring_wrap(bp->rx_tail);
		desc = &bp->rx_ring[entry];

		/* Make hw descriptor updates visible to CPU */
		rmb();

		addr = desc->addr;
		ctrl = desc->ctrl;

		if (!(addr & MACB_BIT(RX_USED)))
			break;

		bp->rx_tail++;
		count++;

		if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
			netdev_err(bp->dev,
				   "not whole frame pointed by descriptor\n");
			bp->stats.rx_dropped++;
			break;
		}
		skb = bp->rx_skbuff[entry];
		if (unlikely(!skb)) {
			netdev_err(bp->dev,
				   "inconsistent Rx descriptor chain\n");
			bp->stats.rx_dropped++;
			break;
		}
		/* now everything is ready for receiving packet */
		bp->rx_skbuff[entry] = NULL;
		len = MACB_BFEXT(RX_FRMLEN, ctrl);

		netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);

		skb_put(skb, len);
		addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr));
		dma_unmap_single(&bp->pdev->dev, addr,
				 bp->rx_buffer_size, DMA_FROM_DEVICE);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb_checksum_none_assert(skb);
		if (bp->dev->features & NETIF_F_RXCSUM &&
		    !(bp->dev->flags & IFF_PROMISC) &&
		    GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		bp->stats.rx_packets++;
		bp->stats.rx_bytes += skb->len;

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
		netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
			    skb->len, skb->csum);
		print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb_mac_header(skb), 16, true);
		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb->data, 32, true);
#endif

		netif_receive_skb(skb);
	}

	gem_rx_refill(bp);

	return count;
}

static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
			 unsigned int last_frag)
{
	unsigned int len;
	unsigned int frag;
	unsigned int offset;
	struct sk_buff *skb;
	struct macb_dma_desc *desc;

	desc = macb_rx_desc(bp, last_frag);
	len = MACB_BFEXT(RX_FRMLEN, desc->ctrl);

	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
		macb_rx_ring_wrap(first_frag),
		macb_rx_ring_wrap(last_frag), len);

	/*
	 * The ethernet header starts NET_IP_ALIGN bytes into the
	 * first buffer. Since the header is 14 bytes, this makes the
	 * payload word-aligned.
	 *
	 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
	 * the two padding bytes into the skb so that we avoid hitting
	 * the slowpath in memcpy(), and pull them off afterwards.
	 */
	skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
	if (!skb) {
		bp->stats.rx_dropped++;
		for (frag = first_frag; ; frag++) {
			desc = macb_rx_desc(bp, frag);
			desc->addr &= ~MACB_BIT(RX_USED);
			if (frag == last_frag)
				break;
		}

		/* Make descriptor updates visible to hardware */
		wmb();

		return 1;
	}

	offset = 0;
	len += NET_IP_ALIGN;
	skb_checksum_none_assert(skb);
	skb_put(skb, len);

	for (frag = first_frag; ; frag++) {
		unsigned int frag_len = bp->rx_buffer_size;

		if (offset + frag_len > len) {
			BUG_ON(frag != last_frag);
			frag_len = len - offset;
		}
		skb_copy_to_linear_data_offset(skb, offset,
				macb_rx_buffer(bp, frag), frag_len);
		offset += bp->rx_buffer_size;
		desc = macb_rx_desc(bp, frag);
		desc->addr &= ~MACB_BIT(RX_USED);

		if (frag == last_frag)
			break;
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	__skb_pull(skb, NET_IP_ALIGN);
	skb->protocol = eth_type_trans(skb, bp->dev);

	bp->stats.rx_packets++;
	bp->stats.rx_bytes += skb->len;
	netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
		   skb->len, skb->csum);
	netif_receive_skb(skb);

	return 0;
}

static int macb_rx(struct macb *bp, int budget)
{
	int received = 0;
	unsigned int tail;
	int first_frag = -1;

	for (tail = bp->rx_tail; budget > 0; tail++) {
		struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
		u32 addr, ctrl;

		/* Make hw descriptor updates visible to CPU */
		rmb();

		addr = desc->addr;
		ctrl = desc->ctrl;

		if (!(addr & MACB_BIT(RX_USED)))
			break;

		if (ctrl & MACB_BIT(RX_SOF)) {
			if (first_frag != -1)
				discard_partial_frame(bp, first_frag, tail);
			first_frag = tail;
		}

		if (ctrl & MACB_BIT(RX_EOF)) {
			int dropped;
			BUG_ON(first_frag == -1);

			dropped = macb_rx_frame(bp, first_frag, tail);
			first_frag = -1;
			if (!dropped) {
				received++;
				budget--;
			}
		}
	}

	if (first_frag != -1)
		bp->rx_tail = first_frag;
	else
		bp->rx_tail = tail;

	return received;
}

static int macb_poll(struct napi_struct *napi, int budget)
{
	struct macb *bp = container_of(napi, struct macb, napi);
	int work_done;
	u32 status;

	status = macb_readl(bp, RSR);
	macb_writel(bp, RSR, status);

	work_done = 0;

	netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
		   (unsigned long)status, budget);

	work_done = bp->macbgem_ops.mog_rx(bp, budget);
	if (work_done < budget) {
		napi_complete(napi);

		/* Packets received while interrupts were disabled */
		status = macb_readl(bp, RSR);
		if (status) {
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				macb_writel(bp, ISR, MACB_BIT(RCOMP));
			napi_reschedule(napi);
		} else {
			macb_writel(bp, IER, MACB_RX_INT_FLAGS);
		}
	}

	/* TODO: Handle errors */

	return work_done;
}

static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
	struct macb_queue *queue = dev_id;
	struct macb *bp = queue->bp;
	struct net_device *dev = bp->dev;
	u32 status;

	status = queue_readl(queue, ISR);

	if (unlikely(!status))
		return IRQ_NONE;

	spin_lock(&bp->lock);

	while (status) {
		/* close possible race with dev_close */
		if (unlikely(!netif_running(dev))) {
			queue_writel(queue, IDR, -1);
			break;
		}

		netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
			    (unsigned int)(queue - bp->queues),
			    (unsigned long)status);

		if (status & MACB_RX_INT_FLAGS) {
			/*
			 * There's no point taking any more interrupts
			 * until we have processed the buffers. The
			 * scheduling call may fail if the poll routine
			 * is already scheduled, so disable interrupts
			 * now.
			 */
			queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(RCOMP));

			if (napi_schedule_prep(&bp->napi)) {
				netdev_vdbg(bp->dev, "scheduling RX softirq\n");
				__napi_schedule(&bp->napi);
			}
		}

		if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
			queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
			schedule_work(&queue->tx_error_task);

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);

			break;
		}

		if (status & MACB_BIT(TCOMP))
			macb_tx_interrupt(queue);

		/*
		 * Link change detection isn't possible with RMII, so we'll
		 * add that if/when we get our hands on a full-blown MII PHY.
		 */

		if (status & MACB_BIT(ISR_ROVR)) {
			/* We missed at least one packet */
			if (macb_is_gem(bp))
				bp->hw_stats.gem.rx_overruns++;
			else
				bp->hw_stats.macb.rx_overruns++;

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
		}

		if (status & MACB_BIT(HRESP)) {
			/*
			 * TODO: Reset the hardware, and maybe move the
			 * netdev_err to a lower-priority context as well
			 * (work queue?)
			 */
			netdev_err(dev, "DMA bus error: HRESP not OK\n");

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(HRESP));
		}

		status = queue_readl(queue, ISR);
	}

	spin_unlock(&bp->lock);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void macb_poll_controller(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned long flags;
	unsigned int q;

	local_irq_save(flags);
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		macb_interrupt(dev->irq, queue);
	local_irq_restore(flags);
}
#endif

static inline unsigned int macb_count_tx_descriptors(struct macb *bp,
						     unsigned int len)
{
	return (len + bp->max_tx_length - 1) / bp->max_tx_length;
}
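
/*
 * The expression above is simply DIV_ROUND_UP(len, bp->max_tx_length):
 * a payload of len bytes needs one descriptor per max_tx_length-sized
 * chunk, plus one for any remainder.
 */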

static unsigned int macb_tx_map(struct macb *bp,
				struct macb_queue *queue,
				struct sk_buff *skb)
{
	dma_addr_t mapping;
	unsigned int len, entry, i, tx_head = queue->tx_head;
	struct macb_tx_skb *tx_skb = NULL;
	struct macb_dma_desc *desc;
	unsigned int offset, size, count = 0;
	unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int eof = 1;
	u32 ctrl;

	/* First, map non-paged data */
	len = skb_headlen(skb);
	offset = 0;
	while (len) {
		size = min(len, bp->max_tx_length);
		entry = macb_tx_ring_wrap(tx_head);
		tx_skb = &queue->tx_skb[entry];

		mapping = dma_map_single(&bp->pdev->dev,
					 skb->data + offset,
					 size, DMA_TO_DEVICE);
		if (dma_mapping_error(&bp->pdev->dev, mapping))
			goto dma_error;

		/* Save info to properly release resources */
		tx_skb->skb = NULL;
		tx_skb->mapping = mapping;
		tx_skb->size = size;
		tx_skb->mapped_as_page = false;

		len -= size;
		offset += size;
		count++;
		tx_head++;
	}

	/* Then, map paged data from fragments */
	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		len = skb_frag_size(frag);
		offset = 0;
		while (len) {
			size = min(len, bp->max_tx_length);
			entry = macb_tx_ring_wrap(tx_head);
			tx_skb = &queue->tx_skb[entry];

			mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
						   offset, size, DMA_TO_DEVICE);
			if (dma_mapping_error(&bp->pdev->dev, mapping))
				goto dma_error;

			/* Save info to properly release resources */
			tx_skb->skb = NULL;
			tx_skb->mapping = mapping;
			tx_skb->size = size;
			tx_skb->mapped_as_page = true;

			len -= size;
			offset += size;
			count++;
			tx_head++;
		}
	}

	/* Should never happen */
	if (unlikely(tx_skb == NULL)) {
		netdev_err(bp->dev, "BUG! empty skb!\n");
		return 0;
	}

	/* This is the last buffer of the frame: save socket buffer */
	tx_skb->skb = skb;

	/* Update TX ring: update buffer descriptors in reverse order
	 * to avoid race condition
	 */

	/* Set 'TX_USED' bit in buffer descriptor at tx_head position
	 * to set the end of TX queue
	 */
	i = tx_head;
	entry = macb_tx_ring_wrap(i);
	ctrl = MACB_BIT(TX_USED);
	desc = &queue->tx_ring[entry];
	desc->ctrl = ctrl;

	do {
		i--;
		entry = macb_tx_ring_wrap(i);
		tx_skb = &queue->tx_skb[entry];
		desc = &queue->tx_ring[entry];

		ctrl = (u32)tx_skb->size;
		if (eof) {
			ctrl |= MACB_BIT(TX_LAST);
			eof = 0;
		}
		if (unlikely(entry == (TX_RING_SIZE - 1)))
			ctrl |= MACB_BIT(TX_WRAP);

		/* Set TX buffer descriptor */
		desc->addr = tx_skb->mapping;
		/* desc->addr must be visible to hardware before clearing
		 * 'TX_USED' bit in desc->ctrl.
		 */
		wmb();
		desc->ctrl = ctrl;
	} while (i != queue->tx_head);

	queue->tx_head = tx_head;

	return count;

dma_error:
	netdev_err(bp->dev, "TX DMA map failed\n");

	for (i = queue->tx_head; i != tx_head; i++) {
		tx_skb = macb_tx_skb(queue, i);

		macb_tx_unmap(bp, tx_skb);
	}

	return 0;
}

static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue = &bp->queues[queue_index];
	unsigned long flags;
	unsigned int count, nr_frags, frag_size, f;

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
	netdev_vdbg(bp->dev,
		   "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
		   queue_index, skb->len, skb->head, skb->data,
		   skb_tail_pointer(skb), skb_end_pointer(skb));
	print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
		       skb->data, 16, true);
#endif

	/* Count how many TX buffer descriptors are needed to send this
	 * socket buffer: skb fragments of jumbo frames may need to be
	 * split into multiple buffer descriptors.
	 */
	count = macb_count_tx_descriptors(bp, skb_headlen(skb));
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++) {
		frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
		count += macb_count_tx_descriptors(bp, frag_size);
	}

	spin_lock_irqsave(&bp->lock, flags);

	/* This is a hard error, log it. */
	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < count) {
		netif_stop_subqueue(dev, queue_index);
		spin_unlock_irqrestore(&bp->lock, flags);
		netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
			   queue->tx_head, queue->tx_tail);
		return NETDEV_TX_BUSY;
	}

	/* Map socket buffer for DMA transfer */
	if (!macb_tx_map(bp, queue, skb)) {
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	/* Make newly initialized descriptor visible to hardware */
	wmb();

	skb_tx_timestamp(skb);

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < 1)
		netif_stop_subqueue(dev, queue_index);

unlock:
	spin_unlock_irqrestore(&bp->lock, flags);

	return NETDEV_TX_OK;
}

static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
{
	if (!macb_is_gem(bp)) {
		bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
	} else {
		bp->rx_buffer_size = size;

		if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
			netdev_dbg(bp->dev,
				    "RX buffer must be multiple of %d bytes, expanding\n",
				    RX_BUFFER_MULTIPLE);
			bp->rx_buffer_size =
				roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
		}
	}

	netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%Zu]\n",
		   bp->dev->mtu, bp->rx_buffer_size);
}

static void gem_free_rx_buffers(struct macb *bp)
{
	struct sk_buff		*skb;
	struct macb_dma_desc	*desc;
	dma_addr_t		addr;
	int i;

	if (!bp->rx_skbuff)
		return;

	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = bp->rx_skbuff[i];

		if (skb == NULL)
			continue;

		desc = &bp->rx_ring[i];
		addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
		dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		skb = NULL;
	}

	kfree(bp->rx_skbuff);
	bp->rx_skbuff = NULL;
}

static void macb_free_rx_buffers(struct macb *bp)
{
	if (bp->rx_buffers) {
		dma_free_coherent(&bp->pdev->dev,
				  RX_RING_SIZE * bp->rx_buffer_size,
				  bp->rx_buffers, bp->rx_buffers_dma);
		bp->rx_buffers = NULL;
	}
}

static void macb_free_consistent(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;

	bp->macbgem_ops.mog_free_rx_buffers(bp);
	if (bp->rx_ring) {
		dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
				  bp->rx_ring, bp->rx_ring_dma);
		bp->rx_ring = NULL;
	}

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		kfree(queue->tx_skb);
		queue->tx_skb = NULL;
		if (queue->tx_ring) {
			dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
					  queue->tx_ring, queue->tx_ring_dma);
			queue->tx_ring = NULL;
		}
	}
}

static int gem_alloc_rx_buffers(struct macb *bp)
{
	int size;

	size = RX_RING_SIZE * sizeof(struct sk_buff *);
	bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
	if (!bp->rx_skbuff)
		return -ENOMEM;
	else
		netdev_dbg(bp->dev,
			   "Allocated %d RX struct sk_buff entries at %p\n",
			   RX_RING_SIZE, bp->rx_skbuff);
	return 0;
}

static int macb_alloc_rx_buffers(struct macb *bp)
{
	int size;

	size = RX_RING_SIZE * bp->rx_buffer_size;
	bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
					    &bp->rx_buffers_dma, GFP_KERNEL);
	if (!bp->rx_buffers)
		return -ENOMEM;
	else
		netdev_dbg(bp->dev,
			   "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
			   size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
	return 0;
}

static int macb_alloc_consistent(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	int size;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		size = TX_RING_BYTES;
		queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
						    &queue->tx_ring_dma,
						    GFP_KERNEL);
		if (!queue->tx_ring)
			goto out_err;
		netdev_dbg(bp->dev,
			   "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
			   q, size, (unsigned long)queue->tx_ring_dma,
			   queue->tx_ring);

		size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
		queue->tx_skb = kmalloc(size, GFP_KERNEL);
		if (!queue->tx_skb)
			goto out_err;
	}

	size = RX_RING_BYTES;
	bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
					 &bp->rx_ring_dma, GFP_KERNEL);
	if (!bp->rx_ring)
		goto out_err;
	netdev_dbg(bp->dev,
		   "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
		   size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);

	if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
		goto out_err;

	return 0;

out_err:
	macb_free_consistent(bp);
	return -ENOMEM;
}

static void gem_init_rings(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	int i;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		for (i = 0; i < TX_RING_SIZE; i++) {
			queue->tx_ring[i].addr = 0;
			queue->tx_ring[i].ctrl = MACB_BIT(TX_USED);
		}
		queue->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
		queue->tx_head = 0;
		queue->tx_tail = 0;
	}

	bp->rx_tail = 0;
	bp->rx_prepared_head = 0;

	gem_rx_refill(bp);
}

static void macb_init_rings(struct macb *bp)
{
	int i;
	dma_addr_t addr;

	addr = bp->rx_buffers_dma;
	for (i = 0; i < RX_RING_SIZE; i++) {
		bp->rx_ring[i].addr = addr;
		bp->rx_ring[i].ctrl = 0;
		addr += bp->rx_buffer_size;
	}
	bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);

	for (i = 0; i < TX_RING_SIZE; i++) {
		bp->queues[0].tx_ring[i].addr = 0;
		bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED);
	}
	bp->queues[0].tx_head = 0;
	bp->queues[0].tx_tail = 0;
	bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);

	bp->rx_tail = 0;
}

static void macb_reset_hw(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;

	/*
	 * Disable RX and TX (XXX: Should we halt the transmission
	 * more gracefully?)
	 */
	macb_writel(bp, NCR, 0);

	/* Clear the stats registers (XXX: Update stats first?) */
	macb_writel(bp, NCR, MACB_BIT(CLRSTAT));

	/* Clear all status flags */
	macb_writel(bp, TSR, -1);
	macb_writel(bp, RSR, -1);

	/* Disable all interrupts */
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, IDR, -1);
		queue_readl(queue, ISR);
	}
}

static u32 gem_mdc_clk_div(struct macb *bp)
{
	u32 config;
	unsigned long pclk_hz = clk_get_rate(bp->pclk);

	if (pclk_hz <= 20000000)
		config = GEM_BF(CLK, GEM_CLK_DIV8);
	else if (pclk_hz <= 40000000)
		config = GEM_BF(CLK, GEM_CLK_DIV16);
	else if (pclk_hz <= 80000000)
		config = GEM_BF(CLK, GEM_CLK_DIV32);
	else if (pclk_hz <= 120000000)
		config = GEM_BF(CLK, GEM_CLK_DIV48);
	else if (pclk_hz <= 160000000)
		config = GEM_BF(CLK, GEM_CLK_DIV64);
	else
		config = GEM_BF(CLK, GEM_CLK_DIV96);

	return config;
}

static u32 macb_mdc_clk_div(struct macb *bp)
{
	u32 config;
	unsigned long pclk_hz;

	if (macb_is_gem(bp))
		return gem_mdc_clk_div(bp);

	pclk_hz = clk_get_rate(bp->pclk);
	if (pclk_hz <= 20000000)
		config = MACB_BF(CLK, MACB_CLK_DIV8);
	else if (pclk_hz <= 40000000)
		config = MACB_BF(CLK, MACB_CLK_DIV16);
	else if (pclk_hz <= 80000000)
		config = MACB_BF(CLK, MACB_CLK_DIV32);
	else
		config = MACB_BF(CLK, MACB_CLK_DIV64);

	return config;
}
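
/*
 * Worked example (not from the original source): a 133 MHz GEM pclk falls
 * in the 120-160 MHz bracket above, selecting a divisor of 64 and giving
 * MDC = 133 MHz / 64 ~= 2.08 MHz, below the 2.5 MHz maximum MDC frequency
 * specified by IEEE 802.3 clause 22 for MDIO management interfaces.
 */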

/*
 * Get the DMA bus width field of the network configuration register that we
 * should program.  We find the width from decoding the design configuration
 * register to find the maximum supported data bus width.
 */
static u32 macb_dbw(struct macb *bp)
{
	if (!macb_is_gem(bp))
		return 0;

	switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
	case 4:
		return GEM_BF(DBW, GEM_DBW128);
	case 2:
		return GEM_BF(DBW, GEM_DBW64);
	case 1:
	default:
		return GEM_BF(DBW, GEM_DBW32);
	}
}

/*
 * Configure the receive DMA engine
 * - use the correct receive buffer size
 * - set best burst length for DMA operations
 *   (if not supported by FIFO, it will fall back to default)
 * - set both rx/tx packet buffers to full memory size
 * These are configurable parameters for GEM.
 */
static void macb_configure_dma(struct macb *bp)
{
	u32 dmacfg;
	u32 tmp, ncr;

	if (macb_is_gem(bp)) {
		dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
		dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
		if (bp->dma_burst_length)
			dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
		dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
		dmacfg &= ~GEM_BIT(ENDIA_PKT);

		/* Find the CPU endianness by using the loopback bit of the
		 * net_ctrl register. Save it first. When the CPU is big
		 * endian we need to program swapped mode for management
		 * descriptor access.
		 */
		ncr = macb_readl(bp, NCR);
		__raw_writel(MACB_BIT(LLB), bp->regs + MACB_NCR);
		tmp = __raw_readl(bp->regs + MACB_NCR);

		if (tmp == MACB_BIT(LLB))
			dmacfg &= ~GEM_BIT(ENDIA_DESC);
		else
			dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */

		/* Restore net_ctrl */
		macb_writel(bp, NCR, ncr);

		if (bp->dev->features & NETIF_F_HW_CSUM)
			dmacfg |= GEM_BIT(TXCOEN);
		else
			dmacfg &= ~GEM_BIT(TXCOEN);
		netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
			   dmacfg);
		gem_writel(bp, DMACFG, dmacfg);
	}
}

static void macb_init_hw(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;

	u32 config;

	macb_reset_hw(bp);
	macb_set_hwaddr(bp);

	config = macb_mdc_clk_div(bp);
	config |= MACB_BF(RBOF, NET_IP_ALIGN);	/* Make eth data aligned */
	config |= MACB_BIT(PAE);		/* PAuse Enable */
	config |= MACB_BIT(DRFCS);		/* Discard Rx FCS */
	config |= MACB_BIT(BIG);		/* Receive oversized frames */
	if (bp->dev->flags & IFF_PROMISC)
		config |= MACB_BIT(CAF);	/* Copy All Frames */
	else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
		config |= GEM_BIT(RXCOEN);
	if (!(bp->dev->flags & IFF_BROADCAST))
		config |= MACB_BIT(NBC);	/* No BroadCast */
	config |= macb_dbw(bp);
	macb_writel(bp, NCFGR, config);
	bp->speed = SPEED_10;
	bp->duplex = DUPLEX_HALF;

	macb_configure_dma(bp);

	/* Initialize TX and RX buffers */
	macb_writel(bp, RBQP, bp->rx_ring_dma);
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, TBQP, queue->tx_ring_dma);

		/* Enable interrupts */
		queue_writel(queue, IER,
			     MACB_RX_INT_FLAGS |
			     MACB_TX_INT_FLAGS |
			     MACB_BIT(HRESP));
	}

	/* Enable TX and RX */
	macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
}

/*
 * The hash address register is 64 bits long and takes up two
 * locations in the memory map.  The least significant bits are stored
 * in EMAC_HSL and the most significant bits in EMAC_HSH.
 *
 * The unicast hash enable and the multicast hash enable bits in the
 * network configuration register enable the reception of hash matched
 * frames. The destination address is reduced to a 6 bit index into
 * the 64 bit hash register using the following hash function.  The
 * hash function is an exclusive or of every sixth bit of the
 * destination address.
 *
 * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
 * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
 * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
 * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
 * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
 * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
 *
 * da[0] represents the least significant bit of the first byte
 * received, that is, the multicast/unicast indicator, and da[47]
 * represents the most significant bit of the last byte received.  If
 * the hash index, hi[n], points to a bit that is set in the hash
 * register then the frame will be matched according to whether the
 * frame is multicast or unicast.  A multicast match will be signalled
 * if the multicast hash enable bit is set, da[0] is 1 and the hash
 * index points to a bit set in the hash register.  A unicast match
 * will be signalled if the unicast hash enable bit is set, da[0] is 0
 * and the hash index points to a bit set in the hash register.  To
 * receive all multicast frames, the hash register should be set with
 * all ones and the multicast hash enable bit should be set in the
 * network configuration register.
 */

static inline int hash_bit_value(int bitnr, __u8 *addr)
{
	if (addr[bitnr / 8] & (1 << (bitnr % 8)))
		return 1;
	return 0;
}

/*
 * Return the hash index value for the specified address.
 */
static int hash_get_index(__u8 *addr)
{
	int i, j, bitval;
	int hash_index = 0;

	for (j = 0; j < 6; j++) {
		for (i = 0, bitval = 0; i < 8; i++)
			bitval ^= hash_bit_value(i * 6 + j, addr);

		hash_index |= (bitval << j);
	}

	return hash_index;
}
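
/*
 * Worked example of the hash function above: for the broadcast address
 * ff:ff:ff:ff:ff:ff every da[n] is 1, so each hi[j] is the XOR of eight
 * ones, i.e. 0, and the resulting hash index is 0.
 */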

/*
 * Add multicast addresses to the internal multicast-hash table.
 */
static void macb_sethashtable(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	unsigned long mc_filter[2];
	unsigned int bitnr;
	struct macb *bp = netdev_priv(dev);

	mc_filter[0] = mc_filter[1] = 0;

	netdev_for_each_mc_addr(ha, dev) {
		bitnr = hash_get_index(ha->addr);
		mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
	}

	macb_or_gem_writel(bp, HRB, mc_filter[0]);
	macb_or_gem_writel(bp, HRT, mc_filter[1]);
}

/*
 * Enable/Disable promiscuous and multicast modes.
 */
static void macb_set_rx_mode(struct net_device *dev)
{
	unsigned long cfg;
	struct macb *bp = netdev_priv(dev);

	cfg = macb_readl(bp, NCFGR);

	if (dev->flags & IFF_PROMISC) {
		/* Enable promiscuous mode */
		cfg |= MACB_BIT(CAF);

		/* Disable RX checksum offload */
		if (macb_is_gem(bp))
			cfg &= ~GEM_BIT(RXCOEN);
	} else {
		/* Disable promiscuous mode */
		cfg &= ~MACB_BIT(CAF);

		/* Enable RX checksum offload only if requested */
		if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
			cfg |= GEM_BIT(RXCOEN);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Enable all multicast mode */
		macb_or_gem_writel(bp, HRB, -1);
		macb_or_gem_writel(bp, HRT, -1);
		cfg |= MACB_BIT(NCFGR_MTI);
	} else if (!netdev_mc_empty(dev)) {
		/* Enable specific multicasts */
		macb_sethashtable(dev);
		cfg |= MACB_BIT(NCFGR_MTI);
	} else if (dev->flags & (~IFF_ALLMULTI)) {
		/* Disable all multicast mode */
		macb_or_gem_writel(bp, HRB, 0);
		macb_or_gem_writel(bp, HRT, 0);
		cfg &= ~MACB_BIT(NCFGR_MTI);
	}

	macb_writel(bp, NCFGR, cfg);
}

static int macb_open(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
	int err;

	netdev_dbg(bp->dev, "open\n");

	/* carrier starts down */
	netif_carrier_off(dev);

	/* if the phy is not yet registered, retry later */
	if (!bp->phy_dev)
		return -EAGAIN;

	/* RX buffers initialization */
	macb_init_rx_buffer_size(bp, bufsz);

	err = macb_alloc_consistent(bp);
	if (err) {
		netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
			   err);
		return err;
	}

	napi_enable(&bp->napi);

	bp->macbgem_ops.mog_init_rings(bp);
	macb_init_hw(bp);

	/* schedule a link state check */
	phy_start(bp->phy_dev);

	netif_tx_start_all_queues(dev);

	return 0;
}

static int macb_close(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	unsigned long flags;

	netif_tx_stop_all_queues(dev);
	napi_disable(&bp->napi);

	if (bp->phy_dev)
		phy_stop(bp->phy_dev);

	spin_lock_irqsave(&bp->lock, flags);
	macb_reset_hw(bp);
	netif_carrier_off(dev);
	spin_unlock_irqrestore(&bp->lock, flags);

	macb_free_consistent(bp);

	return 0;
}

1850 1851
static void gem_update_stats(struct macb *bp)
{
1852
	int i;
1853 1854
	u32 *p = &bp->hw_stats.gem.tx_octets_31_0;

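	/* Walk the GEM statistics registers and accumulate them into the
	 * driver's running totals. The TX/RX octet counters are 64 bits
	 * wide and are read as a low/high register pair.
	 */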
	for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
		u32 offset = gem_statistics[i].offset;
		u64 val = readl_relaxed(bp->regs + offset);

		bp->ethtool_stats[i] += val;
		*p += val;

		if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
			/* Add GEM_OCTTXH, GEM_OCTRXH */
			val = readl_relaxed(bp->regs + offset + 4);
			bp->ethtool_stats[i] += ((u64)val) << 32;
			*(++p) += val;
		}
	}
}

static struct net_device_stats *gem_get_stats(struct macb *bp)
{
	struct gem_stats *hwstat = &bp->hw_stats.gem;
	struct net_device_stats *nstat = &bp->stats;

	gem_update_stats(bp);

	nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
			    hwstat->rx_alignment_errors +
			    hwstat->rx_resource_errors +
			    hwstat->rx_overruns +
			    hwstat->rx_oversize_frames +
			    hwstat->rx_jabbers +
			    hwstat->rx_undersized_frames +
			    hwstat->rx_length_field_frame_errors);
	nstat->tx_errors = (hwstat->tx_late_collisions +
			    hwstat->tx_excessive_collisions +
			    hwstat->tx_underrun +
			    hwstat->tx_carrier_sense_errors);
	nstat->multicast = hwstat->rx_multicast_frames;
	nstat->collisions = (hwstat->tx_single_collision_frames +
			     hwstat->tx_multiple_collision_frames +
			     hwstat->tx_excessive_collisions);
	nstat->rx_length_errors = (hwstat->rx_oversize_frames +
				   hwstat->rx_jabbers +
				   hwstat->rx_undersized_frames +
				   hwstat->rx_length_field_frame_errors);
	nstat->rx_over_errors = hwstat->rx_resource_errors;
	nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
	nstat->rx_frame_errors = hwstat->rx_alignment_errors;
	nstat->rx_fifo_errors = hwstat->rx_overruns;
	nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
	nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
	nstat->tx_fifo_errors = hwstat->tx_underrun;

	return nstat;
}

static void gem_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct macb *bp;

	bp = netdev_priv(dev);
	gem_update_stats(bp);
	memcpy(data, &bp->ethtool_stats, sizeof(u64) * GEM_STATS_LEN);
}

static int gem_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return GEM_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
{
	int i;

	switch (sset) {
	case ETH_SS_STATS:
		for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
			memcpy(p, gem_statistics[i].stat_string,
			       ETH_GSTRING_LEN);
		break;
	}
}

static struct net_device_stats *macb_get_stats(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &bp->stats;
	struct macb_stats *hwstat = &bp->hw_stats.macb;

	if (macb_is_gem(bp))
		return gem_get_stats(bp);

	/* read stats from hardware */
	macb_update_stats(bp);

	/* Convert HW stats into netdevice stats */
	nstat->rx_errors = (hwstat->rx_fcs_errors +
			    hwstat->rx_align_errors +
			    hwstat->rx_resource_errors +
			    hwstat->rx_overruns +
			    hwstat->rx_oversize_pkts +
			    hwstat->rx_jabbers +
			    hwstat->rx_undersize_pkts +
			    hwstat->rx_length_mismatch);
	nstat->tx_errors = (hwstat->tx_late_cols +
			    hwstat->tx_excessive_cols +
			    hwstat->tx_underruns +
			    hwstat->tx_carrier_errors +
			    hwstat->sqe_test_errors);
	nstat->collisions = (hwstat->tx_single_cols +
			     hwstat->tx_multiple_cols +
			     hwstat->tx_excessive_cols);
	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
				   hwstat->rx_jabbers +
				   hwstat->rx_undersize_pkts +
				   hwstat->rx_length_mismatch);
	nstat->rx_over_errors = hwstat->rx_resource_errors +
				   hwstat->rx_overruns;
	nstat->rx_crc_errors = hwstat->rx_fcs_errors;
	nstat->rx_frame_errors = hwstat->rx_align_errors;
	nstat->rx_fifo_errors = hwstat->rx_overruns;
	/* XXX: What does "missed" mean? */
	nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
	nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
	nstat->tx_fifo_errors = hwstat->tx_underruns;
	/* Don't know about heartbeat or window errors... */

	return nstat;
}

static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct macb *bp = netdev_priv(dev);
	struct phy_device *phydev = bp->phy_dev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_gset(phydev, cmd);
}

static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct macb *bp = netdev_priv(dev);
	struct phy_device *phydev = bp->phy_dev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_sset(phydev, cmd);
}

static int macb_get_regs_len(struct net_device *netdev)
{
	return MACB_GREGS_NBR * sizeof(u32);
}

static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *p)
{
	struct macb *bp = netdev_priv(dev);
	unsigned int tail, head;
	u32 *regs_buff = p;

	regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
			| MACB_GREGS_VERSION;

	tail = macb_tx_ring_wrap(bp->queues[0].tx_tail);
	head = macb_tx_ring_wrap(bp->queues[0].tx_head);

	regs_buff[0]  = macb_readl(bp, NCR);
	regs_buff[1]  = macb_or_gem_readl(bp, NCFGR);
	regs_buff[2]  = macb_readl(bp, NSR);
	regs_buff[3]  = macb_readl(bp, TSR);
	regs_buff[4]  = macb_readl(bp, RBQP);
	regs_buff[5]  = macb_readl(bp, TBQP);
	regs_buff[6]  = macb_readl(bp, RSR);
	regs_buff[7]  = macb_readl(bp, IMR);

	regs_buff[8]  = tail;
	regs_buff[9]  = head;
	regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
	regs_buff[11] = macb_tx_dma(&bp->queues[0], head);

	regs_buff[12] = macb_or_gem_readl(bp, USRIO);
	if (macb_is_gem(bp))
		regs_buff[13] = gem_readl(bp, DMACFG);
}

static const struct ethtool_ops macb_ethtool_ops = {
	.get_settings		= macb_get_settings,
	.set_settings		= macb_set_settings,
	.get_regs_len		= macb_get_regs_len,
	.get_regs		= macb_get_regs,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= ethtool_op_get_ts_info,
};

static const struct ethtool_ops gem_ethtool_ops = {
	.get_settings		= macb_get_settings,
	.set_settings		= macb_set_settings,
	.get_regs_len		= macb_get_regs_len,
	.get_regs		= macb_get_regs,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_ethtool_stats	= gem_get_ethtool_stats,
	.get_strings		= gem_get_ethtool_strings,
	.get_sset_count		= gem_get_sset_count,
};

static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct macb *bp = netdev_priv(dev);
	struct phy_device *phydev = bp->phy_dev;

	if (!netif_running(dev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}

static int macb_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct macb *bp = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	/* TX checksum offload */
	if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) {
		u32 dmacfg;

		dmacfg = gem_readl(bp, DMACFG);
		if (features & NETIF_F_HW_CSUM)
			dmacfg |= GEM_BIT(TXCOEN);
		else
			dmacfg &= ~GEM_BIT(TXCOEN);
		gem_writel(bp, DMACFG, dmacfg);
	}

	/* RX checksum offload */
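	/* Note: this mirrors macb_set_rx_mode(); with RXCOEN set the GEM
	 * discards frames that fail checksum validation, which is unwanted
	 * in promiscuous mode.
	 */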
	if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) {
		u32 netcfg;

		netcfg = gem_readl(bp, NCFGR);
		if (features & NETIF_F_RXCSUM &&
		    !(netdev->flags & IFF_PROMISC))
			netcfg |= GEM_BIT(RXCOEN);
		else
			netcfg &= ~GEM_BIT(RXCOEN);
		gem_writel(bp, NCFGR, netcfg);
	}

	return 0;
}

static const struct net_device_ops macb_netdev_ops = {
	.ndo_open		= macb_open,
	.ndo_stop		= macb_close,
	.ndo_start_xmit		= macb_start_xmit,
	.ndo_set_rx_mode	= macb_set_rx_mode,
	.ndo_get_stats		= macb_get_stats,
	.ndo_do_ioctl		= macb_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= macb_poll_controller,
#endif
	.ndo_set_features	= macb_set_features,
};

/*
 * Configure peripheral capabilities according to device tree
 * and integration options used
 */
static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_conf)
{
	u32 dcfg;

	if (dt_conf)
		bp->caps = dt_conf->caps;

	if (macb_is_gem_hw(bp->regs)) {
		bp->caps |= MACB_CAPS_MACB_IS_GEM;

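		/* DCFG1's IRQCOR field reports whether the interrupt status
		 * register clears on read; if it does not, the ISR must be
		 * cleared by writing the bits back.
		 */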
		dcfg = gem_readl(bp, DCFG1);
		if (GEM_BFEXT(IRQCOR, dcfg) == 0)
			bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
		dcfg = gem_readl(bp, DCFG2);
		if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
			bp->caps |= MACB_CAPS_FIFO_MODE;
	}

	netdev_dbg(bp->dev, "Cadence caps 0x%08x\n", bp->caps);
}

static void macb_probe_queues(void __iomem *mem,
			      unsigned int *queue_mask,
			      unsigned int *num_queues)
{
	unsigned int hw_q;

	*queue_mask = 0x1;
	*num_queues = 1;

	/* Is it macb or gem?
	 *
	 * We need to read directly from the hardware here because
	 * we are early in the probe process and the
	 * MACB_CAPS_MACB_IS_GEM flag is not set yet.
	 */
	if (!macb_is_gem_hw(mem))
		return;

	/* bit 0 is never set but queue 0 always exists */
	*queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff;

	*queue_mask |= 0x1;

	for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
		if (*queue_mask & (1 << hw_q))
			(*num_queues)++;
}

static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
			 struct clk **hclk, struct clk **tx_clk)
{
	int err;

	*pclk = devm_clk_get(&pdev->dev, "pclk");
	if (IS_ERR(*pclk)) {
		err = PTR_ERR(*pclk);
		dev_err(&pdev->dev, "failed to get macb_clk (%d)\n", err);
		return err;
	}

	*hclk = devm_clk_get(&pdev->dev, "hclk");
	if (IS_ERR(*hclk)) {
		err = PTR_ERR(*hclk);
		dev_err(&pdev->dev, "failed to get hclk (%d)\n", err);
		return err;
	}

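	/* tx_clk is optional: fall back to NULL, which the common clk
	 * framework accepts as a no-op clock.
	 */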
	*tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	err = clk_prepare_enable(*pclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*hclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err);
		goto err_disable_pclk;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_hclk;
	}

	return 0;

err_disable_hclk:
	clk_disable_unprepare(*hclk);

err_disable_pclk:
	clk_disable_unprepare(*pclk);

	return err;
}

static int macb_init(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	unsigned int hw_q, q;
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	int err;
	u32 val;

	/* set the queue register mapping once for all: queue0 has a special
	 * register mapping but we don't want to test the queue index then
	 * compute the corresponding register offset at run time.
	 */
	for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
		if (!(bp->queue_mask & (1 << hw_q)))
			continue;

		queue = &bp->queues[q];
		queue->bp = bp;
		if (hw_q) {
			queue->ISR  = GEM_ISR(hw_q - 1);
			queue->IER  = GEM_IER(hw_q - 1);
			queue->IDR  = GEM_IDR(hw_q - 1);
			queue->IMR  = GEM_IMR(hw_q - 1);
			queue->TBQP = GEM_TBQP(hw_q - 1);
		} else {
			/* queue0 uses legacy registers */
			queue->ISR  = MACB_ISR;
			queue->IER  = MACB_IER;
			queue->IDR  = MACB_IDR;
			queue->IMR  = MACB_IMR;
			queue->TBQP = MACB_TBQP;
		}

		/* get irq: here we use the linux queue index, not the hardware
		 * queue index. the queue irq definitions in the device tree
		 * must remove the optional gaps that could exist in the
		 * hardware queue mask.
		 */
		queue->irq = platform_get_irq(pdev, q);
		err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
				       IRQF_SHARED, dev->name, queue);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to request IRQ %d (error %d)\n",
				queue->irq, err);
			return err;
		}

		INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
		q++;
	}

	dev->netdev_ops = &macb_netdev_ops;
	netif_napi_add(dev, &bp->napi, macb_poll, 64);

	/* setup appropriated routines according to adapter type */
	if (macb_is_gem(bp)) {
		bp->max_tx_length = GEM_MAX_TX_LEN;
		bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
		bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
		bp->macbgem_ops.mog_init_rings = gem_init_rings;
		bp->macbgem_ops.mog_rx = gem_rx;
		dev->ethtool_ops = &gem_ethtool_ops;
	} else {
		bp->max_tx_length = MACB_MAX_TX_LEN;
		bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
		bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
		bp->macbgem_ops.mog_init_rings = macb_init_rings;
		bp->macbgem_ops.mog_rx = macb_rx;
		dev->ethtool_ops = &macb_ethtool_ops;
	}

	/* Set features */
	dev->hw_features = NETIF_F_SG;
	/* Checksum offload is only available on gem with packet buffer */
	if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	if (bp->caps & MACB_CAPS_SG_DISABLED)
		dev->hw_features &= ~NETIF_F_SG;
	dev->features = dev->hw_features;

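	/* Program USRIO to match the selected PHY interface. The encoding
	 * of the MII/RMII bit differs between SoC integrations, which is
	 * what MACB_CAPS_USRIO_DEFAULT_IS_MII captures.
	 */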
	val = 0;
	if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
		val = GEM_BIT(RGMII);
	else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
		 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII))
		val = MACB_BIT(RMII);
	else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII))
		val = MACB_BIT(MII);

	if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
		val |= MACB_BIT(CLKEN);

	macb_or_gem_writel(bp, USRIO, val);

	/* Set MII management clock divider */
	val = macb_mdc_clk_div(bp);
	val |= macb_dbw(bp);
	macb_writel(bp, NCFGR, val);

	return 0;
}

#if defined(CONFIG_OF)
/* 1518 rounded up */
#define AT91ETHER_MAX_RBUFF_SZ	0x600
/* max number of receive buffers */
#define AT91ETHER_MAX_RX_DESCR	9

/* Initialize and start the Receiver and Transmit subsystems */
static int at91ether_start(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	dma_addr_t addr;
	u32 ctl;
	int i;

	lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
					 (AT91ETHER_MAX_RX_DESCR *
					  sizeof(struct macb_dma_desc)),
					 &lp->rx_ring_dma, GFP_KERNEL);
	if (!lp->rx_ring)
		return -ENOMEM;

	lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
					    AT91ETHER_MAX_RX_DESCR *
					    AT91ETHER_MAX_RBUFF_SZ,
					    &lp->rx_buffers_dma, GFP_KERNEL);
	if (!lp->rx_buffers) {
		dma_free_coherent(&lp->pdev->dev,
				  AT91ETHER_MAX_RX_DESCR *
				  sizeof(struct macb_dma_desc),
				  lp->rx_ring, lp->rx_ring_dma);
		lp->rx_ring = NULL;
		return -ENOMEM;
	}

	addr = lp->rx_buffers_dma;
	for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
		lp->rx_ring[i].addr = addr;
		lp->rx_ring[i].ctrl = 0;
		addr += AT91ETHER_MAX_RBUFF_SZ;
	}

	/* Set the Wrap bit on the last descriptor */
	lp->rx_ring[AT91ETHER_MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);

	/* Reset buffer index */
	lp->rx_tail = 0;

	/* Program address of descriptor list in Rx Buffer Queue register */
	macb_writel(lp, RBQP, lp->rx_ring_dma);

	/* Enable Receive and Transmit */
	ctl = macb_readl(lp, NCR);
	macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));

	return 0;
}

/* Open the ethernet interface */
static int at91ether_open(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	u32 ctl;
	int ret;

	/* Clear internal statistics */
	ctl = macb_readl(lp, NCR);
	macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));

	macb_set_hwaddr(lp);

	ret = at91ether_start(dev);
	if (ret)
		return ret;

	/* Enable MAC interrupts */
	macb_writel(lp, IER, MACB_BIT(RCOMP)	|
			     MACB_BIT(RXUBR)	|
			     MACB_BIT(ISR_TUND)	|
			     MACB_BIT(ISR_RLE)	|
			     MACB_BIT(TCOMP)	|
			     MACB_BIT(ISR_ROVR)	|
			     MACB_BIT(HRESP));

	/* schedule a link state check */
	phy_start(lp->phy_dev);

	netif_start_queue(dev);

	return 0;
}

/* Close the interface */
static int at91ether_close(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	u32 ctl;

	/* Disable Receiver and Transmitter */
	ctl = macb_readl(lp, NCR);
	macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));

	/* Disable MAC interrupts */
	macb_writel(lp, IDR, MACB_BIT(RCOMP)	|
			     MACB_BIT(RXUBR)	|
			     MACB_BIT(ISR_TUND)	|
			     MACB_BIT(ISR_RLE)	|
			     MACB_BIT(TCOMP)	|
			     MACB_BIT(ISR_ROVR) |
			     MACB_BIT(HRESP));

	netif_stop_queue(dev);

	dma_free_coherent(&lp->pdev->dev,
			  AT91ETHER_MAX_RX_DESCR *
			  sizeof(struct macb_dma_desc),
			  lp->rx_ring, lp->rx_ring_dma);
	lp->rx_ring = NULL;

	dma_free_coherent(&lp->pdev->dev,
			  AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
			  lp->rx_buffers, lp->rx_buffers_dma);
	lp->rx_buffers = NULL;

	return 0;
}

/* Transmit packet */
static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);

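	/* The RM9200 EMAC can only queue one frame at a time: TSR's BNQ
	 * bit signals that the transmit buffer is free, and the queue is
	 * woken again from the TCOMP interrupt.
	 */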
	if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
		netif_stop_queue(dev);

		/* Store packet information (to free when Tx completed) */
		lp->skb = skb;
		lp->skb_length = skb->len;
		lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
							DMA_TO_DEVICE);

		/* Set address of the data in the Transmit Address register */
		macb_writel(lp, TAR, lp->skb_physaddr);
		/* Set length of the packet in the Transmit Control register */
		macb_writel(lp, TCR, skb->len);

	} else {
		netdev_err(dev, "%s called, but device is busy!\n", __func__);
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}

/* Extract received frames from the buffer descriptors and pass them to the
 * upper layers. (Called from interrupt context.)
 */
static void at91ether_rx(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	unsigned char *p_recv;
	struct sk_buff *skb;
	unsigned int pktlen;

	while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
		p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
		pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
		skb = netdev_alloc_skb(dev, pktlen + 2);
		if (skb) {
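			/* Reserve 2 bytes so the IP header is aligned on a
			 * 32-bit boundary after the 14-byte Ethernet header.
			 */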
			skb_reserve(skb, 2);
			memcpy(skb_put(skb, pktlen), p_recv, pktlen);

			skb->protocol = eth_type_trans(skb, dev);
			lp->stats.rx_packets++;
			lp->stats.rx_bytes += pktlen;
			netif_rx(skb);
		} else {
			lp->stats.rx_dropped++;
		}

		if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
			lp->stats.multicast++;

		/* reset ownership bit */
		lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);

		/* wrap after last buffer */
		if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
			lp->rx_tail = 0;
		else
			lp->rx_tail++;
	}
}

/* MAC interrupt handler */
static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct macb *lp = netdev_priv(dev);
	u32 intstatus, ctl;

	/* MAC Interrupt Status register indicates what interrupts are pending.
	 * It is automatically cleared once read.
	 */
	intstatus = macb_readl(lp, ISR);

	/* Receive complete */
	if (intstatus & MACB_BIT(RCOMP))
		at91ether_rx(dev);

	/* Transmit complete */
	if (intstatus & MACB_BIT(TCOMP)) {
		/* The TCOM bit is set even if the transmission failed */
		if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
			lp->stats.tx_errors++;

		if (lp->skb) {
			dev_kfree_skb_irq(lp->skb);
			lp->skb = NULL;
			dma_unmap_single(NULL, lp->skb_physaddr,
					 lp->skb_length, DMA_TO_DEVICE);
			lp->stats.tx_packets++;
			lp->stats.tx_bytes += lp->skb_length;
		}
		netif_wake_queue(dev);
	}

	/* Work-around for EMAC Errata section 41.3.1 */
	if (intstatus & MACB_BIT(RXUBR)) {
		ctl = macb_readl(lp, NCR);
		macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
		macb_writel(lp, NCR, ctl | MACB_BIT(RE));
	}

	if (intstatus & MACB_BIT(ISR_ROVR))
		netdev_err(dev, "ROVR error\n");

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void at91ether_poll_controller(struct net_device *dev)
{
	unsigned long flags;

	local_irq_save(flags);
	at91ether_interrupt(dev->irq, dev);
	local_irq_restore(flags);
}
#endif

static const struct net_device_ops at91ether_netdev_ops = {
	.ndo_open		= at91ether_open,
	.ndo_stop		= at91ether_close,
	.ndo_start_xmit		= at91ether_start_xmit,
	.ndo_get_stats		= macb_get_stats,
	.ndo_set_rx_mode	= macb_set_rx_mode,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_do_ioctl		= macb_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= at91ether_poll_controller,
#endif
};

static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
			      struct clk **hclk, struct clk **tx_clk)
{
	int err;

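	/* The RM9200 EMAC is fed by a single "ether_clk"; report no hclk
	 * or tx_clk so the shared clock handling treats them as no-ops.
	 */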
	*hclk = NULL;
	*tx_clk = NULL;

	*pclk = devm_clk_get(&pdev->dev, "ether_clk");
	if (IS_ERR(*pclk))
		return PTR_ERR(*pclk);

	err = clk_prepare_enable(*pclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
		return err;
	}

	return 0;
}

static int at91ether_init(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(dev);
	int err;
	u32 reg;

	dev->netdev_ops = &at91ether_netdev_ops;
	dev->ethtool_ops = &macb_ethtool_ops;

	err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
			       0, dev->name, dev);
	if (err)
		return err;

	macb_writel(bp, NCR, 0);

	reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
	if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
		reg |= MACB_BIT(RM9200_RMII);

	macb_writel(bp, NCFGR, reg);

	return 0;
}

static const struct macb_config at91sam9260_config = {
	.caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config pc302gem_config = {
	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d3_config = {
	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d4_config = {
	.caps = 0,
	.dma_burst_length = 4,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config emac_config = {
	.clk_init = at91ether_clk_init,
	.init = at91ether_init,
};

static const struct of_device_id macb_dt_ids[] = {
	{ .compatible = "cdns,at32ap7000-macb" },
	{ .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
	{ .compatible = "cdns,macb" },
	{ .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
	{ .compatible = "cdns,gem", .data = &pc302gem_config },
	{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
	{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
	{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
	{ .compatible = "cdns,emac", .data = &emac_config },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, macb_dt_ids);
#endif /* CONFIG_OF */

static int macb_probe(struct platform_device *pdev)
{
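	/* Default to the MACB/GEM helpers; matching device-tree data below
	 * may override these with the at91 EMAC variants.
	 */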
	int (*clk_init)(struct platform_device *, struct clk **,
			struct clk **, struct clk **)
					      = macb_clk_init;
	int (*init)(struct platform_device *) = macb_init;
	struct device_node *np = pdev->dev.of_node;
	const struct macb_config *macb_config = NULL;
	struct clk *pclk, *hclk, *tx_clk;
	unsigned int queue_mask, num_queues;
	struct macb_platform_data *pdata;
	struct phy_device *phydev;
	struct net_device *dev;
	struct resource *regs;
	void __iomem *mem;
	const char *mac;
	struct macb *bp;
	int err;

	if (np) {
		const struct of_device_id *match;

		match = of_match_node(macb_dt_ids, np);
		if (match && match->data) {
			macb_config = match->data;
			clk_init = macb_config->clk_init;
			init = macb_config->init;
		}
	}

	err = clk_init(pdev, &pclk, &hclk, &tx_clk);
	if (err)
		return err;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mem = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(mem)) {
		err = PTR_ERR(mem);
		goto err_disable_clocks;
	}

	macb_probe_queues(mem, &queue_mask, &num_queues);
	dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
	if (!dev) {
		err = -ENOMEM;
		goto err_disable_clocks;
	}

	dev->base_addr = regs->start;

	SET_NETDEV_DEV(dev, &pdev->dev);

	bp = netdev_priv(dev);
	bp->pdev = pdev;
	bp->dev = dev;
	bp->regs = mem;
	bp->num_queues = num_queues;
	bp->queue_mask = queue_mask;
	if (macb_config)
		bp->dma_burst_length = macb_config->dma_burst_length;
	bp->pclk = pclk;
	bp->hclk = hclk;
	bp->tx_clk = tx_clk;
	spin_lock_init(&bp->lock);

	/* setup capabilities */
	macb_configure_caps(bp, macb_config);

	platform_set_drvdata(pdev, dev);

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq < 0) {
		err = dev->irq;
		goto err_disable_clocks;
	}

	mac = of_get_mac_address(np);
	if (mac)
		memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
	else
		macb_get_hwaddr(bp);

	err = of_get_phy_mode(np);
	if (err < 0) {
		pdata = dev_get_platdata(&pdev->dev);
		if (pdata && pdata->is_rmii)
			bp->phy_interface = PHY_INTERFACE_MODE_RMII;
		else
			bp->phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		bp->phy_interface = err;
	}

	/* IP specific init */
	err = init(pdev);
	if (err)
		goto err_out_free_netdev;

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_free_netdev;
	}

	err = macb_mii_init(bp);
	if (err)
		goto err_out_unregister_netdev;

	netif_carrier_off(dev);

	netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
		    macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
		    dev->base_addr, dev->irq, dev->dev_addr);

	phydev = bp->phy_dev;
	netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
		    phydev->drv->name, dev_name(&phydev->dev), phydev->irq);

	return 0;

err_out_unregister_netdev:
	unregister_netdev(dev);

err_out_free_netdev:
	free_netdev(dev);

err_disable_clocks:
	clk_disable_unprepare(tx_clk);
	clk_disable_unprepare(hclk);
	clk_disable_unprepare(pclk);

	return err;
}

static int macb_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct macb *bp;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		if (bp->phy_dev)
			phy_disconnect(bp->phy_dev);
		mdiobus_unregister(bp->mii_bus);
		kfree(bp->mii_bus->irq);
		mdiobus_free(bp->mii_bus);
		unregister_netdev(dev);
		clk_disable_unprepare(bp->tx_clk);
		clk_disable_unprepare(bp->hclk);
		clk_disable_unprepare(bp->pclk);
		free_netdev(dev);
	}

	return 0;
}

static int __maybe_unused macb_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

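	/* Only gate the clocks and detach the interface: this version of
	 * the driver implements no Wake-on-LAN, so no extra state needs
	 * to be preserved across suspend.
	 */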
	netif_carrier_off(netdev);
	netif_device_detach(netdev);

	clk_disable_unprepare(bp->tx_clk);
	clk_disable_unprepare(bp->hclk);
	clk_disable_unprepare(bp->pclk);

	return 0;
}

static int __maybe_unused macb_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

	clk_prepare_enable(bp->pclk);
	clk_prepare_enable(bp->hclk);
	clk_prepare_enable(bp->tx_clk);

	netif_device_attach(netdev);

	return 0;
}

static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);

static struct platform_driver macb_driver = {
	.probe		= macb_probe,
	.remove		= macb_remove,
	.driver		= {
		.name		= "macb",
		.of_match_table	= of_match_ptr(macb_dt_ids),
		.pm	= &macb_pm_ops,
	},
};

module_platform_driver(macb_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_ALIAS("platform:macb");