/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 *  Gianfar:  AKA Lambda Draconis, "Dragon"
 *  RA 11 31 24.2
 *  Dec +69 19 52
 *  V 3.84
 *  B-V +1.62
 *
 *  Theory of operation
 *
 *  The driver is initialized through of_device. Configuration information
 *  is therefore conveyed through an OF-style device tree.
 *
 *  The Gianfar Ethernet Controller uses a ring of buffer
 *  descriptors.  The beginning is indicated by a register
 *  pointing to the physical address of the start of the ring.
 *  The end is determined by a "wrap" bit being set in the
 *  last descriptor of the ring.
 *
 *  When a packet is received, the RXF bit in the
 *  IEVENT register is set, triggering an interrupt when the
 *  corresponding bit in the IMASK register is also set (if
 *  interrupt coalescing is active, then the interrupt may not
 *  happen immediately, but will wait until either a set number
 *  of frames or amount of time have passed).  In NAPI, the
 *  interrupt handler will signal there is work to be done, and
 *  exit. This method will start at the last known empty
 *  descriptor, and process every subsequent descriptor until there
 *  are none left with data (NAPI will stop after a set number of
 *  packets to give time to other tasks, but will eventually
 *  process all the packets).  The data arrives inside a
 *  pre-allocated skb, and so after the skb is passed up to the
 *  stack, a new skb must be allocated, and the address field in
 *  the buffer descriptor must be updated to indicate this new
 *  skb.
 *
 *  When the kernel requests that a packet be transmitted, the
 *  driver starts where it left off last time, and points the
 *  descriptor at the buffer which was passed in.  The driver
 *  then informs the DMA engine that there are packets ready to
 *  be transmitted.  Once the controller is finished transmitting
 *  the packet, an interrupt may be triggered (under the same
 *  conditions as for reception, but depending on the TXF bit).
 *  The driver then cleans up the buffer.
 */

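/* Illustration (not driver code): each ring is a contiguous array of
 * buffer descriptors in DMA memory.  The controller walks bd[0]..bd[N-1],
 * and the WRAP flag in the last descriptor sends it back to bd[0]:
 *
 *   bd[0] -> bd[1] -> ... -> bd[N-1] --(WRAP)--> bd[0]
 *
 * Hardware and software chase each other around the ring; the RXBD_EMPTY
 * (Rx) and TXBD_READY (Tx) status bits arbitrate ownership of each slot.
 */
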
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DEBUG

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#include <asm/reg.h>
#include <asm/mpc85xx.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "gianfar.h"

#define TX_TIMEOUT      (1*HZ)

const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			   struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll_rx(struct napi_struct *napi, int budget);
static int gfar_poll_tx(struct napi_struct *napi, int budget);
static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			       int amount_pull, struct napi_struct *napi);
static void gfar_halt_nodisable(struct gfar_private *priv);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = buf;

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	eieio();

	bdp->lstatus = lstatus;
}

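/* (Re)initialize all BD rings: reset each queue's ring pointers, clear
 * the Tx descriptors, and mark every Rx descriptor empty, allocating a
 * fresh skb for any Rx slot that does not have one yet.
 */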
static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status |= TXBD_WRAP;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->cur_rx = rx_queue->rx_bd_base;
		rx_queue->skb_currx = 0;
		rxbdp = rx_queue->rx_bd_base;

		for (j = 0; j < rx_queue->rx_ring_size; j++) {
			struct sk_buff *skb = rx_queue->rx_skbuff[j];

			if (skb) {
				gfar_init_rxbdp(rx_queue, rxbdp,
						rxbdp->bufPtr);
			} else {
				skb = gfar_new_skb(ndev);
				if (!skb) {
					netdev_err(ndev, "Can't allocate RX buffers\n");
					return -ENOMEM;
				}
				rx_queue->rx_skbuff[j] = skb;

				gfar_new_rxbdp(rx_queue, rxbdp, skb);
			}

			rxbdp++;
		}

	}

	return 0;
}

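/* Allocate one DMA-coherent region for all Tx and Rx BD rings, carve it
 * up between the queues, allocate the per-queue skb pointer arrays, and
 * finish by initializing the rings via gfar_init_bds().
 */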
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j, k;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = priv->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
				   (priv->total_tx_ring_size *
				    sizeof(struct txbd8)) +
				   (priv->total_rx_ring_size *
				    sizeof(struct rxbd8)),
				   &addr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->dev = ndev;
		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff =
			kmalloc_array(tx_queue->tx_ring_size,
				      sizeof(*tx_queue->tx_skbuff),
				      GFP_KERNEL);
		if (!tx_queue->tx_skbuff)
			goto cleanup;

		for (k = 0; k < tx_queue->tx_ring_size; k++)
			tx_queue->tx_skbuff[k] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_skbuff =
			kmalloc_array(rx_queue->rx_ring_size,
				      sizeof(*rx_queue->rx_skbuff),
				      GFP_KERNEL);
		if (!rx_queue->rx_skbuff)
			goto cleanup;

		for (j = 0; j < rx_queue->rx_ring_size; j++)
			rx_queue->rx_skbuff[j] = NULL;
	}

	if (gfar_init_bds(ndev))
		goto cleanup;

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}

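/* Program the TBASEn/RBASEn registers with the DMA address of each
 * queue's descriptor ring.
 */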
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}

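/* Size the Rx buffers: start from the MTU plus the Ethernet header, add
 * room for the frame control block (FCB) and padding when Rx offloads
 * need them, then round up to the next multiple of
 * INCREMENTAL_BUFFER_SIZE.  E.g. a 1500-byte MTU yields 1514 bytes,
 * which rounds up to the default 1536-byte buffer.
 */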
static void gfar_rx_buff_size_config(struct gfar_private *priv)
{
	int frame_size = priv->ndev->mtu + ETH_HLEN;

	/* set this when rx hw offload (TOE) functions are being used */
	priv->uses_rxfcb = 0;

	if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
		priv->uses_rxfcb = 1;

	if (priv->hwts_rx_en)
		priv->uses_rxfcb = 1;

	if (priv->uses_rxfcb)
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

	frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
		     INCREMENTAL_BUFFER_SIZE;

	priv->rx_buffer_size = frame_size;
}

static void gfar_mac_rx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN;
		/* Program the RIR0 reg with the required distribution */
		if (priv->poll_mode == GFAR_SQ_POLLING)
			gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
		else /* GFAR_MQ_POLLING */
			gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
	}

	/* Restore PROMISC mode */
	if (priv->ndev->flags & IFF_PROMISC)
		rctrl |= RCTRL_PROM;

	if (priv->ndev->features & NETIF_F_RXCSUM)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash)
		rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);
}

static void gfar_mac_tx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tctrl = 0;

	if (priv->ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	if (priv->prio_sched_en)
		tctrl |= TCTRL_TXSCHED_PRIO;
	else {
		tctrl |= TCTRL_TXSCHED_WRRS;
		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
	}

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
		tctrl |= TCTRL_VLINS;

	gfar_write(&regs->tctrl, tctrl);
}

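/* Program the per-queue interrupt coalescing registers; tx_mask and
 * rx_mask select which queues to (re)configure.  Non-multi-group
 * devices only have the one txic/rxic register pair.
 */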
static void gfar_configure_coalescing(struct gfar_private *priv,
			       unsigned long tx_mask, unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;

	if (priv->mode == MQ_MG_MODE) {
		int i = 0;

		baddr = &regs->txic0;
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->tx_queue[i]->txcoalescing))
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
		}

		baddr = &regs->rxic0;
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->rx_queue[i]->rxcoalescing))
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
		}
	} else {
		/* Backward compatible case -- even if we enable
		 * multiple queues, there's only single reg to program
		 */
		gfar_write(&regs->txic, 0);
		if (likely(priv->tx_queue[0]->txcoalescing))
			gfar_write(&regs->txic, priv->tx_queue[0]->txic);

		gfar_write(&regs->rxic, 0);
		if (unlikely(priv->rx_queue[0]->rxcoalescing))
			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
	}
}

void gfar_configure_coalescing_all(struct gfar_private *priv)
{
	gfar_configure_coalescing(priv, 0xFF, 0xFF);
}

static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes   = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes   = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}

static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_features = gfar_set_features,
	.ndo_set_rx_mode = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};

static void gfar_ints_disable(struct gfar_private *priv)
{
	int i;
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;
		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}
}

static void gfar_ints_enable(struct gfar_private *priv)
{
	int i;
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;
		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}
}

void lock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_unlock(&priv->tx_queue[i]->txlock);
}

static int gfar_alloc_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i])
			return -ENOMEM;

		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = priv->ndev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}
	return 0;
}

static int gfar_alloc_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i])
			return -ENOMEM;

		priv->rx_queue[i]->rx_skbuff = NULL;
		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->dev = priv->ndev;
	}
	return 0;
}

static void gfar_free_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void gfar_free_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void free_gfar_dev(struct gfar_private *priv)
{
	int i, j;

	for (i = 0; i < priv->num_grps; i++)
		for (j = 0; j < GFAR_NUM_IRQS; j++) {
			kfree(priv->gfargrp[i].irqinfo[j]);
			priv->gfargrp[i].irqinfo[j] = NULL;
		}

	free_netdev(priv->ndev);
}

static void disable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_disable(&priv->gfargrp[i].napi_rx);
		napi_disable(&priv->gfargrp[i].napi_tx);
	}
}

static void enable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_enable(&priv->gfargrp[i].napi_rx);
		napi_enable(&priv->gfargrp[i].napi_tx);
	}
}

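/* Set up one interrupt group from its device tree node: map its register
 * block, fetch its IRQs, and assign it the Rx/Tx queues selected by the
 * fsl,rx-bit-map/fsl,tx-bit-map properties (or the defaults).
 */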
static int gfar_parse_group(struct device_node *np,
			    struct gfar_private *priv, const char *model)
{
	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
	int i;

	for (i = 0; i < GFAR_NUM_IRQS; i++) {
		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
					  GFP_KERNEL);
		if (!grp->irqinfo[i])
			return -ENOMEM;
	}

	grp->regs = of_iomap(np, 0);
	if (!grp->regs)
		return -ENOMEM;

	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
		if (gfar_irq(grp, TX)->irq == NO_IRQ ||
		    gfar_irq(grp, RX)->irq == NO_IRQ ||
		    gfar_irq(grp, ER)->irq == NO_IRQ)
			return -EINVAL;
	}

	grp->priv = priv;
	spin_lock_init(&grp->grplock);
	if (priv->mode == MQ_MG_MODE) {
		u32 *rxq_mask, *txq_mask;
		rxq_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
		txq_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);

		if (priv->poll_mode == GFAR_SQ_POLLING) {
			/* One Q per interrupt group: Q0 to G0, Q1 to G1 */
			grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
			grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
		} else { /* GFAR_MQ_POLLING */
			grp->rx_bit_map = rxq_mask ?
			*rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
			grp->tx_bit_map = txq_mask ?
			*txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
		}
	} else {
		grp->rx_bit_map = 0xFF;
		grp->tx_bit_map = 0xFF;
	}

	/* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses
	 * right to left, so we need to revert the 8 bits to get the q index
	 */
	grp->rx_bit_map = bitrev8(grp->rx_bit_map);
	grp->tx_bit_map = bitrev8(grp->tx_bit_map);

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups
	 */
	for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
699 700
		if (!grp->rx_queue)
			grp->rx_queue = priv->rx_queue[i];
701 702 703 704 705 706 707
		grp->num_rx_queues++;
		grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
		priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		priv->rx_queue[i]->grp = grp;
	}

	for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
		if (!grp->tx_queue)
			grp->tx_queue = priv->tx_queue[i];
		grp->num_tx_queues++;
		grp->tstat |= (TSTAT_CLEAR_THALT >> i);
		priv->tqueue |= (TQUEUE_EN0 >> i);
		priv->tx_queue[i]->grp = grp;
	}

	priv->num_grps++;

	return 0;
}

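/* Parse the controller's device tree node: choose the queue/group
 * layout, allocate the net_device and queue structures, map the register
 * groups, and read the MAC address, stashing, PHY and TBI properties.
 */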
static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	const u32 *stash;
	const u32 *stash_len;
	const u32 *stash_idx;
	unsigned int num_tx_qs, num_rx_qs;
	u32 *tx_queues, *rx_queues;
	unsigned short mode, poll_mode;

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	if (of_device_is_compatible(np, "fsl,etsec2")) {
		mode = MQ_MG_MODE;
		poll_mode = GFAR_SQ_POLLING;
	} else {
		mode = SQ_SG_MODE;
		poll_mode = GFAR_SQ_POLLING;
	}

	/* parse the num of HW tx and rx queues */
	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);

	if (mode == SQ_SG_MODE) {
		num_tx_qs = 1;
		num_rx_qs = 1;
	} else { /* MQ_MG_MODE */
		/* get the actual number of supported groups */
		unsigned int num_grps = of_get_available_child_count(np);

		if (num_grps == 0 || num_grps > MAXGROUPS) {
			dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
				num_grps);
			pr_err("Cannot do alloc_etherdev, aborting\n");
			return -EINVAL;
		}

		if (poll_mode == GFAR_SQ_POLLING) {
			num_tx_qs = num_grps; /* one txq per int group */
			num_rx_qs = num_grps; /* one rxq per int group */
		} else { /* GFAR_MQ_POLLING */
			num_tx_qs = tx_queues ? *tx_queues : 1;
			num_rx_qs = rx_queues ? *rx_queues : 1;
		}
	}

	if (num_tx_qs > MAX_TX_QS) {
		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
		       num_tx_qs, MAX_TX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	if (num_rx_qs > MAX_RX_QS) {
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
		       num_rx_qs, MAX_RX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->ndev = dev;

	priv->mode = mode;
	priv->poll_mode = poll_mode;

	priv->num_tx_queues = num_tx_qs;
	netif_set_real_num_rx_queues(dev, num_rx_qs);
	priv->num_rx_queues = num_rx_qs;

	err = gfar_alloc_tx_queues(priv);
	if (err)
		goto tx_alloc_failed;

	err = gfar_alloc_rx_queues(priv);
	if (err)
		goto rx_alloc_failed;

	/* Init Rx queue filer rule set linked list */
	INIT_LIST_HEAD(&priv->rx_list.list);
	priv->rx_list.count = 0;
	mutex_init(&priv->rx_queue_access);

	model = of_get_property(np, "model", NULL);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (priv->mode == MQ_MG_MODE) {
		for_each_child_of_node(np, child) {
			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else { /* SQ_SG_MODE */
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	stash = of_get_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	stash_len = of_get_property(np, "rx-stash-len", NULL);

	if (stash_len)
		priv->rx_stash_size = *stash_len;

	stash_idx = of_get_property(np, "rx-stash-idx", NULL);

	if (stash_idx)
		priv->rx_stash_index = *stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);

	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				     FSL_GIANFAR_DEV_HAS_COALESCE |
				     FSL_GIANFAR_DEV_HAS_RMON |
				     FSL_GIANFAR_DEV_HAS_MULTI_INTR;

	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				     FSL_GIANFAR_DEV_HAS_COALESCE |
				     FSL_GIANFAR_DEV_HAS_RMON |
				     FSL_GIANFAR_DEV_HAS_MULTI_INTR |
				     FSL_GIANFAR_DEV_HAS_CSUM |
				     FSL_GIANFAR_DEV_HAS_VLAN |
				     FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
				     FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
				     FSL_GIANFAR_DEV_HAS_TIMER;

	ctype = of_get_property(np, "phy-connection-type", NULL);

	/* We only care about rgmii-id.  The rest are autodetected */
	if (ctype && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* Find the TBI PHY.  If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

err_grp_init:
	unmap_group_regs(priv);
rx_alloc_failed:
	gfar_free_rx_queues(priv);
tx_alloc_failed:
	gfar_free_tx_queues(priv);
	free_gfar_dev(priv);
	return err;
}

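/* SIOCSHWTSTAMP handler: switch hardware Tx/Rx timestamping on or off.
 * Changing the Rx setting alters the frame control block layout, so the
 * controller is reset for the change to take effect.
 */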
static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		if (priv->hwts_rx_en) {
			priv->hwts_rx_en = 0;
			reset_gfar(netdev);
		}
		break;
	default:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		if (!priv->hwts_rx_en) {
			priv->hwts_rx_en = 1;
			reset_gfar(netdev);
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	config.flags = 0;
	config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config.rx_filter = (priv->hwts_rx_en ?
			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCSHWTSTAMP)
		return gfar_hwtstamp_set(dev, rq);
	if (cmd == SIOCGHWTSTAMP)
		return gfar_hwtstamp_get(dev, rq);

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, rq, cmd);
}

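/* Write a four-entry filer cluster matching one parser class (e.g.
 * RQFPR_IPV4 | RQFPR_TCP).  Returns the next free filer index, counting
 * down from the end of the table.
 */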
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
				   u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}

static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		priv->ftp_rqfcr[i] = rqfcr;
		priv->ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}

static void __gfar_detect_errata_83xx(struct gfar_private *priv)
{
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
	unsigned int rev = svr & 0xffff;

	/* MPC8313 Rev 2.0 and higher; All MPC837x */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_74;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_76;

	/* MPC8313 Rev < 2.0 */
	if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
		priv->errata |= GFAR_ERRATA_12;
}

static void __gfar_detect_errata_85xx(struct gfar_private *priv)
{
	unsigned int svr = mfspr(SPRN_SVR);

	if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
		priv->errata |= GFAR_ERRATA_12;
	if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
	    ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
		priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
}

static void gfar_detect_errata(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;

	/* no plans to fix */
	priv->errata |= GFAR_ERRATA_A002;

	if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
		__gfar_detect_errata_85xx(priv);
	else /* non-mpc85xx parts, i.e. e300 core based */
		__gfar_detect_errata_83xx(priv);

	if (priv->errata)
		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
			 priv->errata);
}

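/* Soft-reset the MAC, then restore the frame length limits, hash tables,
 * Rx/Tx configuration, station address and interrupt coalescing settings
 * from the current driver state.
 */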
void gfar_mac_reset(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(3);

	/* the soft reset bit is not self-resetting, so we need to
	 * clear it before resuming normal operation
	 */
	gfar_write(&regs->maccfg1, 0);

	udelay(3);

	/* Compute rx_buff_size based on config flags */
	gfar_rx_buff_size_config(priv);

	/* Initialize the max receive frame/buffer lengths */
	gfar_write(&regs->maxfrm, priv->rx_buffer_size);
	gfar_write(&regs->mrblr, priv->rx_buffer_size);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);

	/* Initialize MACCFG2. */
	tempval = MACCFG2_INIT_SETTINGS;

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length
	 */
	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
	    gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;

	gfar_write(&regs->maccfg2, tempval);

	/* Clear mac addr hash registers */
	gfar_write(&regs->igaddr0, 0);
	gfar_write(&regs->igaddr1, 0);
	gfar_write(&regs->igaddr2, 0);
	gfar_write(&regs->igaddr3, 0);
	gfar_write(&regs->igaddr4, 0);
	gfar_write(&regs->igaddr5, 0);
	gfar_write(&regs->igaddr6, 0);
	gfar_write(&regs->igaddr7, 0);

	gfar_write(&regs->gaddr0, 0);
	gfar_write(&regs->gaddr1, 0);
	gfar_write(&regs->gaddr2, 0);
	gfar_write(&regs->gaddr3, 0);
	gfar_write(&regs->gaddr4, 0);
	gfar_write(&regs->gaddr5, 0);
	gfar_write(&regs->gaddr6, 0);
	gfar_write(&regs->gaddr7, 0);

	if (priv->extended_hash)
		gfar_clear_exact_match(priv->ndev);

	gfar_mac_rx_config(priv);

	gfar_mac_tx_config(priv);

	gfar_set_mac_address(priv->ndev);

	gfar_set_multi(priv->ndev);

	/* clear ievent and imask before configuring coalescing */
	gfar_ints_disable(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing_all(priv);
}

static void gfar_hw_init(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 attrs;

	/* Stop the DMA engine now, in case it was running before
	 * (The firmware could have used it, and left it running).
	 */
	gfar_halt(priv);

	gfar_mac_reset(priv);

	/* Zero out the RMON MIB registers if the device has them */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&regs->rmon.cam1, 0xffffffff);
		gfar_write(&regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize ECNTRL */
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing
	 * depending on driver parameters
	 */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	/* FIFO configs */
	gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
	gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
	gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);

	/* Program the interrupt steering regs, only for MG devices */
	if (priv->num_grps > 1)
		gfar_write_isrg(priv);
}

static void __init gfar_init_addr_hash_table(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;

	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}
}

/* Set up the ethernet device structure, private data,
 * and anything else we need before we start
 */
static int gfar_probe(struct platform_device *ofdev)
{
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	int err = 0, i;

	err = gfar_of_init(ofdev, &dev);

	if (err)
		return err;

	priv = netdev_priv(dev);
	priv->ndev = dev;
	priv->ofdev = ofdev;
	priv->dev = &ofdev->dev;
	SET_NETDEV_DEV(dev, &ofdev->dev);

	spin_lock_init(&priv->bflock);
	INIT_WORK(&priv->reset_task, gfar_reset_task);

	platform_set_drvdata(ofdev, priv);

	gfar_detect_errata(priv);

L
1301
	dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
L
	/* Fill in the dev structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->mtu = 1500;
1306
	dev->netdev_ops = &gfar_netdev_ops;
1307 1308
	dev->ethtool_ops = &gfar_ethtool_ops;

1309
	/* Register for napi ...We are registering NAPI for each grp */
1310 1311 1312 1313 1314 1315 1316
	for (i = 0; i < priv->num_grps; i++) {
		if (priv->poll_mode == GFAR_SQ_POLLING) {
			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
				       gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
			netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
				       gfar_poll_tx_sq, 2);
		} else {
			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
				       gfar_poll_rx, GFAR_DEV_WEIGHT);
			netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
				       gfar_poll_tx, 2);
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
				   NETIF_F_RXCSUM;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
				 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_HW_VLAN_CTAG_RX;
		dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
	}

	gfar_init_addr_hash_table(priv);

	/* Insert receive time stamps into padding alignment bytes */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		priv->padding = 8;

	if (dev->features & NETIF_F_IP_CSUM ||
	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		dev->needed_headroom = GMAC_FCB_LEN;

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;

	/* Initializing some of the rx/tx queue level parameters */
	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
		priv->tx_queue[i]->txic = DEFAULT_TXIC;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
	}

	/* always enable rx filer */
	priv->rx_filer_enable = 1;
	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
	/* use priority h/w tx queue scheduling for single queue devices */
	if (priv->num_tx_queues == 1)
		priv->prio_sched_en = 1;

	set_bit(GFAR_DOWN, &priv->state);

	gfar_hw_init(priv);

	err = register_netdev(dev);

	if (err) {
		pr_err("%s: Cannot register net device, aborting\n", dev->name);
		goto register_fail;
	}

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	device_init_wakeup(&dev->dev,
			   priv->device_flags &
			   FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	/* fill out IRQ number and name fields */
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar_priv_grp *grp = &priv->gfargrp[i];
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
			sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_tx");
			sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_rx");
			sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_er");
		} else
			strcpy(gfar_irq(grp, TX)->name, dev->name);
	}

	/* Initialize the filer table */
	gfar_init_filer_table(priv);

	/* Print out the device info */
	netdev_info(dev, "mac: %pM\n", dev->dev_addr);

	/* Even more device info helps when determining which kernel
	 * provided which set of benchmarks.
	 */
	netdev_info(dev, "Running with NAPI enabled\n");
	for (i = 0; i < priv->num_rx_queues; i++)
		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
			    i, priv->rx_queue[i]->rx_ring_size);
	for (i = 0; i < priv->num_tx_queues; i++)
		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
			    i, priv->tx_queue[i]->tx_ring_size);

	return 0;

register_fail:
	unmap_group_regs(priv);
	gfar_free_rx_queues(priv);
	gfar_free_tx_queues(priv);
	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);
	free_gfar_dev(priv);
	return err;
}

static int gfar_remove(struct platform_device *ofdev)
{
	struct gfar_private *priv = platform_get_drvdata(ofdev);

	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);

	unregister_netdev(priv->ndev);
	unmap_group_regs(priv);
	gfar_free_rx_queues(priv);
	gfar_free_tx_queues(priv);
	free_gfar_dev(priv);

	return 0;
}

#ifdef CONFIG_PM

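/* Halt DMA and the transmitter (and the receiver too, unless magic
 * packet wake-up is armed), then stop the PHY for the duration of the
 * sleep.
 */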
static int gfar_suspend(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;

	int magic_packet = priv->wol_en &&
			   (priv->device_flags &
			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	netif_device_detach(ndev);

	if (netif_running(ndev)) {

		local_irq_save(flags);
		lock_tx_qs(priv);

		gfar_halt_nodisable(priv);

		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
		tempval = gfar_read(&regs->maccfg1);

		tempval &= ~MACCFG1_TX_EN;

		if (!magic_packet)
			tempval &= ~MACCFG1_RX_EN;

		gfar_write(&regs->maccfg1, tempval);

		unlock_tx_qs(priv);
		local_irq_restore(flags);

		disable_napi(priv);

		if (magic_packet) {
			/* Enable interrupt on Magic Packet */
			gfar_write(&regs->imask, IMASK_MAG);

			/* Enable Magic Packet mode */
			tempval = gfar_read(&regs->maccfg2);
			tempval |= MACCFG2_MPEN;
			gfar_write(&regs->maccfg2, tempval);
		} else {
			phy_stop(priv->phydev);
		}
	}

	return 0;
}

static int gfar_resume(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;
	int magic_packet = priv->wol_en &&
			   (priv->device_flags &
			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);
		return 0;
	}

	if (!magic_packet && priv->phydev)
		phy_start(priv->phydev);

	/* Disable Magic Packet mode, in case something
	 * else woke us up.
	 */
	local_irq_save(flags);
	lock_tx_qs(priv);

	tempval = gfar_read(&regs->maccfg2);
	tempval &= ~MACCFG2_MPEN;
	gfar_write(&regs->maccfg2, tempval);

	gfar_start(priv);

	unlock_tx_qs(priv);
	local_irq_restore(flags);

	netif_device_attach(ndev);

	enable_napi(priv);

	return 0;
}

static int gfar_restore(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);

		return 0;
	}

	if (gfar_init_bds(ndev)) {
		free_skb_resources(priv);
		return -ENOMEM;
	}

	gfar_mac_reset(priv);

	gfar_init_tx_rx_base(priv);

	gfar_start(priv);

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	if (priv->phydev)
		phy_start(priv->phydev);

	netif_device_attach(ndev);
	enable_napi(priv);

	return 0;
}

static struct dev_pm_ops gfar_pm_ops = {
	.suspend = gfar_suspend,
	.resume = gfar_resume,
	.freeze = gfar_suspend,
	.thaw = gfar_resume,
	.restore = gfar_restore,
};

#define GFAR_PM_OPS (&gfar_pm_ops)

#else

#define GFAR_PM_OPS NULL

#endif

/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 ecntrl;

	ecntrl = gfar_read(&regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
			return PHY_INTERFACE_MODE_RMII;
		}
		else {
			phy_interface_t interface = priv->interface;

			/* This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}


/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	uint gigabit_support =
		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
		GFAR_SUPPORTED_GBIT : 0;
	phy_interface_t interface;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	interface = gfar_get_interface(dev);

	priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
				      interface);
	if (!priv->phydev)
		priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
							 interface);
	if (!priv->phydev) {
		dev_err(&dev->dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

	/* Remove any features not supported by the controller */
	priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
	priv->phydev->advertising = priv->phydev->supported;

	return 0;
}

/* Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip.  We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register.  We assume
 * that the TBIPA register is valid.  Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *tbiphy;

	if (!priv->tbi_node) {
		dev_warn(&dev->dev, "error: SGMII mode requires that the "
				    "device tree specify a tbi-handle\n");
		return;
	}

	tbiphy = of_phy_find_device(priv->tbi_node);
	if (!tbiphy) {
		dev_err(&dev->dev, "error: Could not get TBI device\n");
		return;
	}

	/* If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
	 * everything for us?  Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
		return;

	/* Single clk mode, mii mode off (for serdes communication) */
	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);

	phy_write(tbiphy, MII_ADVERTISE,
		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
		  ADVERTISE_1000XPSE_ASYM);

	phy_write(tbiphy, MII_BMCR,
		  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
		  BMCR_SPEED1000);
}

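/* Erratum A002 workaround: report whether the eTSEC Rx path is idle so
 * that a graceful receive stop (GRS) can be trusted to complete.
 */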
static int __gfar_is_rx_idle(struct gfar_private *priv)
{
	u32 res;

	/* Normally the TSEC should not hang on GRS commands, so we should
	 * actually wait for IEVENT_GRSC flag.
	 */
	if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
		return 0;

	/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
	 * and the Rx can be safely reset.
	 */
	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
	res &= 0x7f807f80;
	if ((res & 0xffff) == (res >> 16))
		return 1;

	return 0;
}

/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

1753
	gfar_ints_disable(priv);
L
Linus Torvalds 已提交
1754 1755

	/* Stop the DMA, and wait for it to stop */
1756
	tempval = gfar_read(&regs->dmactrl);
1757 1758
	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
	    (DMACTRL_GRS | DMACTRL_GTS)) {
1759 1760
		int ret;

L
Linus Torvalds 已提交
1761
		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
1762
		gfar_write(&regs->dmactrl, tempval);
L
Linus Torvalds 已提交
1763

1764 1765 1766 1767 1768 1769 1770
		do {
			ret = spin_event_timeout(((gfar_read(&regs->ievent) &
				 (IEVENT_GRSC | IEVENT_GTSC)) ==
				 (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
			if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
				ret = __gfar_is_rx_idle(priv);
		} while (!ret);
L
Linus Torvalds 已提交
1771
	}
1772 1773 1774
}

/* Halt the receive and transmit queues, then disable Rx/Tx DMA */
void gfar_halt(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	/* Disable the Rx/Tx hw queues */
	gfar_write(&regs->rqueue, 0);
	gfar_write(&regs->tqueue, 0);

	mdelay(10);	/* give in-flight traffic time to settle */

	gfar_halt_nodisable(priv);

	/* Disable Rx/Tx DMA */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}

void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	netif_tx_stop_all_queues(dev);

	smp_mb__before_clear_bit();
	set_bit(GFAR_DOWN, &priv->state);
	smp_mb__after_clear_bit();

	disable_napi(priv);

	/* disable ints and gracefully shut down Rx/Tx DMA */
	gfar_halt(priv);

	phy_stop(priv->phydev);

	free_skb_resources(priv);
}

static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
{
	struct txbd8 *txbdp;
	struct gfar_private *priv = netdev_priv(tx_queue->dev);
	int i, j;

	txbdp = tx_queue->tx_bd_base;

	for (i = 0; i < tx_queue->tx_ring_size; i++) {
		if (!tx_queue->tx_skbuff[i])
			continue;

		dma_unmap_single(priv->dev, txbdp->bufPtr,
				 txbdp->length, DMA_TO_DEVICE);
		txbdp->lstatus = 0;
		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
		     j++) {
			txbdp++;
			dma_unmap_page(priv->dev, txbdp->bufPtr,
				       txbdp->length, DMA_TO_DEVICE);
		}
		txbdp++;
		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
		tx_queue->tx_skbuff[i] = NULL;
	}
	kfree(tx_queue->tx_skbuff);
	tx_queue->tx_skbuff = NULL;
}

static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
{
	struct rxbd8 *rxbdp;
	struct gfar_private *priv = netdev_priv(rx_queue->dev);
	int i;

	rxbdp = rx_queue->rx_bd_base;

	for (i = 0; i < rx_queue->rx_ring_size; i++) {
		if (rx_queue->rx_skbuff[i]) {
			dma_unmap_single(priv->dev, rxbdp->bufPtr,
					 priv->rx_buffer_size,
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
			rx_queue->rx_skbuff[i] = NULL;
		}
		rxbdp->lstatus = 0;
		rxbdp->bufPtr = 0;
		rxbdp++;
	}
	kfree(rx_queue->rx_skbuff);
	rx_queue->rx_skbuff = NULL;
}

/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff
 */
static void free_skb_resources(struct gfar_private *priv)
{
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	for (i = 0; i < priv->num_tx_queues; i++) {
		struct netdev_queue *txq;

		tx_queue = priv->tx_queue[i];
		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
		if (tx_queue->tx_skbuff)
			free_skb_tx_queue(tx_queue);
		netdev_tx_reset_queue(txq);
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		if (rx_queue->rx_skbuff)
			free_skb_rx_queue(rx_queue);
	}

	dma_free_coherent(priv->dev,
			  sizeof(struct txbd8) * priv->total_tx_ring_size +
			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
			  priv->tx_queue[0]->tx_bd_base,
			  priv->tx_queue[0]->tx_bd_dma_base);
}
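
/* Layout note (inferred from the dma_free_coherent() call above): all
 * Tx rings followed by all Rx rings live in one contiguous DMA-coherent
 * block anchored at tx_queue[0]->tx_bd_base:
 *
 *   [txbd8 ring 0 .. ring N | rxbd8 ring 0 .. ring M]
 *
 * so a single free here releases every descriptor ring.
 */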

void gfar_start(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	int i = 0;

	/* Enable Rx/Tx hw queues */
	gfar_write(&regs->rqueue, priv->rqueue);
	gfar_write(&regs->tqueue, priv->tqueue);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear THLT/RHLT, so that the DMA starts polling now */
		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
	}

	/* Enable Rx/Tx DMA */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	gfar_ints_enable(priv);

	priv->ndev->trans_start = jiffies; /* prevent tx timeout */
}

static void free_grp_irqs(struct gfar_priv_grp *grp)
{
	free_irq(gfar_irq(grp, TX)->irq, grp);
	free_irq(gfar_irq(grp, RX)->irq, grp);
	free_irq(gfar_irq(grp, ER)->irq, grp);
}

static int register_grp_irqs(struct gfar_priv_grp *grp)
{
	struct gfar_private *priv = grp->priv;
	struct net_device *dev = priv->ndev;
	int err;

	/* If the device has multiple interrupts, register for
	 * them.  Otherwise, only register for the one
	 */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive
		 */
		err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
				  gfar_irq(grp, ER)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, ER)->irq);

			goto err_irq_fail;
		}
		err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
				  gfar_irq(grp, TX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, TX)->irq);
			goto tx_irq_fail;
		}
		err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
				  gfar_irq(grp, RX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, RX)->irq);
			goto rx_irq_fail;
		}
	} else {
		err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
				  gfar_irq(grp, TX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, TX)->irq);
			goto err_irq_fail;
		}
	}

	return 0;

rx_irq_fail:
	free_irq(gfar_irq(grp, TX)->irq, grp);
tx_irq_fail:
	free_irq(gfar_irq(grp, ER)->irq, grp);
err_irq_fail:
	return err;
}

static void gfar_free_irq(struct gfar_private *priv)
{
	int i;

	/* Free the IRQs */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++)
			free_grp_irqs(&priv->gfargrp[i]);
	} else {
		for (i = 0; i < priv->num_grps; i++)
			free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
				 &priv->gfargrp[i]);
	}
}

static int gfar_request_irq(struct gfar_private *priv)
{
	int err, i, j;

	for (i = 0; i < priv->num_grps; i++) {
		err = register_grp_irqs(&priv->gfargrp[i]);
		if (err) {
			for (j = 0; j < i; j++)
				free_grp_irqs(&priv->gfargrp[j]);
			return err;
		}
	}

	return 0;
}

/* Bring the controller up and running */
int startup_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	int err;

	gfar_mac_reset(priv);

	err = gfar_alloc_skb_resources(ndev);
	if (err)
		return err;

	gfar_init_tx_rx_base(priv);

	smp_mb__before_clear_bit();
	clear_bit(GFAR_DOWN, &priv->state);
	smp_mb__after_clear_bit();

	/* Start Rx/Tx DMA and enable the interrupts */
	gfar_start(priv);

	phy_start(priv->phydev);

	enable_napi(priv);

	netif_tx_wake_all_queues(ndev);

	return 0;
}

/* Called when something needs to use the ethernet device
 * Returns 0 for success.
 */
static int gfar_enet_open(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err;

	err = init_phy(dev);
	if (err)
		return err;

	err = gfar_request_irq(priv);
	if (err)
		return err;

	err = startup_gfar(dev);
	if (err)
		return err;

	device_set_wakeup_enable(&dev->dev, priv->wol_en);

	return err;
}

static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;
}

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
				    int fcb_length)
{
	/* If we're here, it's an IP packet with a TCP or UDP
	 * payload.  We set it to checksum, using a pseudo-header
	 * we provide
	 */
	u8 flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is
	 * And provide the already calculated phcs
	 */
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = udp_hdr(skb)->check;
	} else
		fcb->phcs = tcp_hdr(skb)->check;

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr
	 */
	fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
	fcb->l4os = skb_network_header_len(skb);

	fcb->flags = flags;
}

static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = vlan_tx_tag_get(skb);
}

static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
				      struct txbd8 *base, int ring_size)
{
	struct txbd8 *new_bd = bdp + stride;

	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
}

static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
				      int ring_size)
{
	return skip_txbd(bdp, 1, base, ring_size);
}
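
/* Wrap-around example (illustrative values): with ring_size = 256 and
 * bdp == base + 255, skip_txbd(bdp, 2, base, 256) first computes
 * base + 257, sees it is past the ring end, and returns base + 1.
 */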

/* eTSEC12: csum generation not supported for some fcb offsets */
static inline bool gfar_csum_errata_12(struct gfar_private *priv,
				       unsigned long fcb_addr)
{
	return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
	       (fcb_addr % 0x20) > 0x18);
}

/* eTSEC76: csum generation for frames larger than 2500 may
 * cause excess delays before start of transmission
 */
static inline bool gfar_csum_errata_76(struct gfar_private *priv,
				       unsigned int len)
{
	return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
	       (len > 2500));
}
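
/* Example for the eTSEC12 check (hypothetical FCB address): an FCB at
 * 0x1c3c has (0x1c3c % 0x20) == 0x1c, which is > 0x18, so it falls in
 * the last 8 bytes of a 32-byte window and gfar_start_xmit() must fall
 * back to skb_checksum_help() instead of checksum offload.
 */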

/* This is called by the kernel when a frame is ready for transmission.
 * It is pointed to by the dev->hard_start_xmit function pointer
 */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct netdev_queue *txq;
	struct gfar __iomem *regs = NULL;
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
	u32 lstatus;
	int i, rq = 0;
	int do_tstamp, do_csum, do_vlan;
	u32 bufaddr;
	unsigned long flags;
	unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;

	rq = skb->queue_mapping;
	tx_queue = priv->tx_queue[rq];
	txq = netdev_get_tx_queue(dev, rq);
	base = tx_queue->tx_bd_base;
	regs = tx_queue->grp->regs;

	do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
	do_vlan = vlan_tx_tag_present(skb);
	do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		    priv->hwts_tx_en;

	if (do_csum || do_vlan)
		fcb_len = GMAC_FCB_LEN;

	/* check if time stamp should be generated */
	if (unlikely(do_tstamp))
		fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;

	/* make space for additional header when fcb is needed */
	if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
		struct sk_buff *skb_new;

		skb_new = skb_realloc_headroom(skb, fcb_len);
		if (!skb_new) {
			dev->stats.tx_errors++;
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}

		if (skb->sk)
			skb_set_owner_w(skb_new, skb->sk);
		dev_consume_skb_any(skb);
		skb = skb_new;
	}

	/* total number of fragments in the SKB */
	nr_frags = skb_shinfo(skb)->nr_frags;

	/* calculate the required number of TxBDs for this skb */
	if (unlikely(do_tstamp))
		nr_txbds = nr_frags + 2;
	else
		nr_txbds = nr_frags + 1;

	/* check if there is space to queue this packet */
	if (nr_txbds > tx_queue->num_txbdfree) {
		/* no space, stop the queue */
		netif_tx_stop_queue(txq);
		dev->stats.tx_fifo_errors++;
		return NETDEV_TX_BUSY;
	}
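
	/* Descriptor-count example (illustrative): an skb with 3 page
	 * fragments and HW timestamping on needs nr_txbds = 3 + 2 = 5:
	 * one BD for the FCB, one for the linear headers, and three for
	 * the fragments.
	 */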

	/* Update transmit stats */
	bytes_sent = skb->len;
	tx_queue->stats.tx_bytes += bytes_sent;
	/* keep Tx bytes on wire for BQL accounting */
	GFAR_CB(skb)->bytes_sent = bytes_sent;
	tx_queue->stats.tx_packets++;

	txbdp = txbdp_start = tx_queue->cur_tx;
	lstatus = txbdp->lstatus;

	/* Time stamp insertion requires one additional TxBD */
	if (unlikely(do_tstamp))
		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
						 tx_queue->tx_ring_size);

	if (nr_frags == 0) {
		if (unlikely(do_tstamp))
			txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
							  TXBD_INTERRUPT);
		else
			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
	} else {
		/* Place the fragment addresses and lengths into the TxBDs */
		for (i = 0; i < nr_frags; i++) {
			unsigned int frag_len;
			/* Point at the next BD, wrapping as needed */
			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);

			frag_len = skb_shinfo(skb)->frags[i].size;

			lstatus = txbdp->lstatus | frag_len |
				  BD_LFLAG(TXBD_READY);

			/* Handle the last BD specially */
			if (i == nr_frags - 1)
				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

			bufaddr = skb_frag_dma_map(priv->dev,
						   &skb_shinfo(skb)->frags[i],
						   0,
						   frag_len,
						   DMA_TO_DEVICE);

			/* set the TxBD length and buffer pointer */
			txbdp->bufPtr = bufaddr;
			txbdp->lstatus = lstatus;
		}

		lstatus = txbdp_start->lstatus;
	}

	/* Add TxPAL between FCB and frame if required */
	if (unlikely(do_tstamp)) {
		skb_push(skb, GMAC_TXPAL_LEN);
		memset(skb->data, 0, GMAC_TXPAL_LEN);
	}

	/* Add TxFCB if required */
	if (fcb_len) {
		fcb = gfar_add_fcb(skb);
		lstatus |= BD_LFLAG(TXBD_TOE);
	}

	/* Set up checksumming */
	if (do_csum) {
		gfar_tx_checksum(skb, fcb, fcb_len);

		if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
		    unlikely(gfar_csum_errata_76(priv, skb->len))) {
			__skb_pull(skb, GMAC_FCB_LEN);
			skb_checksum_help(skb);
			if (do_vlan || do_tstamp) {
				/* put back a new fcb for vlan/tstamp TOE */
				fcb = gfar_add_fcb(skb);
			} else {
				/* Tx TOE not used */
				lstatus &= ~(BD_LFLAG(TXBD_TOE));
				fcb = NULL;
			}
		}
	}

	if (do_vlan)
		gfar_tx_vlan(skb, fcb);

	/* Setup tx hardware time stamping if requested */
	if (unlikely(do_tstamp)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		fcb->ptp = 1;
	}

	txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data,
					     skb_headlen(skb), DMA_TO_DEVICE);

	/* If time stamping is requested one additional TxBD must be set up. The
	 * first TxBD points to the FCB and must have a data length of
	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
	 * the full frame length.
	 */
	if (unlikely(do_tstamp)) {
		txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_len;
		txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
					 (skb_headlen(skb) - fcb_len);
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
	} else {
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
	}

	netdev_tx_sent_queue(txq, bytes_sent);

	/* We can work in parallel with gfar_clean_tx_ring(), except
	 * when modifying num_txbdfree. Note that we didn't grab the lock
	 * when we were reading the num_txbdfree and checking for available
	 * space, that's because outside of this function it can only grow,
	 * and once we've got needed space, it cannot suddenly disappear.
	 *
	 * The lock also protects us from gfar_error(), which can modify
	 * regs->tstat and thus retrigger the transfers, which is why we
	 * also must grab the lock before setting ready bit for the first
	 * to be transmitted BD.
	 */
	spin_lock_irqsave(&tx_queue->txlock, flags);

	/* The powerpc-specific eieio() is used, as wmb() has too strong
	 * semantics (it requires synchronization between cacheable and
	 * uncacheable mappings, which eieio doesn't provide and which we
	 * don't need), thus requiring a more expensive sync instruction.  At
	 * some point, the set of architecture-independent barrier functions
	 * should be expanded to include weaker barriers.
	 */
	eieio();
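
	/* (Portability aside, assuming a later kernel: the generic
	 * dma_wmb() barrier expresses the same ordering intent without
	 * paying for a full wmb()/sync.)
	 */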

	txbdp_start->lstatus = lstatus;

	eieio(); /* force lstatus write before tx_skbuff */

	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;

	/* Update the current skb pointer to the next entry we will use
	 * (wrapping if necessary)
	 */
	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
			      TX_RING_MOD_MASK(tx_queue->tx_ring_size);

	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);

	/* reduce TxBD free count */
	tx_queue->num_txbdfree -= (nr_txbds);

	/* If the next BD still needs to be cleaned up, then the bds
	 * are full.  We need to tell the kernel to stop sending us stuff.
	 */
	if (!tx_queue->num_txbdfree) {
		netif_tx_stop_queue(txq);

		dev->stats.tx_fifo_errors++;
	}

	/* Tell the DMA to go go go */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);

	/* Unlock priv */
	spin_unlock_irqrestore(&tx_queue->txlock, flags);

	return NETDEV_TX_OK;
}

/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	cancel_work_sync(&priv->reset_task);
	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	gfar_free_irq(priv);

	return 0;
}

/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}

static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	struct gfar_private *priv = netdev_priv(dev);
	int frame_size = new_mtu + ETH_HLEN;

	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
		netif_err(priv, drv, dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	if (dev->flags & IFF_UP)
		stop_gfar(dev);

	dev->mtu = new_mtu;

	if (dev->flags & IFF_UP)
		startup_gfar(dev);

	clear_bit_unlock(GFAR_RESETTING, &priv->state);

	return 0;
}

void reset_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	stop_gfar(ndev);
	startup_gfar(ndev);

	clear_bit_unlock(GFAR_RESETTING, &priv->state);
}

/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
	struct gfar_private *priv = container_of(work, struct gfar_private,
						 reset_task);

	reset_gfar(priv->ndev);
}

static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);
}

static void gfar_align_skb(struct sk_buff *skb)
{
	/* We need the data buffer to be aligned properly.  We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb, RXBUF_ALIGNMENT -
		    (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
}

/* Reclaim completed Tx descriptors, free their skbs, and update the
 * BQL and free-BD accounting; runs from NAPI (not interrupt) context.
 */
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
	struct net_device *dev = tx_queue->dev;
	struct netdev_queue *txq;
	struct gfar_private *priv = netdev_priv(dev);
	struct txbd8 *bdp, *next = NULL;
	struct txbd8 *lbdp = NULL;
	struct txbd8 *base = tx_queue->tx_bd_base;
	struct sk_buff *skb;
	int skb_dirtytx;
	int tx_ring_size = tx_queue->tx_ring_size;
	int frags = 0, nr_txbds = 0;
	int i;
	int howmany = 0;
	int tqi = tx_queue->qindex;
	unsigned int bytes_sent = 0;
	u32 lstatus;
	size_t buflen;

	txq = netdev_get_tx_queue(dev, tqi);
	bdp = tx_queue->dirty_tx;
	skb_dirtytx = tx_queue->skb_dirtytx;

	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
		unsigned long flags;

		frags = skb_shinfo(skb)->nr_frags;

		/* When time stamping, one additional TxBD must be freed.
		 * Also, we need to dma_unmap_single() the TxPAL.
		 */
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
			nr_txbds = frags + 2;
		else
			nr_txbds = frags + 1;

		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);

		lstatus = lbdp->lstatus;

		/* Only clean completed frames */
		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
		    (lstatus & BD_LENGTH_MASK))
			break;

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			next = next_txbd(bdp, base, tx_ring_size);
			buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN;
		} else
			buflen = bdp->length;

		dma_unmap_single(priv->dev, bdp->bufPtr,
				 buflen, DMA_TO_DEVICE);

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			struct skb_shared_hwtstamps shhwtstamps;
			u64 *ns = (u64 *)(((u32)skb->data + 0x10) & ~0x7);

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
			skb_tstamp_tx(skb, &shhwtstamps);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next;
		}

		bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
		bdp = next_txbd(bdp, base, tx_ring_size);

		for (i = 0; i < frags; i++) {
			dma_unmap_page(priv->dev, bdp->bufPtr,
				       bdp->length, DMA_TO_DEVICE);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next_txbd(bdp, base, tx_ring_size);
		}

		bytes_sent += GFAR_CB(skb)->bytes_sent;

		dev_kfree_skb_any(skb);

		tx_queue->tx_skbuff[skb_dirtytx] = NULL;

		skb_dirtytx = (skb_dirtytx + 1) &
			      TX_RING_MOD_MASK(tx_ring_size);

		howmany++;
		spin_lock_irqsave(&tx_queue->txlock, flags);
		tx_queue->num_txbdfree += nr_txbds;
		spin_unlock_irqrestore(&tx_queue->txlock, flags);
	}

	/* If we freed a buffer, we can restart transmission, if necessary */
	if (tx_queue->num_txbdfree &&
	    netif_tx_queue_stopped(txq) &&
	    !(test_bit(GFAR_DOWN, &priv->state)))
		netif_wake_subqueue(priv->ndev, tqi);

	/* Update dirty indicators */
	tx_queue->skb_dirtytx = skb_dirtytx;
	tx_queue->dirty_tx = bdp;

	netdev_tx_completed_queue(txq, howmany, bytes_sent);
}

static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			   struct sk_buff *skb)
{
	struct net_device *dev = rx_queue->dev;
	struct gfar_private *priv = netdev_priv(dev);
	dma_addr_t buf;

	buf = dma_map_single(priv->dev, skb->data,
			     priv->rx_buffer_size, DMA_FROM_DEVICE);
	gfar_init_rxbdp(rx_queue, bdp, buf);
}

static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
	if (!skb)
		return NULL;

	gfar_align_skb(skb);

	return skb;
}

struct sk_buff *gfar_new_skb(struct net_device *dev)
{
	return gfar_alloc_skb(dev);
}

static inline void count_errors(unsigned short status, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;

		atomic64_inc(&estats->rx_trunc);

		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			atomic64_inc(&estats->rx_large);
		else
			atomic64_inc(&estats->rx_short);
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		atomic64_inc(&estats->rx_nonoctet);
	}
	if (status & RXBD_CRCERR) {
		atomic64_inc(&estats->rx_crcerr);
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		atomic64_inc(&estats->rx_overrun);
		/* count overruns as such, not as CRC errors */
		stats->rx_over_errors++;
	}
}

irqreturn_t gfar_receive(int irq, void *grp_id)
{
	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
	unsigned long flags;
	u32 imask;

	if (likely(napi_schedule_prep(&grp->napi_rx))) {
		spin_lock_irqsave(&grp->grplock, flags);
		imask = gfar_read(&grp->regs->imask);
		imask &= IMASK_RX_DISABLED;
		gfar_write(&grp->regs->imask, imask);
		spin_unlock_irqrestore(&grp->grplock, flags);
		__napi_schedule(&grp->napi_rx);
	} else {
		/* Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
	}

	return IRQ_HANDLED;
}

/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *grp_id)
{
	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
	unsigned long flags;
	u32 imask;

	if (likely(napi_schedule_prep(&grp->napi_tx))) {
		spin_lock_irqsave(&grp->grplock, flags);
		imask = gfar_read(&grp->regs->imask);
		imask &= IMASK_TX_DISABLED;
		gfar_write(&grp->regs->imask, imask);
		spin_unlock_irqrestore(&grp->grplock, flags);
		__napi_schedule(&grp->napi_tx);
	} else {
		/* Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
	}

	return IRQ_HANDLED;
}

static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary.  Otherwise, we let the
	 * stack verify the checksum itself.
	 */
	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);
}


/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			       int amount_pull, struct napi_struct *napi)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;

	/* fcb is at the beginning if exists */
	fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb
	 * Remove the padded bytes, if there are any
	 */
	if (amount_pull) {
		skb_record_rx_queue(skb, fcb->rq);
		skb_pull(skb, amount_pull);
	}

	/* Get receive timestamp from the skb */
	if (priv->hwts_rx_en) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		u64 *ns = (u64 *) skb->data;

		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
		shhwtstamps->hwtstamp = ns_to_ktime(*ns);
	}

	if (priv->padding)
		skb_pull(skb, priv->padding);

	if (dev->features & NETIF_F_RXCSUM)
		gfar_rx_checksum(skb, fcb);

	/* Tell the skb what kind of packet this is */
	skb->protocol = eth_type_trans(skb, dev);

	/* We need to check for NETIF_F_HW_VLAN_CTAG_RX here.
	 * Even if vlan rx accel is disabled, on some chips
	 * RXFCB_VLN is pseudo randomly set.
	 */
	if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
	    fcb->flags & RXFCB_VLN)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), fcb->vlctl);

	/* Send the packet up the stack */
	napi_gro_receive(napi, skb);
}

/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached. Returns the number
 * of frames handled
 */
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
	struct net_device *dev = rx_queue->dev;
	struct rxbd8 *bdp, *base;
	struct sk_buff *skb;
	int pkt_len;
	int amount_pull;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = rx_queue->cur_rx;
	base = rx_queue->rx_bd_base;

	amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;

	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		struct sk_buff *newskb;

		rmb();

		/* Add another skb for the future */
		newskb = gfar_new_skb(dev);

		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];

		dma_unmap_single(priv->dev, bdp->bufPtr,
				 priv->rx_buffer_size, DMA_FROM_DEVICE);

		if (unlikely(!(bdp->status & RXBD_ERR) &&
			     bdp->length > priv->rx_buffer_size))
			bdp->status = RXBD_LARGE;

		/* We drop the frame if we failed to allocate a new buffer */
		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
			     bdp->status & RXBD_ERR)) {
			count_errors(bdp->status, dev);

			if (unlikely(!newskb))
				newskb = skb;
			else if (skb)
				dev_kfree_skb(skb);
		} else {
			/* Increment the number of packets */
			rx_queue->stats.rx_packets++;
			howmany++;

			if (likely(skb)) {
				pkt_len = bdp->length - ETH_FCS_LEN;
				/* Remove the FCS from the packet length */
				skb_put(skb, pkt_len);
				rx_queue->stats.rx_bytes += pkt_len;
				skb_record_rx_queue(skb, rx_queue->qindex);
				gfar_process_frame(dev, skb, amount_pull,
						   &rx_queue->grp->napi_rx);
			} else {
				netif_warn(priv, rx_err, dev, "Missing skb!\n");
				rx_queue->stats.rx_dropped++;
				atomic64_inc(&priv->extra_stats.rx_skbmissing);
			}
		}

		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;

		/* Setup the new bdp */
		gfar_new_rxbdp(rx_queue, bdp, newskb);

		/* Update to the next pointer */
		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);

		/* update to point at the next skb */
		rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
				      RX_RING_MOD_MASK(rx_queue->rx_ring_size);
	}

	/* Update the current rxbd pointer to be the next one */
	rx_queue->cur_rx = bdp;

	return howmany;
}

static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_rx);
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
	int work_done = 0;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_RX_MASK);

	work_done = gfar_clean_rx_ring(rx_queue, budget);

	if (work_done < budget) {
		u32 imask;
		napi_complete(napi);
		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		spin_lock_irq(&gfargrp->grplock);
		imask = gfar_read(&regs->imask);
		imask |= IMASK_RX_DEFAULT;
		gfar_write(&regs->imask, imask);
		spin_unlock_irq(&gfargrp->grplock);
	}

	return work_done;
}

static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_tx);
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
	u32 imask;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_TX_MASK);

	/* run Tx cleanup to completion */
	if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
		gfar_clean_tx_ring(tx_queue);

	napi_complete(napi);

	spin_lock_irq(&gfargrp->grplock);
	imask = gfar_read(&regs->imask);
	imask |= IMASK_TX_DEFAULT;
	gfar_write(&regs->imask, imask);
	spin_unlock_irq(&gfargrp->grplock);

	return 0;
}

static int gfar_poll_rx(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_rx);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int work_done = 0, work_done_per_q = 0;
	int i, budget_per_q = 0;
	unsigned long rstat_rxf;
	int num_act_queues;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_RX_MASK);

	rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;

	num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
	if (num_act_queues)
		budget_per_q = budget / num_act_queues;
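	/* Budget-split example (illustrative): a NAPI budget of 64 with
	 * two rings flagged in RSTAT gives budget_per_q = 32; budget left
	 * unused by a ring that finishes early is not redistributed.
	 */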

	for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
		/* skip queue if not active */
		if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
			continue;
		rx_queue = priv->rx_queue[i];
		work_done_per_q =
			gfar_clean_rx_ring(rx_queue, budget_per_q);
		work_done += work_done_per_q;

		/* finished processing this queue */
		if (work_done_per_q < budget_per_q) {
			/* clear active queue hw indication */
			gfar_write(&regs->rstat,
				   RSTAT_CLEAR_RXF0 >> i);
			num_act_queues--;

			if (!num_act_queues)
				break;
		}
	}
	if (!num_act_queues) {
		u32 imask;
		napi_complete(napi);
		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);
		spin_lock_irq(&gfargrp->grplock);
		imask = gfar_read(&regs->imask);
		imask |= IMASK_RX_DEFAULT;
		gfar_write(&regs->imask, imask);
		spin_unlock_irq(&gfargrp->grplock);
	}

	return work_done;
}

static int gfar_poll_tx(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_tx);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	int has_tx_work = 0;
	int i;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_TX_MASK);

	for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
		tx_queue = priv->tx_queue[i];
		/* run Tx cleanup to completion */
		if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
			gfar_clean_tx_ring(tx_queue);
			has_tx_work = 1;
		}
	}

	if (!has_tx_work) {
		u32 imask;
		napi_complete(napi);

		spin_lock_irq(&gfargrp->grplock);
		imask = gfar_read(&regs->imask);
		imask |= IMASK_TX_DEFAULT;
		gfar_write(&regs->imask, imask);
		spin_unlock_irq(&gfargrp->grplock);
	}

	return 0;
}


#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i;

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++) {
			struct gfar_priv_grp *grp = &priv->gfargrp[i];

			disable_irq(gfar_irq(grp, TX)->irq);
			disable_irq(gfar_irq(grp, RX)->irq);
			disable_irq(gfar_irq(grp, ER)->irq);
			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
			enable_irq(gfar_irq(grp, ER)->irq);
			enable_irq(gfar_irq(grp, RX)->irq);
			enable_irq(gfar_irq(grp, TX)->irq);
		}
	} else {
		for (i = 0; i < priv->num_grps; i++) {
			struct gfar_priv_grp *grp = &priv->gfargrp[i];

			disable_irq(gfar_irq(grp, TX)->irq);
			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
			enable_irq(gfar_irq(grp, TX)->irq);
		}
	}
}
#endif

/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;

	/* Save ievent for future reference */
	u32 events = gfar_read(&gfargrp->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, grp_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, grp_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, grp_id);

	return IRQ_HANDLED;
}
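
/* Note: this combined handler is only installed by register_grp_irqs()
 * when the device lacks FSL_GIANFAR_DEV_HAS_MULTI_INTR; controllers with
 * separate IRQ lines get gfar_error(), gfar_transmit() and gfar_receive()
 * registered directly instead.
 */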

static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
{
	struct phy_device *phydev = priv->phydev;
	u32 val = 0;

	if (!phydev->duplex)
		return val;

	if (!priv->pause_aneg_en) {
		if (priv->tx_pause_en)
			val |= MACCFG1_TX_FLOW;
		if (priv->rx_pause_en)
			val |= MACCFG1_RX_FLOW;
	} else {
		u16 lcl_adv, rmt_adv;
		u8 flowctrl;
		/* get link partner capabilities */
		rmt_adv = 0;
		if (phydev->pause)
			rmt_adv = LPA_PAUSE_CAP;
		if (phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		lcl_adv = mii_advertise_flowctrl(phydev->advertising);

		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
		if (flowctrl & FLOW_CTRL_TX)
			val |= MACCFG1_TX_FLOW;
		if (flowctrl & FLOW_CTRL_RX)
			val |= MACCFG1_RX_FLOW;
	}

	return val;
}
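
/* Resolution example (illustrative): with pause autoneg enabled, if the
 * local side advertises symmetric pause and the partner reports
 * phydev->pause, mii_resolve_flowctrl_fdx() returns
 * FLOW_CTRL_TX | FLOW_CTRL_RX, so both MACCFG1_TX_FLOW and
 * MACCFG1_RX_FLOW end up set above.
 */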

/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	struct phy_device *phydev = priv->phydev;
	int new_state = 0;

	if (test_bit(GFAR_RESETTING, &priv->state))
		return;

	if (phydev->link) {
		u32 tempval1 = gfar_read(&regs->maccfg1);
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode.
		 */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);

				ecntrl &= ~(ECNTRL_R100);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100
				 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				netif_warn(priv, link, dev,
					   "Ack!  Speed (%d) is not 10/100/1000!\n",
					   phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
		tempval1 |= gfar_get_flowctrl_cfg(priv);

		gfar_write(&regs->maccfg1, tempval1);
		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);
}

/* Update the hash table based on the current list of multicast
 * addresses we subscribe to.  Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed
 */
static void gfar_set_multi(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	if (dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting them
		 */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if (netdev_mc_empty(dev))
			return;

		/* Parse the list, and set the appropriate bits */
		netdev_for_each_mc_addr(ha, dev) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx, ha->addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, ha->addr);
		}
	}
}

/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception
 */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};

	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
		gfar_set_mac_for_addr(dev, idx, zero_arr);
}

/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table.  The table is controlled through 8 32-bit registers:
 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255.  This means that the 3 most significant bits in the
 * hash index which gaddr register to use, and the 5 other bits
 * indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the entry.
 */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(ETH_ALEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31-whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
}
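
/* Worked example (illustrative, 8-bit hash width): a CRC result of
 * 0xB6000000 has top byte 0xB6, giving whichreg = 0xB6 >> 5 = 5 and
 * whichbit = 0xB6 & 0x1f = 22, so the code sets bit (31 - 22) = 9 of
 * hash_regs[5] -- bit 22 of gaddr5 in IBM numbering.
 */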

/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int idx;
	/* 2 bytes of zero padding so the second 32-bit read below
	 * stays within the buffer (ETH_ALEN is only 6)
	 */
	char tmpbuf[ETH_ALEN + 2] = {0};
	u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;

	macptr += num*2;

	/* Now copy it into the mac registers backwards, cuz
	 * little endian is silly
	 */
	for (idx = 0; idx < ETH_ALEN; idx++)
		tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];

	gfar_write(macptr, *((u32 *) (tmpbuf)));

	tempval = *((u32 *) (tmpbuf + 4));

	gfar_write(macptr+1, tempval);
}
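
/* Byte-order example (illustrative, big-endian CPU assumed): for the
 * address 00:04:9f:01:02:03, the reversed tmpbuf is
 * {03, 02, 01, 9f, 04, 00, 00, 00}, so MACSTNADDR1 is written with
 * 0x0302019f and the top half of MACSTNADDR2 with 0x0400.
 */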

/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_private *priv = gfargrp->priv;
	struct net_device *dev = priv->ndev;

	/* Save ievent for future reference */
	u32 events = gfar_read(&regs->ievent);

	/* Clear IEVENT */
	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	/* Hmm... */
	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		netdev_dbg(dev,
			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
			   events, gfar_read(&regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			unsigned long flags;

			netif_dbg(priv, tx_err, dev,
				  "TX FIFO underrun, packet dropped\n");
			dev->stats.tx_dropped++;
			atomic64_inc(&priv->extra_stats.tx_underrun);

			local_irq_save(flags);
			lock_tx_qs(priv);

			/* Reactivate the Tx Queues */
			gfar_write(&regs->tstat, gfargrp->tstat);

			unlock_tx_qs(priv);
			local_irq_restore(flags);
		}
		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_errors++;
		atomic64_inc(&priv->extra_stats.rx_bsy);

		gfar_receive(irq, grp_id);

		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
			  gfar_read(&regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		atomic64_inc(&priv->extra_stats.rx_babr);

		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
	}
	if (events & IEVENT_EBERR) {
		atomic64_inc(&priv->extra_stats.eberr);
		netif_dbg(priv, rx_err, dev, "bus error\n");
	}
	if (events & IEVENT_RXC)
		netif_dbg(priv, rx_status, dev, "control frame\n");

	if (events & IEVENT_BABT) {
		atomic64_inc(&priv->extra_stats.tx_babt);
		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
	}
	return IRQ_HANDLED;
}

static struct of_device_id gfar_match[] =
{
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{
		.compatible = "fsl,etsec2",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gfar_match);

/* Structure for a device driver */
static struct platform_driver gfar_driver = {
	.driver = {
		.name = "fsl-gianfar",
		.owner = THIS_MODULE,
		.pm = GFAR_PM_OPS,
		.of_match_table = gfar_match,
	},
	.probe = gfar_probe,
	.remove = gfar_remove,
};

module_platform_driver(gfar_driver);