/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 *  Gianfar:  AKA Lambda Draconis, "Dragon"
 *  RA 11 31 24.2
 *  Dec +69 19 52
 *  V 3.84
 *  B-V +1.62
 *
 *  Theory of operation
 *
 *  The driver is initialized through of_device. Configuration information
 *  is therefore conveyed through an OF-style device tree.
 *
 *  The Gianfar Ethernet Controller uses a ring of buffer
 *  descriptors.  The beginning is indicated by a register
 *  pointing to the physical address of the start of the ring.
 *  The end is determined by a "wrap" bit being set in the
 *  last descriptor of the ring.
 *
 *  When a packet is received, the RXF bit in the
 *  IEVENT register is set, triggering an interrupt when the
 *  corresponding bit in the IMASK register is also set (if
 *  interrupt coalescing is active, then the interrupt may not
 *  happen immediately, but will wait until either a set number
 *  of frames or amount of time have passed).  In NAPI, the
 *  interrupt handler will signal there is work to be done, and
 *  exit. This method will start at the last known empty
 *  descriptor, and process every subsequent descriptor until there
 *  are none left with data (NAPI will stop after a set number of
 *  packets to give time to other tasks, but will eventually
 *  process all the packets).  The data arrives inside a
 *  pre-allocated skb, and so after the skb is passed up to the
 *  stack, a new skb must be allocated, and the address field in
 *  the buffer descriptor must be updated to indicate this new
 *  skb.
 *
 *  When the kernel requests that a packet be transmitted, the
 *  driver starts where it left off last time, and points the
 *  descriptor at the buffer which was passed in.  The driver
 *  then informs the DMA engine that there are packets ready to
 *  be transmitted.  Once the controller is finished transmitting
 *  the packet, an interrupt may be triggered (under the same
 *  conditions as for reception, but depending on the TXF bit).
 *  The driver then cleans up the buffer.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DEBUG

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#include <asm/reg.h>
#include <asm/mpc85xx.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "gianfar.h"

#define TX_TIMEOUT      (1*HZ)

const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			   struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll_rx(struct napi_struct *napi, int budget);
static int gfar_poll_tx(struct napi_struct *napi, int budget);
static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			       int amount_pull, struct napi_struct *napi);
static void gfar_halt_nodisable(struct gfar_private *priv);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

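/* Set up a single Rx buffer descriptor: point it at its data buffer and
 * mark it empty for the controller, setting the WRAP flag on the last
 * descriptor of the ring.  The eieio() barrier orders the buffer pointer
 * write before the status write that hands the descriptor to hardware.
 */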
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = buf;

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	eieio();

	bdp->lstatus = lstatus;
}

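/* (Re)initialize all descriptor rings: reset each Tx queue's bookkeeping
 * and clear its descriptors, then make sure every Rx descriptor has a
 * receive skb behind it, allocating fresh ones where needed.
 */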
static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status |= TXBD_WRAP;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->cur_rx = rx_queue->rx_bd_base;
		rx_queue->skb_currx = 0;
		rxbdp = rx_queue->rx_bd_base;

		for (j = 0; j < rx_queue->rx_ring_size; j++) {
			struct sk_buff *skb = rx_queue->rx_skbuff[j];

			if (skb) {
				gfar_init_rxbdp(rx_queue, rxbdp,
						rxbdp->bufPtr);
			} else {
				skb = gfar_new_skb(ndev);
				if (!skb) {
					netdev_err(ndev, "Can't allocate RX buffers\n");
					return -ENOMEM;
				}
				rx_queue->rx_skbuff[j] = skb;

				gfar_new_rxbdp(rx_queue, rxbdp, skb);
			}

			rxbdp++;
		}
	}

	return 0;
}

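/* Allocate one DMA-coherent region holding every Tx and Rx descriptor
 * ring back to back, carve it up between the queues, allocate the
 * per-ring skb pointer arrays, and populate the rings via gfar_init_bds().
 */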
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j, k;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = priv->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
				   (priv->total_tx_ring_size *
				    sizeof(struct txbd8)) +
				   (priv->total_rx_ring_size *
				    sizeof(struct rxbd8)),
				   &addr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->dev = ndev;
		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff =
			kmalloc_array(tx_queue->tx_ring_size,
				      sizeof(*tx_queue->tx_skbuff),
				      GFP_KERNEL);
		if (!tx_queue->tx_skbuff)
			goto cleanup;

		for (k = 0; k < tx_queue->tx_ring_size; k++)
			tx_queue->tx_skbuff[k] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_skbuff =
			kmalloc_array(rx_queue->rx_ring_size,
				      sizeof(*rx_queue->rx_skbuff),
				      GFP_KERNEL);
		if (!rx_queue->rx_skbuff)
			goto cleanup;

		for (j = 0; j < rx_queue->rx_ring_size; j++)
			rx_queue->rx_skbuff[j] = NULL;
	}

	if (gfar_init_bds(ndev))
		goto cleanup;

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}

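/* Program each queue's descriptor ring base address into the TBASEn and
 * RBASEn registers; judging by the baddr += 2 stride below, consecutive
 * base registers sit two u32 slots (eight bytes) apart.
 */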
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}

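/* Size the Rx data buffers from the MTU plus the Ethernet header, adding
 * room for the frame control block (FCB) whenever an Rx hardware offload
 * needs one, plus any padding, then rounding up to the next
 * INCREMENTAL_BUFFER_SIZE boundary.
 */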
static void gfar_rx_buff_size_config(struct gfar_private *priv)
{
	int frame_size = priv->ndev->mtu + ETH_HLEN;

	/* set this when rx hw offload (TOE) functions are being used */
	priv->uses_rxfcb = 0;

	if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
		priv->uses_rxfcb = 1;

	if (priv->hwts_rx_en)
		priv->uses_rxfcb = 1;

	if (priv->uses_rxfcb)
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

	frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
		     INCREMENTAL_BUFFER_SIZE;

	priv->rx_buffer_size = frame_size;
}

static void gfar_mac_rx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN;
		/* Program the RIR0 reg with the required distribution */
		gfar_write(&regs->rir0, DEFAULT_RIR0);
	}

	/* Restore PROMISC mode */
	if (priv->ndev->flags & IFF_PROMISC)
		rctrl |= RCTRL_PROM;

	if (priv->ndev->features & NETIF_F_RXCSUM)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash)
		rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);
}

static void gfar_mac_tx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tctrl = 0;

	if (priv->ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	if (priv->prio_sched_en)
		tctrl |= TCTRL_TXSCHED_PRIO;
	else {
		tctrl |= TCTRL_TXSCHED_WRRS;
		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
	}

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
		tctrl |= TCTRL_VLINS;

	gfar_write(&regs->tctrl, tctrl);
}

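/* Write the interrupt coalescing settings for the queues selected by
 * tx_mask/rx_mask.  Multi-group (MQ_MG_MODE) devices have per-queue
 * TXICn/RXICn registers; older single-group devices expose only one
 * TXIC/RXIC pair, so just queue 0 is programmed there.
 */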
static void gfar_configure_coalescing(struct gfar_private *priv,
			       unsigned long tx_mask, unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;

	if (priv->mode == MQ_MG_MODE) {
		int i = 0;

		baddr = &regs->txic0;
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->tx_queue[i]->txcoalescing))
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
		}

		baddr = &regs->rxic0;
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->rx_queue[i]->rxcoalescing))
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
		}
	} else {
		/* Backward compatible case -- even if we enable
		 * multiple queues, there's only single reg to program
		 */
		gfar_write(&regs->txic, 0);
		if (likely(priv->tx_queue[0]->txcoalescing))
			gfar_write(&regs->txic, priv->tx_queue[0]->txic);

		gfar_write(&regs->rxic, 0);
		if (unlikely(priv->rx_queue[0]->rxcoalescing))
			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
	}
}

void gfar_configure_coalescing_all(struct gfar_private *priv)
{
	gfar_configure_coalescing(priv, 0xFF, 0xFF);
}

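/* Fold the per-queue packet and byte counters into the net_device
 * statistics block.
 */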
static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes   = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes   = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}

static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_features = gfar_set_features,
	.ndo_set_rx_mode = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};

static void gfar_ints_disable(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;
		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}
}

static void gfar_ints_enable(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;
		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}
}

void lock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_unlock(&priv->tx_queue[i]->txlock);
}

static int gfar_alloc_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i])
			return -ENOMEM;

		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = priv->ndev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}
	return 0;
}

static int gfar_alloc_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i])
			return -ENOMEM;

		priv->rx_queue[i]->rx_skbuff = NULL;
		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->dev = priv->ndev;
	}
	return 0;
}

static void gfar_free_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void gfar_free_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void free_gfar_dev(struct gfar_private *priv)
{
	int i, j;

	for (i = 0; i < priv->num_grps; i++)
		for (j = 0; j < GFAR_NUM_IRQS; j++) {
			kfree(priv->gfargrp[i].irqinfo[j]);
			priv->gfargrp[i].irqinfo[j] = NULL;
		}

	free_netdev(priv->ndev);
}

static void disable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_disable(&priv->gfargrp[i].napi_rx);
		napi_disable(&priv->gfargrp[i].napi_tx);
	}
}

static void enable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_enable(&priv->gfargrp[i].napi_rx);
		napi_enable(&priv->gfargrp[i].napi_tx);
	}
}

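/* Set up one interrupt group from its device tree node: map its register
 * block, look up its IRQs, read the Rx/Tx queue bit maps, and attach the
 * matching queues to the group.
 */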
static int gfar_parse_group(struct device_node *np,
			    struct gfar_private *priv, const char *model)
{
	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
	u32 *queue_mask;
	int i;

	for (i = 0; i < GFAR_NUM_IRQS; i++) {
		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
					  GFP_KERNEL);
		if (!grp->irqinfo[i])
			return -ENOMEM;
	}

	grp->regs = of_iomap(np, 0);
	if (!grp->regs)
		return -ENOMEM;

	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
		if (gfar_irq(grp, TX)->irq == NO_IRQ ||
		    gfar_irq(grp, RX)->irq == NO_IRQ ||
		    gfar_irq(grp, ER)->irq == NO_IRQ)
			return -EINVAL;
	}

	grp->priv = priv;
	spin_lock_init(&grp->grplock);
	if (priv->mode == MQ_MG_MODE) {
		queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
		grp->rx_bit_map = queue_mask ?
			*queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
		queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
		grp->tx_bit_map = queue_mask ?
			*queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
	} else {
		grp->rx_bit_map = 0xFF;
		grp->tx_bit_map = 0xFF;
	}

	/* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses
	 * right to left, so we need to reverse the 8 bits to get the q index
	 */
	grp->rx_bit_map = bitrev8(grp->rx_bit_map);
	grp->tx_bit_map = bitrev8(grp->tx_bit_map);

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups
	 */
	for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
		grp->num_rx_queues++;
		grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
		priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		priv->rx_queue[i]->grp = grp;
	}

	for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
		grp->num_tx_queues++;
		grp->tstat |= (TSTAT_CLEAR_THALT >> i);
		priv->tqueue |= (TQUEUE_EN0 >> i);
		priv->tx_queue[i]->grp = grp;
	}

	priv->num_grps++;

	return 0;
}

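/* Parse the controller's device tree node: queue counts, interrupt
 * groups, stashing parameters, MAC address, device model and PHY
 * handles, allocating the net_device and per-queue structures on the way.
 */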
static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	const u32 *stash;
	const u32 *stash_len;
	const u32 *stash_idx;
	unsigned int num_tx_qs, num_rx_qs;
	u32 *tx_queues, *rx_queues;

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	/* parse the num of tx and rx queues */
	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
	num_tx_qs = tx_queues ? *tx_queues : 1;

	if (num_tx_qs > MAX_TX_QS) {
		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
		       num_tx_qs, MAX_TX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
	num_rx_qs = rx_queues ? *rx_queues : 1;

	if (num_rx_qs > MAX_RX_QS) {
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
		       num_rx_qs, MAX_RX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->ndev = dev;

	priv->num_tx_queues = num_tx_qs;
	netif_set_real_num_rx_queues(dev, num_rx_qs);
	priv->num_rx_queues = num_rx_qs;

	err = gfar_alloc_tx_queues(priv);
	if (err)
		goto tx_alloc_failed;

	err = gfar_alloc_rx_queues(priv);
	if (err)
		goto rx_alloc_failed;

	/* Init Rx queue filer rule set linked list */
	INIT_LIST_HEAD(&priv->rx_list.list);
	priv->rx_list.count = 0;
	mutex_init(&priv->rx_queue_access);

	model = of_get_property(np, "model", NULL);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (of_device_is_compatible(np, "fsl,etsec2")) {
		priv->mode = MQ_MG_MODE;
		for_each_child_of_node(np, child) {
			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else {
		priv->mode = SQ_SG_MODE;
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	stash = of_get_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	stash_len = of_get_property(np, "rx-stash-len", NULL);

	if (stash_len)
		priv->rx_stash_size = *stash_len;

	stash_idx = of_get_property(np, "rx-stash-idx", NULL);

	if (stash_idx)
		priv->rx_stash_index = *stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);

	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				     FSL_GIANFAR_DEV_HAS_COALESCE |
				     FSL_GIANFAR_DEV_HAS_RMON |
				     FSL_GIANFAR_DEV_HAS_MULTI_INTR;

	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				     FSL_GIANFAR_DEV_HAS_COALESCE |
				     FSL_GIANFAR_DEV_HAS_RMON |
				     FSL_GIANFAR_DEV_HAS_MULTI_INTR |
				     FSL_GIANFAR_DEV_HAS_CSUM |
				     FSL_GIANFAR_DEV_HAS_VLAN |
				     FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
				     FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
				     FSL_GIANFAR_DEV_HAS_TIMER;

	ctype = of_get_property(np, "phy-connection-type", NULL);

	/* We only care about rgmii-id.  The rest are autodetected */
	if (ctype && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* Find the TBI PHY.  If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

err_grp_init:
	unmap_group_regs(priv);
rx_alloc_failed:
	gfar_free_rx_queues(priv);
tx_alloc_failed:
	gfar_free_tx_queues(priv);
	free_gfar_dev(priv);
	return err;
}

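/* SIOCSHWTSTAMP handler: validate the requested Tx/Rx hardware time
 * stamping modes against FSL_GIANFAR_DEV_HAS_TIMER and reset the
 * controller whenever the Rx time stamping state actually changes.
 */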
static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		if (priv->hwts_rx_en) {
			priv->hwts_rx_en = 0;
			reset_gfar(netdev);
		}
		break;
	default:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		if (!priv->hwts_rx_en) {
			priv->hwts_rx_en = 1;
			reset_gfar(netdev);
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	config.flags = 0;
	config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config.rx_filter = (priv->hwts_rx_en ?
			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCSHWTSTAMP)
		return gfar_hwtstamp_set(dev, rq);
	if (cmd == SIOCGHWTSTAMP)
		return gfar_hwtstamp_get(dev, rq);

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, rq, cmd);
}

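/* Install the filer entries that classify one traffic class (IPv4/IPv6,
 * optionally TCP/UDP) for Rx queue steering.  Entries are written from
 * higher table indices downwards; the new lowest used index is returned
 * so further classes can be stacked below it.
 */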
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
				   u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}

static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		priv->ftp_rqfcr[i] = rqfcr;
		priv->ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}

static void __gfar_detect_errata_83xx(struct gfar_private *priv)
{
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
	unsigned int rev = svr & 0xffff;

	/* MPC8313 Rev 2.0 and higher; All MPC837x */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_74;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_76;

	/* MPC8313 Rev < 2.0 */
	if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
		priv->errata |= GFAR_ERRATA_12;
}

static void __gfar_detect_errata_85xx(struct gfar_private *priv)
{
	unsigned int svr = mfspr(SPRN_SVR);

	if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
		priv->errata |= GFAR_ERRATA_12;
	if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
	    ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
		priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
}

static void gfar_detect_errata(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;

	/* no plans to fix */
	priv->errata |= GFAR_ERRATA_A002;

	if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
		__gfar_detect_errata_85xx(priv);
	else /* non-mpc85xx parts, i.e. e300 core based */
		__gfar_detect_errata_83xx(priv);

	if (priv->errata)
		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
			 priv->errata);
}

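/* Soft-reset the MAC and reprogram it: frame length limits, buffer
 * sizes, hash filters, Rx/Tx control, station address, multicast state
 * and interrupt coalescing.
 */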
void gfar_mac_reset(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(3);

	/* the soft reset bit is not self-resetting, so we need to
	 * clear it before resuming normal operation
	 */
	gfar_write(&regs->maccfg1, 0);

	udelay(3);

	/* Compute rx_buff_size based on config flags */
	gfar_rx_buff_size_config(priv);

	/* Initialize the max receive frame/buffer lengths */
	gfar_write(&regs->maxfrm, priv->rx_buffer_size);
	gfar_write(&regs->mrblr, priv->rx_buffer_size);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);

	/* Initialize MACCFG2. */
	tempval = MACCFG2_INIT_SETTINGS;

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (i.e., a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length
	 */
	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
	    gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;

	gfar_write(&regs->maccfg2, tempval);

	/* Clear mac addr hash registers */
	gfar_write(&regs->igaddr0, 0);
	gfar_write(&regs->igaddr1, 0);
	gfar_write(&regs->igaddr2, 0);
	gfar_write(&regs->igaddr3, 0);
	gfar_write(&regs->igaddr4, 0);
	gfar_write(&regs->igaddr5, 0);
	gfar_write(&regs->igaddr6, 0);
	gfar_write(&regs->igaddr7, 0);

	gfar_write(&regs->gaddr0, 0);
	gfar_write(&regs->gaddr1, 0);
	gfar_write(&regs->gaddr2, 0);
	gfar_write(&regs->gaddr3, 0);
	gfar_write(&regs->gaddr4, 0);
	gfar_write(&regs->gaddr5, 0);
	gfar_write(&regs->gaddr6, 0);
	gfar_write(&regs->gaddr7, 0);

	if (priv->extended_hash)
		gfar_clear_exact_match(priv->ndev);

	gfar_mac_rx_config(priv);

	gfar_mac_tx_config(priv);

	gfar_set_mac_address(priv->ndev);

	gfar_set_multi(priv->ndev);

	/* clear ievent and imask before configuring coalescing */
	gfar_ints_disable(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing_all(priv);
}

static void gfar_hw_init(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 attrs;

	/* Stop the DMA engine now, in case it was running before
	 * (The firmware could have used it, and left it running).
	 */
	gfar_halt(priv);

	gfar_mac_reset(priv);

	/* Zero out the rmon mib registers if it has them */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&regs->rmon.cam1, 0xffffffff);
		gfar_write(&regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize ECNTRL */
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing
	 * depending on driver parameters
	 */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	/* FIFO configs */
	gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
	gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
	gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);

	/* Program the interrupt steering regs, only for MG devices */
	if (priv->num_grps > 1)
		gfar_write_isrg(priv);
}

static void __init gfar_init_addr_hash_table(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;

	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}
}

/* Set up the ethernet device structure, private data,
 * and anything else we need before we start
 */
static int gfar_probe(struct platform_device *ofdev)
{
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	int err = 0, i;

	err = gfar_of_init(ofdev, &dev);

	if (err)
		return err;

	priv = netdev_priv(dev);
	priv->ndev = dev;
	priv->ofdev = ofdev;
	priv->dev = &ofdev->dev;
	SET_NETDEV_DEV(dev, &ofdev->dev);

	spin_lock_init(&priv->bflock);
	INIT_WORK(&priv->reset_task, gfar_reset_task);

	platform_set_drvdata(ofdev, priv);

	gfar_detect_errata(priv);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) priv->gfargrp[0].regs;

	/* Fill in the dev structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->mtu = 1500;
	dev->netdev_ops = &gfar_netdev_ops;
	dev->ethtool_ops = &gfar_ethtool_ops;

	/* Register for napi ... We are registering NAPI for each grp */
	if (priv->mode == SQ_SG_MODE) {
		netif_napi_add(dev, &priv->gfargrp[0].napi_rx, gfar_poll_rx_sq,
			       GFAR_DEV_WEIGHT);
		netif_napi_add(dev, &priv->gfargrp[0].napi_tx, gfar_poll_tx_sq,
			       2);
	} else {
		for (i = 0; i < priv->num_grps; i++) {
			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
				       gfar_poll_rx, GFAR_DEV_WEIGHT);
			netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
				       gfar_poll_tx, 2);
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
				   NETIF_F_RXCSUM;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
				 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_HW_VLAN_CTAG_RX;
		dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
	}

	gfar_init_addr_hash_table(priv);

	/* Insert receive time stamps into padding alignment bytes */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		priv->padding = 8;

	if (dev->features & NETIF_F_IP_CSUM ||
	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		dev->needed_headroom = GMAC_FCB_LEN;

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;

	/* Initializing some of the rx/tx queue level parameters */
	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
		priv->tx_queue[i]->txic = DEFAULT_TXIC;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
	}

	/* always enable rx filer */
	priv->rx_filer_enable = 1;
	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
	/* use priority h/w tx queue scheduling for single queue devices */
	if (priv->num_tx_queues == 1)
		priv->prio_sched_en = 1;

	set_bit(GFAR_DOWN, &priv->state);

	gfar_hw_init(priv);

	err = register_netdev(dev);

	if (err) {
		pr_err("%s: Cannot register net device, aborting\n", dev->name);
		goto register_fail;
	}

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	device_init_wakeup(&dev->dev,
			   priv->device_flags &
			   FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	/* fill out IRQ number and name fields */
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar_priv_grp *grp = &priv->gfargrp[i];
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
			sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_tx");
			sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_rx");
			sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_er");
		} else
			strcpy(gfar_irq(grp, TX)->name, dev->name);
	}

	/* Initialize the filer table */
	gfar_init_filer_table(priv);

	/* Print out the device info */
	netdev_info(dev, "mac: %pM\n", dev->dev_addr);

	/* Even more device info helps when determining which kernel
	 * provided which set of benchmarks.
	 */
	netdev_info(dev, "Running with NAPI enabled\n");
	for (i = 0; i < priv->num_rx_queues; i++)
		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
			    i, priv->rx_queue[i]->rx_ring_size);
	for (i = 0; i < priv->num_tx_queues; i++)
		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
			    i, priv->tx_queue[i]->tx_ring_size);

	return 0;

register_fail:
	unmap_group_regs(priv);
	gfar_free_rx_queues(priv);
	gfar_free_tx_queues(priv);
	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);
	free_gfar_dev(priv);
	return err;
}

static int gfar_remove(struct platform_device *ofdev)
{
	struct gfar_private *priv = platform_get_drvdata(ofdev);

	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);

	unregister_netdev(priv->ndev);
	unmap_group_regs(priv);
	gfar_free_rx_queues(priv);
	gfar_free_tx_queues(priv);
	free_gfar_dev(priv);

	return 0;
}

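/* Power management callbacks.  On suspend the controller is halted and,
 * when wake-on-LAN by magic packet is enabled, the Rx path is left
 * running with the magic-packet interrupt unmasked so it can wake the
 * system; resume undoes this and restarts the interface.
 */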
#ifdef CONFIG_PM

static int gfar_suspend(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;

	int magic_packet = priv->wol_en &&
			   (priv->device_flags &
			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	netif_device_detach(ndev);

	if (netif_running(ndev)) {

		local_irq_save(flags);
		lock_tx_qs(priv);

		gfar_halt_nodisable(priv);

		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
		tempval = gfar_read(&regs->maccfg1);

		tempval &= ~MACCFG1_TX_EN;

		if (!magic_packet)
			tempval &= ~MACCFG1_RX_EN;

		gfar_write(&regs->maccfg1, tempval);

		unlock_tx_qs(priv);
		local_irq_restore(flags);

		disable_napi(priv);

		if (magic_packet) {
			/* Enable interrupt on Magic Packet */
			gfar_write(&regs->imask, IMASK_MAG);

			/* Enable Magic Packet mode */
			tempval = gfar_read(&regs->maccfg2);
			tempval |= MACCFG2_MPEN;
			gfar_write(&regs->maccfg2, tempval);
		} else {
			phy_stop(priv->phydev);
		}
	}

	return 0;
}

static int gfar_resume(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;
	int magic_packet = priv->wol_en &&
			   (priv->device_flags &
			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);
		return 0;
	}

	if (!magic_packet && priv->phydev)
		phy_start(priv->phydev);

	/* Disable Magic Packet mode, in case something
	 * else woke us up.
	 */
	local_irq_save(flags);
	lock_tx_qs(priv);

	tempval = gfar_read(&regs->maccfg2);
	tempval &= ~MACCFG2_MPEN;
	gfar_write(&regs->maccfg2, tempval);

	gfar_start(priv);

	unlock_tx_qs(priv);
	local_irq_restore(flags);

	netif_device_attach(ndev);

	enable_napi(priv);

	return 0;
}

static int gfar_restore(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);

		return 0;
	}

	if (gfar_init_bds(ndev)) {
		free_skb_resources(priv);
		return -ENOMEM;
	}

	gfar_mac_reset(priv);

	gfar_init_tx_rx_base(priv);

	gfar_start(priv);

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	if (priv->phydev)
		phy_start(priv->phydev);

	netif_device_attach(ndev);
	enable_napi(priv);

	return 0;
}

static struct dev_pm_ops gfar_pm_ops = {
	.suspend = gfar_suspend,
	.resume = gfar_resume,
	.freeze = gfar_suspend,
	.thaw = gfar_resume,
	.restore = gfar_restore,
};

#define GFAR_PM_OPS (&gfar_pm_ops)

#else

#define GFAR_PM_OPS NULL

#endif

/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 ecntrl;

	ecntrl = gfar_read(&regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
			return PHY_INTERFACE_MODE_RMII;
		} else {
			phy_interface_t interface = priv->interface;

			/* This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}

/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	uint gigabit_support =
		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
		GFAR_SUPPORTED_GBIT : 0;
	phy_interface_t interface;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	interface = gfar_get_interface(dev);

	priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
				      interface);
	if (!priv->phydev)
		priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
							 interface);
	if (!priv->phydev) {
		dev_err(&dev->dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

	/* Remove any features not supported by the controller */
	priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
	priv->phydev->advertising = priv->phydev->supported;

	return 0;
}

/* Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip.  We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register.  We assume
 * that the TBIPA register is valid.  Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *tbiphy;

	if (!priv->tbi_node) {
		dev_warn(&dev->dev, "error: SGMII mode requires that the "
				    "device tree specify a tbi-handle\n");
		return;
	}

	tbiphy = of_phy_find_device(priv->tbi_node);
	if (!tbiphy) {
		dev_err(&dev->dev, "error: Could not get TBI device\n");
		return;
	}

	/* If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
	 * everything for us?  Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
		return;

	/* Single clk mode, mii mode off (for serdes communication) */
	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);

	phy_write(tbiphy, MII_ADVERTISE,
		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
		  ADVERTISE_1000XPSE_ASYM);

	phy_write(tbiphy, MII_BMCR,
		  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
		  BMCR_SPEED1000);
}

static int __gfar_is_rx_idle(struct gfar_private *priv)
{
	u32 res;

	/* Normally TSEC should not hang on GRS commands, so we should
	 * actually wait for IEVENT_GRSC flag.
	 */
	if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
		return 0;

	/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
	 * and the Rx can be safely reset.
	 */
	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
	res &= 0x7f807f80;
	if ((res & 0xffff) == (res >> 16))
		return 1;

	return 0;
}

/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	gfar_ints_disable(priv);

	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&regs->dmactrl);
	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
	    (DMACTRL_GRS | DMACTRL_GTS)) {
		int ret;

		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
		gfar_write(&regs->dmactrl, tempval);

		do {
			ret = spin_event_timeout(((gfar_read(&regs->ievent) &
				 (IEVENT_GRSC | IEVENT_GTSC)) ==
				 (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
			if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
				ret = __gfar_is_rx_idle(priv);
		} while (!ret);
	}
}

/* Halt the receive and transmit queues */
void gfar_halt(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	/* Disable the Rx/Tx hw queues */
	gfar_write(&regs->rqueue, 0);
	gfar_write(&regs->tqueue, 0);

	mdelay(10);

	gfar_halt_nodisable(priv);

	/* Disable Rx/Tx DMA */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}

void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	netif_tx_stop_all_queues(dev);

	smp_mb__before_clear_bit();
	set_bit(GFAR_DOWN, &priv->state);
	smp_mb__after_clear_bit();

	disable_napi(priv);

	/* disable ints and gracefully shut down Rx/Tx DMA */
	gfar_halt(priv);

	phy_stop(priv->phydev);

	free_skb_resources(priv);
}

static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
{
	struct txbd8 *txbdp;
	struct gfar_private *priv = netdev_priv(tx_queue->dev);
	int i, j;

	txbdp = tx_queue->tx_bd_base;

	for (i = 0; i < tx_queue->tx_ring_size; i++) {
		if (!tx_queue->tx_skbuff[i])
			continue;

		dma_unmap_single(priv->dev, txbdp->bufPtr,
				 txbdp->length, DMA_TO_DEVICE);
		txbdp->lstatus = 0;
		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
		     j++) {
			txbdp++;
			dma_unmap_page(priv->dev, txbdp->bufPtr,
				       txbdp->length, DMA_TO_DEVICE);
		}
		txbdp++;
		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
		tx_queue->tx_skbuff[i] = NULL;
	}
	kfree(tx_queue->tx_skbuff);
	tx_queue->tx_skbuff = NULL;
}

static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
{
	struct rxbd8 *rxbdp;
	struct gfar_private *priv = netdev_priv(rx_queue->dev);
	int i;

	rxbdp = rx_queue->rx_bd_base;

	for (i = 0; i < rx_queue->rx_ring_size; i++) {
		if (rx_queue->rx_skbuff[i]) {
			dma_unmap_single(priv->dev, rxbdp->bufPtr,
					 priv->rx_buffer_size,
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
			rx_queue->rx_skbuff[i] = NULL;
		}
		rxbdp->lstatus = 0;
		rxbdp->bufPtr = 0;
		rxbdp++;
	}
	kfree(rx_queue->rx_skbuff);
	rx_queue->rx_skbuff = NULL;
}

/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff
 */
static void free_skb_resources(struct gfar_private *priv)
{
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	for (i = 0; i < priv->num_tx_queues; i++) {
		struct netdev_queue *txq;

		tx_queue = priv->tx_queue[i];
		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
		if (tx_queue->tx_skbuff)
			free_skb_tx_queue(tx_queue);
		netdev_tx_reset_queue(txq);
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		if (rx_queue->rx_skbuff)
			free_skb_rx_queue(rx_queue);
	}

	dma_free_coherent(priv->dev,
			  sizeof(struct txbd8) * priv->total_tx_ring_size +
			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
			  priv->tx_queue[0]->tx_bd_base,
			  priv->tx_queue[0]->tx_bd_dma_base);
}

void gfar_start(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	int i = 0;

	/* Enable Rx/Tx hw queues */
	gfar_write(&regs->rqueue, priv->rqueue);
	gfar_write(&regs->tqueue, priv->tqueue);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear THLT/RHLT, so that the DMA starts polling now */
		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
	}

	/* Enable Rx/Tx DMA */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	gfar_ints_enable(priv);

	priv->ndev->trans_start = jiffies; /* prevent tx timeout */
}

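/* Release the Tx, Rx and Error interrupt lines of one interrupt group. */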
static void free_grp_irqs(struct gfar_priv_grp *grp)
{
	free_irq(gfar_irq(grp, TX)->irq, grp);
	free_irq(gfar_irq(grp, RX)->irq, grp);
	free_irq(gfar_irq(grp, ER)->irq, grp);
}

static int register_grp_irqs(struct gfar_priv_grp *grp)
{
	struct gfar_private *priv = grp->priv;
	struct net_device *dev = priv->ndev;
	int err;

	/* If the device has multiple interrupts, register for
	 * them.  Otherwise, only register for the one
	 */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive
		 */
		err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
				  gfar_irq(grp, ER)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, ER)->irq);

			goto err_irq_fail;
		}
		err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
				  gfar_irq(grp, TX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, TX)->irq);
			goto tx_irq_fail;
		}
		err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
				  gfar_irq(grp, RX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, RX)->irq);
			goto rx_irq_fail;
		}
	} else {
		err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
				  gfar_irq(grp, TX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, TX)->irq);
			goto err_irq_fail;
		}
	}

	return 0;

rx_irq_fail:
	free_irq(gfar_irq(grp, TX)->irq, grp);
tx_irq_fail:
	free_irq(gfar_irq(grp, ER)->irq, grp);
err_irq_fail:
	return err;
}

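/* Undo gfar_request_irq(): release every IRQ the driver holds, three
 * per group on multi-interrupt devices, one per group otherwise.
 */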
static void gfar_free_irq(struct gfar_private *priv)
{
	int i;

	/* Free the IRQs */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++)
			free_grp_irqs(&priv->gfargrp[i]);
	} else {
		for (i = 0; i < priv->num_grps; i++)
			free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
				 &priv->gfargrp[i]);
	}
}

static int gfar_request_irq(struct gfar_private *priv)
{
	int err, i, j;

	for (i = 0; i < priv->num_grps; i++) {
		err = register_grp_irqs(&priv->gfargrp[i]);
		if (err) {
			for (j = 0; j < i; j++)
				free_grp_irqs(&priv->gfargrp[j]);
			return err;
		}
	}

	return 0;
}

/* Bring the controller up and running */
int startup_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	int err;

	gfar_mac_reset(priv);

	err = gfar_alloc_skb_resources(ndev);
	if (err)
		return err;

	gfar_init_tx_rx_base(priv);

	smp_mb__before_clear_bit();
	clear_bit(GFAR_DOWN, &priv->state);
	smp_mb__after_clear_bit();

	/* Start Rx/Tx DMA and enable the interrupts */
	gfar_start(priv);

	phy_start(priv->phydev);

	enable_napi(priv);

	netif_tx_wake_all_queues(ndev);

	return 0;
}

/* Called when something needs to use the ethernet device
 * Returns 0 for success.
 */
static int gfar_enet_open(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err;

	err = init_phy(dev);
	if (err)
		return err;

	err = gfar_request_irq(priv);
	if (err)
		return err;

	err = startup_gfar(dev);
	if (err)
		return err;

	device_set_wakeup_enable(&dev->dev, priv->wol_en);

	return err;
}

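/* Reserve room for a Tx Frame Control Block in the skb headroom and
 * zero it out before the caller fills in the offload fields.
 */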
static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;
}

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
				    int fcb_length)
{
	/* If we're here, it's an IP packet with a TCP or UDP
	 * payload.  We set it to checksum, using a pseudo-header
	 * we provide
	 */
	u8 flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is
	 * And provide the already calculated phcs
	 */
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = udp_hdr(skb)->check;
	} else
		fcb->phcs = tcp_hdr(skb)->check;

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr
	 */
	fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
	fcb->l4os = skb_network_header_len(skb);

	fcb->flags = flags;
}

inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = vlan_tx_tag_get(skb);
}

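/* Advance a TxBD pointer by 'stride' ring entries, wrapping back to the
 * base of the ring when the end is passed.
 */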
static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
				      struct txbd8 *base, int ring_size)
{
	struct txbd8 *new_bd = bdp + stride;

	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
}

static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
				      int ring_size)
{
	return skip_txbd(bdp, 1, base, ring_size);
}

/* eTSEC12: csum generation not supported for some fcb offsets */
static inline bool gfar_csum_errata_12(struct gfar_private *priv,
				       unsigned long fcb_addr)
{
	return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
	       (fcb_addr % 0x20) > 0x18);
}

/* eTSEC76: csum generation for frames larger than 2500 may
 * cause excess delays before start of transmission
 */
static inline bool gfar_csum_errata_76(struct gfar_private *priv,
				       unsigned int len)
{
	return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
	       (len > 2500));
}

/* This is called by the kernel when a frame is ready for transmission.
 * It is pointed to by the dev->hard_start_xmit function pointer
 */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct netdev_queue *txq;
	struct gfar __iomem *regs = NULL;
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
	u32 lstatus;
	int i, rq = 0;
	int do_tstamp, do_csum, do_vlan;
	u32 bufaddr;
	unsigned long flags;
	unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;

	rq = skb->queue_mapping;
	tx_queue = priv->tx_queue[rq];
	txq = netdev_get_tx_queue(dev, rq);
	base = tx_queue->tx_bd_base;
	regs = tx_queue->grp->regs;

	do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
	do_vlan = vlan_tx_tag_present(skb);
	do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		    priv->hwts_tx_en;

	if (do_csum || do_vlan)
		fcb_len = GMAC_FCB_LEN;

	/* check if time stamp should be generated */
	if (unlikely(do_tstamp))
		fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;

	/* make space for additional header when fcb is needed */
	if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
		struct sk_buff *skb_new;

		skb_new = skb_realloc_headroom(skb, fcb_len);
		if (!skb_new) {
			dev->stats.tx_errors++;
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		if (skb->sk)
			skb_set_owner_w(skb_new, skb->sk);
		consume_skb(skb);
		skb = skb_new;
	}

	/* total number of fragments in the SKB */
	nr_frags = skb_shinfo(skb)->nr_frags;

	/* calculate the required number of TxBDs for this skb */
	if (unlikely(do_tstamp))
		nr_txbds = nr_frags + 2;
	else
		nr_txbds = nr_frags + 1;

	/* check if there is space to queue this packet */
	if (nr_txbds > tx_queue->num_txbdfree) {
		/* no space, stop the queue */
		netif_tx_stop_queue(txq);
		dev->stats.tx_fifo_errors++;
		return NETDEV_TX_BUSY;
	}

	/* Update transmit stats */
	bytes_sent = skb->len;
	tx_queue->stats.tx_bytes += bytes_sent;
	/* keep Tx bytes on wire for BQL accounting */
	GFAR_CB(skb)->bytes_sent = bytes_sent;
	tx_queue->stats.tx_packets++;

	txbdp = txbdp_start = tx_queue->cur_tx;
	lstatus = txbdp->lstatus;

	/* Time stamp insertion requires one additional TxBD */
	if (unlikely(do_tstamp))
		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
						 tx_queue->tx_ring_size);

	if (nr_frags == 0) {
		if (unlikely(do_tstamp))
			txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
							  TXBD_INTERRUPT);
		else
			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
	} else {
		/* Place the fragment addresses and lengths into the TxBDs */
		for (i = 0; i < nr_frags; i++) {
			unsigned int frag_len;

			/* Point at the next BD, wrapping as needed */
			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);

			frag_len = skb_shinfo(skb)->frags[i].size;

			lstatus = txbdp->lstatus | frag_len |
				  BD_LFLAG(TXBD_READY);

			/* Handle the last BD specially */
			if (i == nr_frags - 1)
				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

			bufaddr = skb_frag_dma_map(priv->dev,
						   &skb_shinfo(skb)->frags[i],
						   0,
						   frag_len,
						   DMA_TO_DEVICE);

			/* set the TxBD length and buffer pointer */
			txbdp->bufPtr = bufaddr;
			txbdp->lstatus = lstatus;
		}

		lstatus = txbdp_start->lstatus;
	}

	/* Add TxPAL between FCB and frame if required */
	if (unlikely(do_tstamp)) {
		skb_push(skb, GMAC_TXPAL_LEN);
		memset(skb->data, 0, GMAC_TXPAL_LEN);
	}

	/* Add TxFCB if required */
	if (fcb_len) {
		fcb = gfar_add_fcb(skb);
		lstatus |= BD_LFLAG(TXBD_TOE);
	}

	/* Set up checksumming */
	if (do_csum) {
		gfar_tx_checksum(skb, fcb, fcb_len);

		if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
		    unlikely(gfar_csum_errata_76(priv, skb->len))) {
			__skb_pull(skb, GMAC_FCB_LEN);
			skb_checksum_help(skb);
			if (do_vlan || do_tstamp) {
				/* put back a new fcb for vlan/tstamp TOE */
				fcb = gfar_add_fcb(skb);
			} else {
				/* Tx TOE not used */
				lstatus &= ~(BD_LFLAG(TXBD_TOE));
				fcb = NULL;
			}
		}
	}

	if (do_vlan)
		gfar_tx_vlan(skb, fcb);

	/* Setup tx hardware time stamping if requested */
	if (unlikely(do_tstamp)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		fcb->ptp = 1;
	}

	txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data,
					     skb_headlen(skb), DMA_TO_DEVICE);

	/* If time stamping is requested one additional TxBD must be set up. The
	 * first TxBD points to the FCB and must have a data length of
	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
	 * the full frame length.
	 */
	if (unlikely(do_tstamp)) {
		txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_len;
		txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
					 (skb_headlen(skb) - fcb_len);
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
	} else {
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
	}

	netdev_tx_sent_queue(txq, bytes_sent);

	/* We can work in parallel with gfar_clean_tx_ring(), except
	 * when modifying num_txbdfree. Note that we didn't grab the lock
	 * when we were reading the num_txbdfree and checking for available
	 * space, that's because outside of this function it can only grow,
	 * and once we've got needed space, it cannot suddenly disappear.
	 *
	 * The lock also protects us from gfar_error(), which can modify
	 * regs->tstat and thus retrigger the transfers, which is why we
	 * also must grab the lock before setting ready bit for the first
	 * to be transmitted BD.
	 */
	spin_lock_irqsave(&tx_queue->txlock, flags);

	/* The powerpc-specific eieio() is used, as wmb() has too strong
	 * semantics (it requires synchronization between cacheable and
	 * uncacheable mappings, which eieio doesn't provide and which we
	 * don't need), thus requiring a more expensive sync instruction.  At
	 * some point, the set of architecture-independent barrier functions
	 * should be expanded to include weaker barriers.
	 */
	eieio();

	txbdp_start->lstatus = lstatus;

	eieio(); /* force lstatus write before tx_skbuff */

	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;

	/* Update the current skb pointer to the next entry we will use
	 * (wrapping if necessary)
	 */
	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
			      TX_RING_MOD_MASK(tx_queue->tx_ring_size);

	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);

	/* reduce TxBD free count */
	tx_queue->num_txbdfree -= (nr_txbds);

	/* If the next BD still needs to be cleaned up, then the bds
	 * are full.  We need to tell the kernel to stop sending us stuff.
	 */
	if (!tx_queue->num_txbdfree) {
		netif_tx_stop_queue(txq);

		dev->stats.tx_fifo_errors++;
	}

	/* Tell the DMA to go go go */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);

	/* Unlock priv */
	spin_unlock_irqrestore(&tx_queue->txlock, flags);

	return NETDEV_TX_OK;
}

/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	cancel_work_sync(&priv->reset_task);
	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	gfar_free_irq(priv);

	return 0;
}

/* Changes the mac address if the controller is not running. */
2373
static int gfar_set_mac_address(struct net_device *dev)
L
Linus Torvalds 已提交
2374
{
2375
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
L
Linus Torvalds 已提交
2376 2377 2378 2379 2380 2381 2382

	return 0;
}

static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	struct gfar_private *priv = netdev_priv(dev);
	int frame_size = new_mtu + ETH_HLEN;

	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
		netif_err(priv, drv, dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	if (dev->flags & IFF_UP)
		stop_gfar(dev);

	dev->mtu = new_mtu;

	if (dev->flags & IFF_UP)
		startup_gfar(dev);

	clear_bit_unlock(GFAR_RESETTING, &priv->state);

	return 0;
}

void reset_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	stop_gfar(ndev);
	startup_gfar(ndev);

	clear_bit_unlock(GFAR_RESETTING, &priv->state);
}

/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
	struct gfar_private *priv = container_of(work, struct gfar_private,
						 reset_task);
	reset_gfar(priv->ndev);
}

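/* Invoked by the network stack when a Tx queue stalls past the watchdog
 * timeout; defer the actual reset to process context via reset_task.
 */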
static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);
}

static void gfar_align_skb(struct sk_buff *skb)
{
	/* We need the data buffer to be aligned properly.  We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb, RXBUF_ALIGNMENT -
		    (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
}

/* Interrupt Handler for Transmit complete */
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
	struct net_device *dev = tx_queue->dev;
	struct netdev_queue *txq;
	struct gfar_private *priv = netdev_priv(dev);
	struct txbd8 *bdp, *next = NULL;
	struct txbd8 *lbdp = NULL;
	struct txbd8 *base = tx_queue->tx_bd_base;
	struct sk_buff *skb;
	int skb_dirtytx;
	int tx_ring_size = tx_queue->tx_ring_size;
	int frags = 0, nr_txbds = 0;
	int i;
	int howmany = 0;
	int tqi = tx_queue->qindex;
	unsigned int bytes_sent = 0;
	u32 lstatus;
	size_t buflen;

	txq = netdev_get_tx_queue(dev, tqi);
	bdp = tx_queue->dirty_tx;
	skb_dirtytx = tx_queue->skb_dirtytx;

	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
		unsigned long flags;

		frags = skb_shinfo(skb)->nr_frags;

		/* When time stamping, one additional TxBD must be freed.
		 * Also, we need to dma_unmap_single() the TxPAL.
		 */
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
			nr_txbds = frags + 2;
		else
			nr_txbds = frags + 1;

		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);

		lstatus = lbdp->lstatus;

		/* Only clean completed frames */
		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
		    (lstatus & BD_LENGTH_MASK))
			break;

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			next = next_txbd(bdp, base, tx_ring_size);
			buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN;
		} else
			buflen = bdp->length;

		dma_unmap_single(priv->dev, bdp->bufPtr,
				 buflen, DMA_TO_DEVICE);

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			struct skb_shared_hwtstamps shhwtstamps;
			u64 *ns = (u64 *)(((u32)skb->data + 0x10) & ~0x7);

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
			skb_tstamp_tx(skb, &shhwtstamps);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next;
		}

		bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
		bdp = next_txbd(bdp, base, tx_ring_size);

		for (i = 0; i < frags; i++) {
			dma_unmap_page(priv->dev, bdp->bufPtr,
				       bdp->length, DMA_TO_DEVICE);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next_txbd(bdp, base, tx_ring_size);
		}

		bytes_sent += GFAR_CB(skb)->bytes_sent;

		dev_kfree_skb_any(skb);

		tx_queue->tx_skbuff[skb_dirtytx] = NULL;

		skb_dirtytx = (skb_dirtytx + 1) &
			      TX_RING_MOD_MASK(tx_ring_size);

		howmany++;
		spin_lock_irqsave(&tx_queue->txlock, flags);
		tx_queue->num_txbdfree += nr_txbds;
		spin_unlock_irqrestore(&tx_queue->txlock, flags);
	}

	/* If we freed a buffer, we can restart transmission, if necessary */
	if (tx_queue->num_txbdfree &&
	    netif_tx_queue_stopped(txq) &&
	    !(test_bit(GFAR_DOWN, &priv->state)))
		netif_wake_subqueue(priv->ndev, tqi);

	/* Update dirty indicators */
	tx_queue->skb_dirtytx = skb_dirtytx;
	tx_queue->dirty_tx = bdp;

	netdev_tx_completed_queue(txq, howmany, bytes_sent);
}

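/* Map a fresh Rx buffer for inbound DMA and (re)initialize the given
 * Rx descriptor to point at it.
 */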
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			   struct sk_buff *skb)
{
	struct net_device *dev = rx_queue->dev;
	struct gfar_private *priv = netdev_priv(dev);
	dma_addr_t buf;

	buf = dma_map_single(priv->dev, skb->data,
			     priv->rx_buffer_size, DMA_FROM_DEVICE);
	gfar_init_rxbdp(rx_queue, bdp, buf);
}

static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
	if (!skb)
		return NULL;

	gfar_align_skb(skb);

	return skb;
}

struct sk_buff *gfar_new_skb(struct net_device *dev)
{
	return gfar_alloc_skb(dev);
}

static inline void count_errors(unsigned short status, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;

		atomic64_inc(&estats->rx_trunc);

		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			atomic64_inc(&estats->rx_large);
		else
			atomic64_inc(&estats->rx_short);
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		atomic64_inc(&estats->rx_nonoctet);
	}
	if (status & RXBD_CRCERR) {
		atomic64_inc(&estats->rx_crcerr);
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		atomic64_inc(&estats->rx_overrun);
		stats->rx_crc_errors++;
	}
}

irqreturn_t gfar_receive(int irq, void *grp_id)
{
	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
	unsigned long flags;
	u32 imask;

	if (likely(napi_schedule_prep(&grp->napi_rx))) {
		spin_lock_irqsave(&grp->grplock, flags);
		imask = gfar_read(&grp->regs->imask);
		imask &= IMASK_RX_DISABLED;
		gfar_write(&grp->regs->imask, imask);
		spin_unlock_irqrestore(&grp->grplock, flags);
		__napi_schedule(&grp->napi_rx);
	} else {
		/* Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
	}

	return IRQ_HANDLED;
}

/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *grp_id)
{
	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
	unsigned long flags;
	u32 imask;

	if (likely(napi_schedule_prep(&grp->napi_tx))) {
		spin_lock_irqsave(&grp->grplock, flags);
		imask = gfar_read(&grp->regs->imask);
		imask &= IMASK_TX_DISABLED;
		gfar_write(&grp->regs->imask, imask);
		spin_unlock_irqrestore(&grp->grplock, flags);
		__napi_schedule(&grp->napi_tx);
	} else {
		/* Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
	}

	return IRQ_HANDLED;
}

static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary.  Otherwise, we let the
	 * stack verify the checksum itself.
	 */
	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);
}


/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			       int amount_pull, struct napi_struct *napi)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;

	/* fcb is at the beginning if exists */
	fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb
	 * Remove the padded bytes, if there are any
	 */
	if (amount_pull) {
		skb_record_rx_queue(skb, fcb->rq);
		skb_pull(skb, amount_pull);
	}

	/* Get receive timestamp from the skb */
	if (priv->hwts_rx_en) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		u64 *ns = (u64 *) skb->data;

		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
		shhwtstamps->hwtstamp = ns_to_ktime(*ns);
	}

	if (priv->padding)
		skb_pull(skb, priv->padding);

	if (dev->features & NETIF_F_RXCSUM)
		gfar_rx_checksum(skb, fcb);

	/* Tell the skb what kind of packet this is */
	skb->protocol = eth_type_trans(skb, dev);

	/* There's a need to check for NETIF_F_HW_VLAN_CTAG_RX here.
	 * Even if vlan rx accel is disabled, on some chips
	 * RXFCB_VLN is pseudo randomly set.
	 */
	if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
	    fcb->flags & RXFCB_VLN)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), fcb->vlctl);

	/* Send the packet up the stack */
	napi_gro_receive(napi, skb);
}

/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached. Returns the number
 * of frames handled
 */
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
	struct net_device *dev = rx_queue->dev;
	struct rxbd8 *bdp, *base;
	struct sk_buff *skb;
	int pkt_len;
	int amount_pull;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = rx_queue->cur_rx;
	base = rx_queue->rx_bd_base;

	amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;

	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		struct sk_buff *newskb;

		rmb();

		/* Add another skb for the future */
		newskb = gfar_new_skb(dev);

		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];

		dma_unmap_single(priv->dev, bdp->bufPtr,
				 priv->rx_buffer_size, DMA_FROM_DEVICE);

		if (unlikely(!(bdp->status & RXBD_ERR) &&
			     bdp->length > priv->rx_buffer_size))
			bdp->status = RXBD_LARGE;

		/* We drop the frame if we failed to allocate a new buffer */
		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
			     bdp->status & RXBD_ERR)) {
			count_errors(bdp->status, dev);

			if (unlikely(!newskb))
				newskb = skb;
			else if (skb)
				dev_kfree_skb(skb);
		} else {
			/* Increment the number of packets */
			rx_queue->stats.rx_packets++;
			howmany++;

			if (likely(skb)) {
				pkt_len = bdp->length - ETH_FCS_LEN;
				/* Remove the FCS from the packet length */
				skb_put(skb, pkt_len);
				rx_queue->stats.rx_bytes += pkt_len;
				skb_record_rx_queue(skb, rx_queue->qindex);
				gfar_process_frame(dev, skb, amount_pull,
						   &rx_queue->grp->napi_rx);

			} else {
				netif_warn(priv, rx_err, dev, "Missing skb!\n");
				rx_queue->stats.rx_dropped++;
				atomic64_inc(&priv->extra_stats.rx_skbmissing);
			}

		}

		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;

		/* Setup the new bdp */
		gfar_new_rxbdp(rx_queue, bdp, newskb);

		/* Update to the next pointer */
		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);

		/* update to point at the next skb */
		rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
				      RX_RING_MOD_MASK(rx_queue->rx_ring_size);
	}

	/* Update the current rxbd pointer to be the next one */
	rx_queue->cur_rx = bdp;

	return howmany;
}

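/* NAPI Rx poll handler for the single-queue case: only rx_queue[0] is
 * serviced, so no per-queue RSTAT bookkeeping is needed.
 */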
static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_rx);
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_rx_q *rx_queue = gfargrp->priv->rx_queue[0];
	int work_done = 0;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_RX_MASK);

	work_done = gfar_clean_rx_ring(rx_queue, budget);

	if (work_done < budget) {
		u32 imask;
		napi_complete(napi);
		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		spin_lock_irq(&gfargrp->grplock);
		imask = gfar_read(&regs->imask);
		imask |= IMASK_RX_DEFAULT;
		gfar_write(&regs->imask, imask);
		spin_unlock_irq(&gfargrp->grplock);
	}

	return work_done;
}

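/* NAPI Tx poll handler for the single-queue case: clean tx_queue[0] to
 * completion, then re-enable the Tx interrupts.
 */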
static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_tx);
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = gfargrp->priv->tx_queue[0];
	u32 imask;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_TX_MASK);

	/* run Tx cleanup to completion */
	if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
		gfar_clean_tx_ring(tx_queue);

	napi_complete(napi);

	spin_lock_irq(&gfargrp->grplock);
	imask = gfar_read(&regs->imask);
	imask |= IMASK_TX_DEFAULT;
	gfar_write(&regs->imask, imask);
	spin_unlock_irq(&gfargrp->grplock);

	return 0;
}

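/* NAPI Rx poll handler for the multi-queue case: the budget is split
 * evenly between the group's Rx queues that have frames pending.
 */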
static int gfar_poll_rx(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_rx);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int work_done = 0, work_done_per_q = 0;
	int i, budget_per_q = 0;
	unsigned long rstat_rxf;
	int num_act_queues;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_RX_MASK);

	rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;

	num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
	if (num_act_queues)
		budget_per_q = budget/num_act_queues;

	for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
		/* skip queue if not active */
		if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
			continue;

		rx_queue = priv->rx_queue[i];
		work_done_per_q =
			gfar_clean_rx_ring(rx_queue, budget_per_q);
		work_done += work_done_per_q;

		/* finished processing this queue */
		if (work_done_per_q < budget_per_q) {
			/* clear active queue hw indication */
			gfar_write(&regs->rstat,
				   RSTAT_CLEAR_RXF0 >> i);
			num_act_queues--;

			if (!num_act_queues)
				break;
		}
	}

	if (!num_act_queues) {
		u32 imask;
		napi_complete(napi);

		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		spin_lock_irq(&gfargrp->grplock);
		imask = gfar_read(&regs->imask);
		imask |= IMASK_RX_DEFAULT;
		gfar_write(&regs->imask, imask);
		spin_unlock_irq(&gfargrp->grplock);
	}

	return work_done;
}

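/* NAPI Tx poll handler for the multi-queue case: every Tx queue in the
 * group is cleaned to completion.
 */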
static int gfar_poll_tx(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_tx);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	int has_tx_work = 0;
	int i;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_TX_MASK);

	for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
		tx_queue = priv->tx_queue[i];
		/* run Tx cleanup to completion */
		if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
			gfar_clean_tx_ring(tx_queue);
			has_tx_work = 1;
		}
	}

	if (!has_tx_work) {
		u32 imask;
		napi_complete(napi);

		spin_lock_irq(&gfargrp->grplock);
		imask = gfar_read(&regs->imask);
		imask |= IMASK_TX_DEFAULT;
		gfar_write(&regs->imask, imask);
		spin_unlock_irq(&gfargrp->grplock);
	}

	return 0;
}


#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i;

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++) {
			struct gfar_priv_grp *grp = &priv->gfargrp[i];

			disable_irq(gfar_irq(grp, TX)->irq);
			disable_irq(gfar_irq(grp, RX)->irq);
			disable_irq(gfar_irq(grp, ER)->irq);
			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
			enable_irq(gfar_irq(grp, ER)->irq);
			enable_irq(gfar_irq(grp, RX)->irq);
			enable_irq(gfar_irq(grp, TX)->irq);
		}
	} else {
		for (i = 0; i < priv->num_grps; i++) {
			struct gfar_priv_grp *grp = &priv->gfargrp[i];

			disable_irq(gfar_irq(grp, TX)->irq);
			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
			enable_irq(gfar_irq(grp, TX)->irq);
		}
	}
}
#endif

/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;

	/* Save ievent for future reference */
	u32 events = gfar_read(&gfargrp->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, grp_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, grp_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, grp_id);

	return IRQ_HANDLED;
}

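/* Derive the MACCFG1 Tx/Rx flow control bits, either from the
 * user-forced pause settings or from the pause capabilities
 * autonegotiated with the link partner.
 */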
static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
{
	struct phy_device *phydev = priv->phydev;
	u32 val = 0;

	if (!phydev->duplex)
		return val;

	if (!priv->pause_aneg_en) {
		if (priv->tx_pause_en)
			val |= MACCFG1_TX_FLOW;
		if (priv->rx_pause_en)
			val |= MACCFG1_RX_FLOW;
	} else {
		u16 lcl_adv, rmt_adv;
		u8 flowctrl;
		/* get link partner capabilities */
		rmt_adv = 0;
		if (phydev->pause)
			rmt_adv = LPA_PAUSE_CAP;
		if (phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		lcl_adv = mii_advertise_flowctrl(phydev->advertising);

		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
		if (flowctrl & FLOW_CTRL_TX)
			val |= MACCFG1_TX_FLOW;
		if (flowctrl & FLOW_CTRL_RX)
			val |= MACCFG1_RX_FLOW;
	}

	return val;
}

/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	struct phy_device *phydev = priv->phydev;
	int new_state = 0;

	if (test_bit(GFAR_RESETTING, &priv->state))
		return;

	if (phydev->link) {
		u32 tempval1 = gfar_read(&regs->maccfg1);
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode.
		 */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);

				ecntrl &= ~(ECNTRL_R100);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100
				 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				netif_warn(priv, link, dev,
					   "Ack!  Speed (%d) is not 10/100/1000!\n",
					   phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
		tempval1 |= gfar_get_flowctrl_cfg(priv);

		gfar_write(&regs->maccfg1, tempval1);
		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);
}

/* Update the hash table based on the current list of multicast
 * addresses we subscribe to.  Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed
 */
static void gfar_set_multi(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	if (dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting them
		 */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if (netdev_mc_empty(dev))
			return;

		/* Parse the list, and set the appropriate bits */
		netdev_for_each_mc_addr(ha, dev) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx, ha->addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, ha->addr);
		}
	}
}

/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception
 */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};

	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
		gfar_set_mac_for_addr(dev, idx, zero_arr);
}

L
Linus Torvalds 已提交
3262 3263 3264 3265 3266 3267 3268 3269 3270 3271 3272 3273
/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table.  The table is controlled through 8 32-bit registers:
 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 * gaddr7.  This means that the 3 most significant bits in the
 * hash index which gaddr register to use, and the 5 other bits
 * indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
J
Jan Ceuleers 已提交
3274 3275
 * the entry.
 */
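/* Illustration (hypothetical values): with a hash width of 8 bits, the
 * top three bits of the bit-reversed CRC select one of gaddr0-7 and
 * the next five bits select the bit within that register.
 */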
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(ETH_ALEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31-whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
}


/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int idx;
	char tmpbuf[ETH_ALEN];
	u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;

	macptr += num*2;

	/* Now copy it into the mac registers backwards, cuz
	 * little endian is silly
	 */
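	/* e.g. (illustrative): 00:04:9f:01:02:03 is stored in tmpbuf as
	 * 03:02:01:9f:04:00 before the two 32-bit writes below
	 */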
	for (idx = 0; idx < ETH_ALEN; idx++)
		tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];

	gfar_write(macptr, *((u32 *) (tmpbuf)));

	tempval = *((u32 *) (tmpbuf + 4));

	gfar_write(macptr+1, tempval);
}

/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_private *priv = gfargrp->priv;
	struct net_device *dev = priv->ndev;

	/* Save ievent for future reference */
	u32 events = gfar_read(&regs->ievent);

	/* Clear IEVENT */
	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	/* Hmm... */
	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		netdev_dbg(dev,
			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
			   events, gfar_read(&regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			unsigned long flags;

			netif_dbg(priv, tx_err, dev,
				  "TX FIFO underrun, packet dropped\n");
			dev->stats.tx_dropped++;
			atomic64_inc(&priv->extra_stats.tx_underrun);

			local_irq_save(flags);
			lock_tx_qs(priv);

			/* Reactivate the Tx Queues */
			gfar_write(&regs->tstat, gfargrp->tstat);

			unlock_tx_qs(priv);
			local_irq_restore(flags);
		}
		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_errors++;
		atomic64_inc(&priv->extra_stats.rx_bsy);

		gfar_receive(irq, grp_id);

		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
			  gfar_read(&regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		atomic64_inc(&priv->extra_stats.rx_babr);

		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
	}
	if (events & IEVENT_EBERR) {
		atomic64_inc(&priv->extra_stats.eberr);
		netif_dbg(priv, rx_err, dev, "bus error\n");
	}
	if (events & IEVENT_RXC)
		netif_dbg(priv, rx_status, dev, "control frame\n");

	if (events & IEVENT_BABT) {
		atomic64_inc(&priv->extra_stats.tx_babt);
		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
	}
	return IRQ_HANDLED;
}

static struct of_device_id gfar_match[] =
{
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{
		.compatible = "fsl,etsec2",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gfar_match);

/* Structure for a device driver */
static struct platform_driver gfar_driver = {
	.driver = {
		.name = "fsl-gianfar",
		.owner = THIS_MODULE,
		.pm = GFAR_PM_OPS,
		.of_match_table = gfar_match,
	},
	.probe = gfar_probe,
	.remove = gfar_remove,
};

module_platform_driver(gfar_driver);