/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 *  Gianfar:  AKA Lambda Draconis, "Dragon"
 *  RA 11 31 24.2
 *  Dec +69 19 52
 *  V 3.84
 *  B-V +1.62
 *
 *  Theory of operation
 *
 *  The driver is initialized through of_device. Configuration information
 *  is therefore conveyed through an OF-style device tree.
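 *
 *  As an illustrative sketch only (the authoritative bindings ship with
 *  the platform device trees and vary by SoC), a node consumed by this
 *  driver might look like:
 *
 *    ethernet@24000 {
 *            model = "eTSEC";
 *            compatible = "gianfar";
 *            phy-handle = <&phy0>;
 *            phy-connection-type = "rgmii-id";
 *            fsl,num_tx_queues = <8>;
 *            fsl,num_rx_queues = <8>;
 *    };
 *
 *  Every property above is read by gfar_of_init() below; "fsl,etsec2"
 *  compatible devices additionally describe each interrupt group in a
 *  child node.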
 *
 *  The Gianfar Ethernet Controller uses a ring of buffer
 *  descriptors.  The beginning is indicated by a register
 *  pointing to the physical address of the start of the ring.
 *  The end is determined by a "wrap" bit being set in the
 *  last descriptor of the ring.
 *
 *  When a packet is received, the RXF bit in the
 *  IEVENT register is set, triggering an interrupt when the
 *  corresponding bit in the IMASK register is also set (if
 *  interrupt coalescing is active, then the interrupt may not
 *  happen immediately, but will wait until either a set number
 *  of frames or amount of time have passed).  In NAPI, the
 *  interrupt handler will signal there is work to be done, and
 *  exit. This method will start at the last known empty
 *  descriptor, and process every subsequent descriptor until there
 *  are none left with data (NAPI will stop after a set number of
 *  packets to give time to other tasks, but will eventually
 *  process all the packets).  The data arrives inside a
 *  pre-allocated skb, and so after the skb is passed up to the
 *  stack, a new skb must be allocated, and the address field in
 *  the buffer descriptor must be updated to indicate this new
 *  skb.
 *
 *  When the kernel requests that a packet be transmitted, the
 *  driver starts where it left off last time, and points the
 *  descriptor at the buffer which was passed in.  The driver
 *  then informs the DMA engine that there are packets ready to
 *  be transmitted.  Once the controller is finished transmitting
 *  the packet, an interrupt may be triggered (under the same
 *  conditions as for reception, but depending on the TXF bit).
 *  The driver then cleans up the buffer.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DEBUG

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#ifdef CONFIG_PPC
#include <asm/reg.h>
#include <asm/mpc85xx.h>
#endif
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "gianfar.h"

#define TX_TIMEOUT      (1*HZ)

const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			   struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static noinline void gfar_update_link_state(struct gfar_private *priv);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll_rx(struct napi_struct *napi, int budget);
static int gfar_poll_tx(struct napi_struct *napi, int budget);
static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			       int amount_pull, struct napi_struct *napi);
static void gfar_halt_nodisable(struct gfar_private *priv);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = buf;

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

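	/* Make sure bufPtr and the wrap flag have hit memory before
	 * lstatus marks the descriptor empty, i.e. hardware-owned
	 */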
	gfar_wmb();

	bdp->lstatus = lstatus;
}

static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status |= TXBD_WRAP;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->cur_rx = rx_queue->rx_bd_base;
		rx_queue->skb_currx = 0;
		rxbdp = rx_queue->rx_bd_base;

		for (j = 0; j < rx_queue->rx_ring_size; j++) {
			struct sk_buff *skb = rx_queue->rx_skbuff[j];

			if (skb) {
				gfar_init_rxbdp(rx_queue, rxbdp,
						rxbdp->bufPtr);
			} else {
				skb = gfar_new_skb(ndev);
				if (!skb) {
					netdev_err(ndev, "Can't allocate RX buffers\n");
					return -ENOMEM;
				}
				rx_queue->rx_skbuff[j] = skb;

				gfar_new_rxbdp(rx_queue, rxbdp, skb);
			}

			rxbdp++;
		}
	}

	return 0;
}

static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j, k;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = priv->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
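	/* One coherent block backs every tx ring followed by every rx ring,
	 * so a single allocation (and a single free) serves all queues.
	 */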
	vaddr = dma_alloc_coherent(dev,
				   (priv->total_tx_ring_size *
				    sizeof(struct txbd8)) +
				   (priv->total_rx_ring_size *
				    sizeof(struct rxbd8)),
				   &addr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->dev = ndev;
		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff =
			kmalloc_array(tx_queue->tx_ring_size,
				      sizeof(*tx_queue->tx_skbuff),
				      GFP_KERNEL);
		if (!tx_queue->tx_skbuff)
			goto cleanup;

		for (k = 0; k < tx_queue->tx_ring_size; k++)
			tx_queue->tx_skbuff[k] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_skbuff =
			kmalloc_array(rx_queue->rx_ring_size,
				      sizeof(*rx_queue->rx_skbuff),
				      GFP_KERNEL);
		if (!rx_queue->rx_skbuff)
			goto cleanup;

		for (j = 0; j < rx_queue->rx_ring_size; j++)
			rx_queue->rx_skbuff[j] = NULL;
	}

	if (gfar_init_bds(ndev))
		goto cleanup;

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}

static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
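	/* The per-queue ring base registers are spaced two words (8 bytes)
	 * apart in the register map, hence the "baddr += 2" stepping below.
	 */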
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}

static void gfar_rx_buff_size_config(struct gfar_private *priv)
{
	int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN;

	/* set this when rx hw offload (TOE) functions are being used */
	priv->uses_rxfcb = 0;
	if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
		priv->uses_rxfcb = 1;

	if (priv->hwts_rx_en)
		priv->uses_rxfcb = 1;

	if (priv->uses_rxfcb)
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

	frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
		     INCREMENTAL_BUFFER_SIZE;
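	/* i.e. frame_size rounded up to a multiple of INCREMENTAL_BUFFER_SIZE
	 * (the masking above always adds at least one full increment)
	 */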

	priv->rx_buffer_size = frame_size;
}

static void gfar_mac_rx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN;
		/* Program the RIR0 reg with the required distribution */
		if (priv->poll_mode == GFAR_SQ_POLLING)
			gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
		else /* GFAR_MQ_POLLING */
			gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
	}

	/* Restore PROMISC mode */
	if (priv->ndev->flags & IFF_PROMISC)
		rctrl |= RCTRL_PROM;

	if (priv->ndev->features & NETIF_F_RXCSUM)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash)
		rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);
}

static void gfar_mac_tx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tctrl = 0;

	if (priv->ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	if (priv->prio_sched_en)
		tctrl |= TCTRL_TXSCHED_PRIO;
	else {
		tctrl |= TCTRL_TXSCHED_WRRS;
		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
	}

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
		tctrl |= TCTRL_VLINS;

	gfar_write(&regs->tctrl, tctrl);
}

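/* Program per-queue interrupt coalescing thresholds.  The tx_mask/rx_mask
 * arguments select which queues get (re)programmed; an all-ones mask, as
 * used by gfar_configure_coalescing_all() below, covers every queue.
 */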
static void gfar_configure_coalescing(struct gfar_private *priv,
			       unsigned long tx_mask, unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;

	if (priv->mode == MQ_MG_MODE) {
		int i = 0;

		baddr = &regs->txic0;
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->tx_queue[i]->txcoalescing))
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
		}

		baddr = &regs->rxic0;
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->rx_queue[i]->rxcoalescing))
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
		}
	} else {
		/* Backward compatible case -- even if we enable
		 * multiple queues, there's only a single reg to program
		 */
		gfar_write(&regs->txic, 0);
		if (likely(priv->tx_queue[0]->txcoalescing))
			gfar_write(&regs->txic, priv->tx_queue[0]->txic);

		gfar_write(&regs->rxic, 0);
		if (unlikely(priv->rx_queue[0]->rxcoalescing))
			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
	}
}

void gfar_configure_coalescing_all(struct gfar_private *priv)
{
	gfar_configure_coalescing(priv, 0xFF, 0xFF);
}

static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes   = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes   = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}

static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_features = gfar_set_features,
	.ndo_set_rx_mode = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};

static void gfar_ints_disable(struct gfar_private *priv)
{
	int i;
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;
		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}
}

static void gfar_ints_enable(struct gfar_private *priv)
{
	int i;
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;
		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}
}

void lock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_unlock(&priv->tx_queue[i]->txlock);
}

static int gfar_alloc_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i])
			return -ENOMEM;

		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = priv->ndev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}
	return 0;
}

static int gfar_alloc_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i])
			return -ENOMEM;

		priv->rx_queue[i]->rx_skbuff = NULL;
		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->dev = priv->ndev;
	}
	return 0;
}

static void gfar_free_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void gfar_free_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void free_gfar_dev(struct gfar_private *priv)
{
	int i, j;

	for (i = 0; i < priv->num_grps; i++)
		for (j = 0; j < GFAR_NUM_IRQS; j++) {
			kfree(priv->gfargrp[i].irqinfo[j]);
			priv->gfargrp[i].irqinfo[j] = NULL;
		}

	free_netdev(priv->ndev);
}

static void disable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_disable(&priv->gfargrp[i].napi_rx);
		napi_disable(&priv->gfargrp[i].napi_tx);
	}
}

static void enable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_enable(&priv->gfargrp[i].napi_rx);
		napi_enable(&priv->gfargrp[i].napi_tx);
	}
}

static int gfar_parse_group(struct device_node *np,
			    struct gfar_private *priv, const char *model)
{
	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
	int i;

	for (i = 0; i < GFAR_NUM_IRQS; i++) {
		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
					  GFP_KERNEL);
		if (!grp->irqinfo[i])
			return -ENOMEM;
	}

	grp->regs = of_iomap(np, 0);
	if (!grp->regs)
		return -ENOMEM;

	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
		if (gfar_irq(grp, TX)->irq == NO_IRQ ||
		    gfar_irq(grp, RX)->irq == NO_IRQ ||
		    gfar_irq(grp, ER)->irq == NO_IRQ)
			return -EINVAL;
	}

	grp->priv = priv;
	spin_lock_init(&grp->grplock);
	if (priv->mode == MQ_MG_MODE) {
		u32 *rxq_mask, *txq_mask;

		rxq_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
		txq_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);

		if (priv->poll_mode == GFAR_SQ_POLLING) {
			/* One Q per interrupt group: Q0 to G0, Q1 to G1 */
			grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
			grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
		} else { /* GFAR_MQ_POLLING */
			grp->rx_bit_map = rxq_mask ?
			*rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
			grp->tx_bit_map = txq_mask ?
			*txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
		}
	} else {
		grp->rx_bit_map = 0xFF;
		grp->tx_bit_map = 0xFF;
	}

	/* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses
	 * right to left, so we need to reverse the 8 bits to get the q index
	 */
	grp->rx_bit_map = bitrev8(grp->rx_bit_map);
	grp->tx_bit_map = bitrev8(grp->tx_bit_map);

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups
	 */
	for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
		if (!grp->rx_queue)
			grp->rx_queue = priv->rx_queue[i];
		grp->num_rx_queues++;
		grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
		priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		priv->rx_queue[i]->grp = grp;
	}

	for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
		if (!grp->tx_queue)
			grp->tx_queue = priv->tx_queue[i];
		grp->num_tx_queues++;
		grp->tstat |= (TSTAT_CLEAR_THALT >> i);
		priv->tqueue |= (TQUEUE_EN0 >> i);
		priv->tx_queue[i]->grp = grp;
	}

	priv->num_grps++;

	return 0;
}

static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	const u32 *stash;
	const u32 *stash_len;
	const u32 *stash_idx;
	unsigned int num_tx_qs, num_rx_qs;
	u32 *tx_queues, *rx_queues;
	unsigned short mode, poll_mode;

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	if (of_device_is_compatible(np, "fsl,etsec2")) {
		mode = MQ_MG_MODE;
		poll_mode = GFAR_SQ_POLLING;
	} else {
		mode = SQ_SG_MODE;
		poll_mode = GFAR_SQ_POLLING;
	}

	/* parse the num of HW tx and rx queues */
	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);

	if (mode == SQ_SG_MODE) {
		num_tx_qs = 1;
		num_rx_qs = 1;
	} else { /* MQ_MG_MODE */
		/* get the actual number of supported groups */
		unsigned int num_grps = of_get_available_child_count(np);

		if (num_grps == 0 || num_grps > MAXGROUPS) {
			dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
				num_grps);
			pr_err("Cannot do alloc_etherdev, aborting\n");
			return -EINVAL;
		}

		if (poll_mode == GFAR_SQ_POLLING) {
			num_tx_qs = num_grps; /* one txq per int group */
			num_rx_qs = num_grps; /* one rxq per int group */
		} else { /* GFAR_MQ_POLLING */
			num_tx_qs = tx_queues ? *tx_queues : 1;
			num_rx_qs = rx_queues ? *rx_queues : 1;
		}
	}

	if (num_tx_qs > MAX_TX_QS) {
		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
		       num_tx_qs, MAX_TX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	if (num_rx_qs > MAX_RX_QS) {
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
		       num_rx_qs, MAX_RX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->ndev = dev;

	priv->mode = mode;
	priv->poll_mode = poll_mode;

	priv->num_tx_queues = num_tx_qs;
	netif_set_real_num_rx_queues(dev, num_rx_qs);
	priv->num_rx_queues = num_rx_qs;

	err = gfar_alloc_tx_queues(priv);
	if (err)
		goto tx_alloc_failed;

	err = gfar_alloc_rx_queues(priv);
	if (err)
		goto rx_alloc_failed;

	/* Init Rx queue filer rule set linked list */
	INIT_LIST_HEAD(&priv->rx_list.list);
	priv->rx_list.count = 0;
	mutex_init(&priv->rx_queue_access);

	model = of_get_property(np, "model", NULL);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (priv->mode == MQ_MG_MODE) {
		for_each_child_of_node(np, child) {
			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else { /* SQ_SG_MODE */
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	stash = of_get_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	stash_len = of_get_property(np, "rx-stash-len", NULL);

	if (stash_len)
		priv->rx_stash_size = *stash_len;

	stash_idx = of_get_property(np, "rx-stash-idx", NULL);

	if (stash_idx)
		priv->rx_stash_index = *stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);

	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				     FSL_GIANFAR_DEV_HAS_COALESCE |
				     FSL_GIANFAR_DEV_HAS_RMON |
				     FSL_GIANFAR_DEV_HAS_MULTI_INTR;

	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				     FSL_GIANFAR_DEV_HAS_COALESCE |
				     FSL_GIANFAR_DEV_HAS_RMON |
				     FSL_GIANFAR_DEV_HAS_MULTI_INTR |
				     FSL_GIANFAR_DEV_HAS_CSUM |
				     FSL_GIANFAR_DEV_HAS_VLAN |
				     FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
				     FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
				     FSL_GIANFAR_DEV_HAS_TIMER;

	ctype = of_get_property(np, "phy-connection-type", NULL);

	/* We only care about rgmii-id.  The rest are autodetected */
	if (ctype && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
	if (!priv->phy_node && of_phy_is_fixed_link(np)) {
		err = of_phy_register_fixed_link(np);
		if (err)
			goto err_grp_init;

		priv->phy_node = of_node_get(np);
	}

	/* Find the TBI PHY.  If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

err_grp_init:
	unmap_group_regs(priv);
rx_alloc_failed:
	gfar_free_rx_queues(priv);
tx_alloc_failed:
	gfar_free_tx_queues(priv);
	free_gfar_dev(priv);
	return err;
}

static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		if (priv->hwts_rx_en) {
			priv->hwts_rx_en = 0;
			reset_gfar(netdev);
		}
		break;
	default:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		if (!priv->hwts_rx_en) {
			priv->hwts_rx_en = 1;
			reset_gfar(netdev);
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	config.flags = 0;
	config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config.rx_filter = (priv->hwts_rx_en ?
			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCSHWTSTAMP)
		return gfar_hwtstamp_set(dev, rq);
	if (cmd == SIOCGHWTSTAMP)
		return gfar_hwtstamp_get(dev, rq);

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, rq, cmd);
}

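/* Build a backwards-growing cluster of filer entries that steers frames
 * matching @class (e.g. RQFPR_IPV4 | RQFPR_TCP); returns the lowest filer
 * index consumed, so callers can keep stacking rules below it.
 */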
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
				   u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}

static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		priv->ftp_rqfcr[i] = rqfcr;
		priv->ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}

#ifdef CONFIG_PPC
static void __gfar_detect_errata_83xx(struct gfar_private *priv)
{
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
	unsigned int rev = svr & 0xffff;

	/* MPC8313 Rev 2.0 and higher; All MPC837x */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_74;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_76;

	/* MPC8313 Rev < 2.0 */
	if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
		priv->errata |= GFAR_ERRATA_12;
}

static void __gfar_detect_errata_85xx(struct gfar_private *priv)
{
	unsigned int svr = mfspr(SPRN_SVR);

	if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
		priv->errata |= GFAR_ERRATA_12;
	if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
	    ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
		priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
}
#endif

static void gfar_detect_errata(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;

	/* no plans to fix */
	priv->errata |= GFAR_ERRATA_A002;

#ifdef CONFIG_PPC
	if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
		__gfar_detect_errata_85xx(priv);
	else /* non-mpc85xx parts, i.e. e300 core based */
		__gfar_detect_errata_83xx(priv);
#endif

	if (priv->errata)
		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
			 priv->errata);
}

void gfar_mac_reset(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(3);

	/* the soft reset bit is not self-resetting, so we need to
	 * clear it before resuming normal operation
	 */
	gfar_write(&regs->maccfg1, 0);

	udelay(3);

	/* Compute rx_buff_size based on config flags */
	gfar_rx_buff_size_config(priv);

	/* Initialize the max receive frame/buffer lengths */
	gfar_write(&regs->maxfrm, priv->rx_buffer_size);
	gfar_write(&regs->mrblr, priv->rx_buffer_size);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);

	/* Initialize MACCFG2. */
	tempval = MACCFG2_INIT_SETTINGS;

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length
	 */
	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
	    gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;

	gfar_write(&regs->maccfg2, tempval);

	/* Clear mac addr hash registers */
	gfar_write(&regs->igaddr0, 0);
	gfar_write(&regs->igaddr1, 0);
	gfar_write(&regs->igaddr2, 0);
	gfar_write(&regs->igaddr3, 0);
	gfar_write(&regs->igaddr4, 0);
	gfar_write(&regs->igaddr5, 0);
	gfar_write(&regs->igaddr6, 0);
	gfar_write(&regs->igaddr7, 0);

	gfar_write(&regs->gaddr0, 0);
	gfar_write(&regs->gaddr1, 0);
	gfar_write(&regs->gaddr2, 0);
	gfar_write(&regs->gaddr3, 0);
	gfar_write(&regs->gaddr4, 0);
	gfar_write(&regs->gaddr5, 0);
	gfar_write(&regs->gaddr6, 0);
	gfar_write(&regs->gaddr7, 0);

	if (priv->extended_hash)
		gfar_clear_exact_match(priv->ndev);

	gfar_mac_rx_config(priv);

	gfar_mac_tx_config(priv);

	gfar_set_mac_address(priv->ndev);

	gfar_set_multi(priv->ndev);

	/* clear ievent and imask before configuring coalescing */
	gfar_ints_disable(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing_all(priv);
}

static void gfar_hw_init(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 attrs;

	/* Stop the DMA engine now, in case it was running before
	 * (The firmware could have used it, and left it running).
	 */
	gfar_halt(priv);

	gfar_mac_reset(priv);

	/* Zero out the rmon mib registers if it has them */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&regs->rmon.cam1, 0xffffffff);
		gfar_write(&regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize ECNTRL */
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing
	 * depending on driver parameters
	 */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	/* FIFO configs */
	gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
	gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
	gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);

	/* Program the interrupt steering regs, only for MG devices */
	if (priv->num_grps > 1)
		gfar_write_isrg(priv);
}

static void gfar_init_addr_hash_table(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;

	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}
}

/* Set up the ethernet device structure, private data,
 * and anything else we need before we start
 */
static int gfar_probe(struct platform_device *ofdev)
{
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	int err = 0, i;

	err = gfar_of_init(ofdev, &dev);

	if (err)
		return err;

	priv = netdev_priv(dev);
	priv->ndev = dev;
	priv->ofdev = ofdev;
	priv->dev = &ofdev->dev;
	SET_NETDEV_DEV(dev, &ofdev->dev);

	spin_lock_init(&priv->bflock);
	INIT_WORK(&priv->reset_task, gfar_reset_task);

	platform_set_drvdata(ofdev, priv);

	gfar_detect_errata(priv);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) priv->gfargrp[0].regs;

	/* Fill in the dev structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->mtu = 1500;
	dev->netdev_ops = &gfar_netdev_ops;
	dev->ethtool_ops = &gfar_ethtool_ops;

	/* Register NAPI handlers (rx and tx) for each interrupt group */
	for (i = 0; i < priv->num_grps; i++) {
		if (priv->poll_mode == GFAR_SQ_POLLING) {
			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
				       gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
			netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
				       gfar_poll_tx_sq, 2);
		} else {
			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
				       gfar_poll_rx, GFAR_DEV_WEIGHT);
			netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
				       gfar_poll_tx, 2);
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
				   NETIF_F_RXCSUM;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
				 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_HW_VLAN_CTAG_RX;
		dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
	}

	gfar_init_addr_hash_table(priv);

	/* Insert receive time stamps into padding alignment bytes */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		priv->padding = 8;

	if (dev->features & NETIF_F_IP_CSUM ||
	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		dev->needed_headroom = GMAC_FCB_LEN;

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;

	/* Initializing some of the rx/tx queue level parameters */
	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
		priv->tx_queue[i]->txic = DEFAULT_TXIC;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
	}

	/* always enable rx filer */
	priv->rx_filer_enable = 1;
	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
	/* use priority h/w tx queue scheduling for single queue devices */
	if (priv->num_tx_queues == 1)
		priv->prio_sched_en = 1;

	set_bit(GFAR_DOWN, &priv->state);

	gfar_hw_init(priv);

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	err = register_netdev(dev);

	if (err) {
		pr_err("%s: Cannot register net device, aborting\n", dev->name);
		goto register_fail;
	}

	device_init_wakeup(&dev->dev,
			   priv->device_flags &
			   FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	/* fill out IRQ number and name fields */
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar_priv_grp *grp = &priv->gfargrp[i];
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
			sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_tx");
			sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_rx");
			sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
				dev->name, "_g", '0' + i, "_er");
		} else
			strcpy(gfar_irq(grp, TX)->name, dev->name);
	}

	/* Initialize the filer table */
	gfar_init_filer_table(priv);

	/* Print out the device info */
	netdev_info(dev, "mac: %pM\n", dev->dev_addr);

	/* Even more device info helps when determining which kernel
	 * provided which set of benchmarks.
	 */
	netdev_info(dev, "Running with NAPI enabled\n");
	for (i = 0; i < priv->num_rx_queues; i++)
		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
			    i, priv->rx_queue[i]->rx_ring_size);
	for (i = 0; i < priv->num_tx_queues; i++)
		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
			    i, priv->tx_queue[i]->tx_ring_size);

	return 0;

register_fail:
	unmap_group_regs(priv);
	gfar_free_rx_queues(priv);
	gfar_free_tx_queues(priv);
	of_node_put(priv->phy_node);
	of_node_put(priv->tbi_node);
	free_gfar_dev(priv);
	return err;
}

static int gfar_remove(struct platform_device *ofdev)
{
	struct gfar_private *priv = platform_get_drvdata(ofdev);

	of_node_put(priv->phy_node);
	of_node_put(priv->tbi_node);

	unregister_netdev(priv->ndev);
	unmap_group_regs(priv);
	gfar_free_rx_queues(priv);
	gfar_free_tx_queues(priv);
	free_gfar_dev(priv);

	return 0;
}

#ifdef CONFIG_PM

static int gfar_suspend(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;

	int magic_packet = priv->wol_en &&
			   (priv->device_flags &
			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	netif_device_detach(ndev);

	if (netif_running(ndev)) {

		local_irq_save(flags);
		lock_tx_qs(priv);

		gfar_halt_nodisable(priv);

		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
		tempval = gfar_read(&regs->maccfg1);

		tempval &= ~MACCFG1_TX_EN;

		if (!magic_packet)
			tempval &= ~MACCFG1_RX_EN;

		gfar_write(&regs->maccfg1, tempval);

		unlock_tx_qs(priv);
		local_irq_restore(flags);

		disable_napi(priv);

		if (magic_packet) {
			/* Enable interrupt on Magic Packet */
			gfar_write(&regs->imask, IMASK_MAG);

			/* Enable Magic Packet mode */
			tempval = gfar_read(&regs->maccfg2);
			tempval |= MACCFG2_MPEN;
			gfar_write(&regs->maccfg2, tempval);
		} else {
			phy_stop(priv->phydev);
		}
	}

	return 0;
}

static int gfar_resume(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;
	int magic_packet = priv->wol_en &&
			   (priv->device_flags &
			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);
		return 0;
	}

	if (!magic_packet && priv->phydev)
		phy_start(priv->phydev);

	/* Disable Magic Packet mode, in case something
	 * else woke us up.
	 */
	local_irq_save(flags);
	lock_tx_qs(priv);

	tempval = gfar_read(&regs->maccfg2);
	tempval &= ~MACCFG2_MPEN;
	gfar_write(&regs->maccfg2, tempval);

	gfar_start(priv);

	unlock_tx_qs(priv);
	local_irq_restore(flags);

	netif_device_attach(ndev);

	enable_napi(priv);

	return 0;
}

static int gfar_restore(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);

		return 0;
	}

	if (gfar_init_bds(ndev)) {
		free_skb_resources(priv);
		return -ENOMEM;
	}

	gfar_mac_reset(priv);

	gfar_init_tx_rx_base(priv);

	gfar_start(priv);

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	if (priv->phydev)
		phy_start(priv->phydev);

	netif_device_attach(ndev);
	enable_napi(priv);

	return 0;
}

static struct dev_pm_ops gfar_pm_ops = {
	.suspend = gfar_suspend,
	.resume = gfar_resume,
	.freeze = gfar_suspend,
	.thaw = gfar_resume,
	.restore = gfar_restore,
};

#define GFAR_PM_OPS (&gfar_pm_ops)

#else

#define GFAR_PM_OPS NULL

#endif

/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 ecntrl;

	ecntrl = gfar_read(&regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
			return PHY_INTERFACE_MODE_RMII;
		} else {
			phy_interface_t interface = priv->interface;

			/* This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}


/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	uint gigabit_support =
		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
		GFAR_SUPPORTED_GBIT : 0;
	phy_interface_t interface;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	interface = gfar_get_interface(dev);

	priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
				      interface);
	if (!priv->phydev) {
		dev_err(&dev->dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

	/* Remove any features not supported by the controller */
	priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
	priv->phydev->advertising = priv->phydev->supported;

	/* Add support for flow control, but don't advertise it by default */
	priv->phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);

	return 0;
}

/* Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip.  We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register.  We assume
 * that the TBIPA register is valid.  Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *tbiphy;

	if (!priv->tbi_node) {
		dev_warn(&dev->dev, "error: SGMII mode requires that the "
				    "device tree specify a tbi-handle\n");
		return;
	}

	tbiphy = of_phy_find_device(priv->tbi_node);
	if (!tbiphy) {
		dev_err(&dev->dev, "error: Could not get TBI device\n");
		return;
	}

	/* If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
	 * everything for us?  Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
		return;

	/* Single clk mode, mii mode off(for serdes communication) */
	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);

	phy_write(tbiphy, MII_ADVERTISE,
		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
		  ADVERTISE_1000XPSE_ASYM);

	phy_write(tbiphy, MII_BMCR,
		  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
		  BMCR_SPEED1000);
}

static int __gfar_is_rx_idle(struct gfar_private *priv)
{
	u32 res;

	/* Normally TSEC should not hang on GRS commands, so we should
	 * actually wait for IEVENT_GRSC flag.
	 */
	if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
		return 0;

	/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
	 * and the Rx can be safely reset.
	 */
	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
	res &= 0x7f807f80;
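	/* the 0x7f807f80 mask isolates the two status fields (bits 7-14
	 * and 23-30) so the low and high halves can be compared directly
	 * below
	 */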
	if ((res & 0xffff) == (res >> 16))
		return 1;

	return 0;
}

/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	unsigned int timeout;
	int stopped;

	gfar_ints_disable(priv);

	if (gfar_is_dma_stopped(priv))
		return;

	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= (DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

retry:
	timeout = 1000;
	while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
		cpu_relax();
		timeout--;
	}

	if (!timeout)
		stopped = gfar_is_dma_stopped(priv);

	if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
	    !__gfar_is_rx_idle(priv))
		goto retry;
}

/* Halt the receive and transmit queues */
void gfar_halt(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	/* Disable the Rx/Tx hw queues */
	gfar_write(&regs->rqueue, 0);
	gfar_write(&regs->tqueue, 0);

	mdelay(10);

	gfar_halt_nodisable(priv);

	/* Disable Rx/Tx DMA */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}

void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	netif_tx_stop_all_queues(dev);

	smp_mb__before_atomic();
	set_bit(GFAR_DOWN, &priv->state);
	smp_mb__after_atomic();

	disable_napi(priv);

	/* disable ints and gracefully shut down Rx/Tx DMA */
	gfar_halt(priv);

	phy_stop(priv->phydev);

	free_skb_resources(priv);
}

static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
{
	struct txbd8 *txbdp;
	struct gfar_private *priv = netdev_priv(tx_queue->dev);
	int i, j;

	txbdp = tx_queue->tx_bd_base;

	for (i = 0; i < tx_queue->tx_ring_size; i++) {
		if (!tx_queue->tx_skbuff[i])
			continue;

		dma_unmap_single(priv->dev, txbdp->bufPtr,
				 txbdp->length, DMA_TO_DEVICE);
		txbdp->lstatus = 0;
		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
		     j++) {
			txbdp++;
			dma_unmap_page(priv->dev, txbdp->bufPtr,
				       txbdp->length, DMA_TO_DEVICE);
		}
		txbdp++;
		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
		tx_queue->tx_skbuff[i] = NULL;
	}
	kfree(tx_queue->tx_skbuff);
	tx_queue->tx_skbuff = NULL;
}

static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
{
	struct rxbd8 *rxbdp;
	struct gfar_private *priv = netdev_priv(rx_queue->dev);
	int i;

	rxbdp = rx_queue->rx_bd_base;

	for (i = 0; i < rx_queue->rx_ring_size; i++) {
		if (rx_queue->rx_skbuff[i]) {
			dma_unmap_single(priv->dev, rxbdp->bufPtr,
					 priv->rx_buffer_size,
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
			rx_queue->rx_skbuff[i] = NULL;
		}
		rxbdp->lstatus = 0;
		rxbdp->bufPtr = 0;
		rxbdp++;
	}
	kfree(rx_queue->rx_skbuff);
	rx_queue->rx_skbuff = NULL;
}

/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff
 */
static void free_skb_resources(struct gfar_private *priv)
{
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	for (i = 0; i < priv->num_tx_queues; i++) {
		struct netdev_queue *txq;

		tx_queue = priv->tx_queue[i];
		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
		if (tx_queue->tx_skbuff)
			free_skb_tx_queue(tx_queue);
		netdev_tx_reset_queue(txq);
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		if (rx_queue->rx_skbuff)
			free_skb_rx_queue(rx_queue);
	}

	dma_free_coherent(priv->dev,
			  sizeof(struct txbd8) * priv->total_tx_ring_size +
			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
			  priv->tx_queue[0]->tx_bd_base,
			  priv->tx_queue[0]->tx_bd_dma_base);
}

void gfar_start(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	int i = 0;

	/* Enable Rx/Tx hw queues */
	gfar_write(&regs->rqueue, priv->rqueue);
	gfar_write(&regs->tqueue, priv->tqueue);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear THLT/RHLT, so that the DMA starts polling now */
		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
	}

	/* Enable Rx/Tx DMA */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	gfar_ints_enable(priv);

	priv->ndev->trans_start = jiffies; /* prevent tx timeout */
}

static void free_grp_irqs(struct gfar_priv_grp *grp)
{
	free_irq(gfar_irq(grp, TX)->irq, grp);
	free_irq(gfar_irq(grp, RX)->irq, grp);
	free_irq(gfar_irq(grp, ER)->irq, grp);
}

static int register_grp_irqs(struct gfar_priv_grp *grp)
{
	struct gfar_private *priv = grp->priv;
	struct net_device *dev = priv->ndev;
	int err;

	/* If the device has multiple interrupts, register for
	 * them.  Otherwise, only register for the one
	 */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive
		 */
		err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
				  gfar_irq(grp, ER)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, ER)->irq);

			goto err_irq_fail;
		}
		err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
				  gfar_irq(grp, TX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, TX)->irq);
			goto tx_irq_fail;
		}
		err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
				  gfar_irq(grp, RX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, RX)->irq);
			goto rx_irq_fail;
		}
	} else {
		err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
				  gfar_irq(grp, TX)->name, grp);
		if (err < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  gfar_irq(grp, TX)->irq);
			goto err_irq_fail;
		}
	}

	return 0;

rx_irq_fail:
	free_irq(gfar_irq(grp, TX)->irq, grp);
tx_irq_fail:
	free_irq(gfar_irq(grp, ER)->irq, grp);
err_irq_fail:
	return err;
}

static void gfar_free_irq(struct gfar_private *priv)
{
	int i;

	/* Free the IRQs */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++)
			free_grp_irqs(&priv->gfargrp[i]);
	} else {
		for (i = 0; i < priv->num_grps; i++)
			free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
				 &priv->gfargrp[i]);
	}
}

static int gfar_request_irq(struct gfar_private *priv)
{
	int err, i, j;

	for (i = 0; i < priv->num_grps; i++) {
		err = register_grp_irqs(&priv->gfargrp[i]);
		if (err) {
			for (j = 0; j < i; j++)
				free_grp_irqs(&priv->gfargrp[j]);
			return err;
		}
	}

	return 0;
}

/* Bring the controller up and running */
int startup_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	int err;

	gfar_mac_reset(priv);

	err = gfar_alloc_skb_resources(ndev);
	if (err)
		return err;

	gfar_init_tx_rx_base(priv);

	smp_mb__before_atomic();
	clear_bit(GFAR_DOWN, &priv->state);
	smp_mb__after_atomic();

	/* Start Rx/Tx DMA and enable the interrupts */
	gfar_start(priv);

	phy_start(priv->phydev);

	enable_napi(priv);

	netif_tx_wake_all_queues(ndev);

	return 0;
}

/* Called when something needs to use the ethernet device
 * Returns 0 for success.
 */
static int gfar_enet_open(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err;

	err = init_phy(dev);
	if (err)
		return err;

	err = gfar_request_irq(priv);
	if (err)
		return err;

	err = startup_gfar(dev);
	if (err)
		return err;

	device_set_wakeup_enable(&dev->dev, priv->wol_en);

	return err;
}

static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;
}

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
				    int fcb_length)
{
	/* If we're here, it's an IP packet with a TCP or UDP
	 * payload.  We set it to checksum, using a pseudo-header
	 * we provide
	 */
	u8 flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is
	 * And provide the already calculated phcs
	 */
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = udp_hdr(skb)->check;
	} else
		fcb->phcs = tcp_hdr(skb)->check;

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr
	 */
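	/* e.g. for an untagged IPv4 frame, once the fcb_length bytes pushed
	 * in front of the frame are subtracted, l3os works out to ETH_HLEN
	 * (14) and l4os to the IP header length (20 with no options)
	 */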
	fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
	fcb->l4os = skb_network_header_len(skb);

	fcb->flags = flags;
}

static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = vlan_tx_tag_get(skb);
}

static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
				      struct txbd8 *base, int ring_size)
{
	struct txbd8 *new_bd = bdp + stride;

	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
}

static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
				      int ring_size)
{
	return skip_txbd(bdp, 1, base, ring_size);
}

/* eTSEC12: csum generation not supported for some fcb offsets */
static inline bool gfar_csum_errata_12(struct gfar_private *priv,
				       unsigned long fcb_addr)
{
	return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
	       (fcb_addr % 0x20) > 0x18);
}

/* eTSEC76: csum generation for frames larger than 2500 may
 * cause excess delays before start of transmission
 */
static inline bool gfar_csum_errata_76(struct gfar_private *priv,
				       unsigned int len)
{
	return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
	       (len > 2500));
}

/* This is called by the kernel when a frame is ready for transmission.
 * It is pointed to by the dev->hard_start_xmit function pointer
 */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct netdev_queue *txq;
	struct gfar __iomem *regs = NULL;
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
	u32 lstatus;
	int i, rq = 0;
	int do_tstamp, do_csum, do_vlan;
	u32 bufaddr;
	unsigned long flags;
	unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;

	rq = skb->queue_mapping;
	tx_queue = priv->tx_queue[rq];
	txq = netdev_get_tx_queue(dev, rq);
	base = tx_queue->tx_bd_base;
	regs = tx_queue->grp->regs;

	do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
	do_vlan = vlan_tx_tag_present(skb);
	do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		    priv->hwts_tx_en;

	if (do_csum || do_vlan)
		fcb_len = GMAC_FCB_LEN;

	/* check if time stamp should be generated */
	if (unlikely(do_tstamp))
		fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;

	/* make space for additional header when fcb is needed */
	if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
		struct sk_buff *skb_new;

		skb_new = skb_realloc_headroom(skb, fcb_len);
		if (!skb_new) {
			dev->stats.tx_errors++;
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}

		if (skb->sk)
			skb_set_owner_w(skb_new, skb->sk);
		dev_consume_skb_any(skb);
		skb = skb_new;
	}

	/* total number of fragments in the SKB */
	nr_frags = skb_shinfo(skb)->nr_frags;

	/* calculate the required number of TxBDs for this skb */
	if (unlikely(do_tstamp))
		nr_txbds = nr_frags + 2;
	else
		nr_txbds = nr_frags + 1;
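	/* one BD for the linear part plus one per fragment; timestamping
	 * consumes an extra BD because the FCB is sent from a BD of its own
	 */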

	/* check if there is space to queue this packet */
	if (nr_txbds > tx_queue->num_txbdfree) {
		/* no space, stop the queue */
		netif_tx_stop_queue(txq);
		dev->stats.tx_fifo_errors++;
		return NETDEV_TX_BUSY;
	}

	/* Update transmit stats */
	bytes_sent = skb->len;
	tx_queue->stats.tx_bytes += bytes_sent;
	/* keep Tx bytes on wire for BQL accounting */
	GFAR_CB(skb)->bytes_sent = bytes_sent;
	tx_queue->stats.tx_packets++;

	txbdp = txbdp_start = tx_queue->cur_tx;
	lstatus = txbdp->lstatus;

	/* Time stamp insertion requires one additional TxBD */
	if (unlikely(do_tstamp))
		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
						 tx_queue->tx_ring_size);

	if (nr_frags == 0) {
		if (unlikely(do_tstamp))
			txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
							  TXBD_INTERRUPT);
		else
			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
	} else {
		/* Place the fragment addresses and lengths into the TxBDs */
		for (i = 0; i < nr_frags; i++) {
			unsigned int frag_len;
			/* Point at the next BD, wrapping as needed */
			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);

			frag_len = skb_shinfo(skb)->frags[i].size;

			lstatus = txbdp->lstatus | frag_len |
				  BD_LFLAG(TXBD_READY);

			/* Handle the last BD specially */
			if (i == nr_frags - 1)
				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

			bufaddr = skb_frag_dma_map(priv->dev,
						   &skb_shinfo(skb)->frags[i],
						   0,
						   frag_len,
						   DMA_TO_DEVICE);

			/* set the TxBD length and buffer pointer */
			txbdp->bufPtr = bufaddr;
			txbdp->lstatus = lstatus;
		}

		lstatus = txbdp_start->lstatus;
	}

	/* Add TxPAL between FCB and frame if required */
	if (unlikely(do_tstamp)) {
		skb_push(skb, GMAC_TXPAL_LEN);
		memset(skb->data, 0, GMAC_TXPAL_LEN);
	}

	/* Add TxFCB if required */
	if (fcb_len) {
		fcb = gfar_add_fcb(skb);
		lstatus |= BD_LFLAG(TXBD_TOE);
	}

	/* Set up checksumming */
	if (do_csum) {
		gfar_tx_checksum(skb, fcb, fcb_len);

		if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
		    unlikely(gfar_csum_errata_76(priv, skb->len))) {
			__skb_pull(skb, GMAC_FCB_LEN);
			skb_checksum_help(skb);
			if (do_vlan || do_tstamp) {
				/* put back a new fcb for vlan/tstamp TOE */
				fcb = gfar_add_fcb(skb);
			} else {
				/* Tx TOE not used */
				lstatus &= ~(BD_LFLAG(TXBD_TOE));
				fcb = NULL;
			}
		}
	}

	if (do_vlan)
		gfar_tx_vlan(skb, fcb);

	/* Setup tx hardware time stamping if requested */
	if (unlikely(do_tstamp)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		fcb->ptp = 1;
	}

	txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data,
					     skb_headlen(skb), DMA_TO_DEVICE);

	/* If time stamping is requested, one additional TxBD must be set up.
	 * The first TxBD points to the FCB and must have a data length of
	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
	 * the full frame length.
	 */
	if (unlikely(do_tstamp)) {
		txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_len;
		txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
					 (skb_headlen(skb) - fcb_len);
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
	} else {
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
	}

	netdev_tx_sent_queue(txq, bytes_sent);

	/* We can work in parallel with gfar_clean_tx_ring(), except
	 * when modifying num_txbdfree. Note that we didn't grab the lock
	 * when we were reading the num_txbdfree and checking for available
	 * space, that's because outside of this function it can only grow,
	 * and once we've got needed space, it cannot suddenly disappear.
	 *
	 * The lock also protects us from gfar_error(), which can modify
	 * regs->tstat and thus retrigger the transfers, which is why we
	 * also must grab the lock before setting the ready bit for the
	 * first BD to be transmitted.
	 */
	spin_lock_irqsave(&tx_queue->txlock, flags);

	gfar_wmb();

	txbdp_start->lstatus = lstatus;

	gfar_wmb(); /* force lstatus write before tx_skbuff */

	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;

	/* Update the current skb pointer to the next entry we will use
	 * (wrapping if necessary)
	 */
	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
			      TX_RING_MOD_MASK(tx_queue->tx_ring_size);

	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);

	/* reduce TxBD free count */
	tx_queue->num_txbdfree -= (nr_txbds);

	/* If the next BD still needs to be cleaned up, then the bds
	 * are full.  We need to tell the kernel to stop sending us stuff.
	 */
	if (!tx_queue->num_txbdfree) {
		netif_tx_stop_queue(txq);

		dev->stats.tx_fifo_errors++;
	}

	/* Tell the DMA to go go go */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);

	/* Unlock priv */
	spin_unlock_irqrestore(&tx_queue->txlock, flags);

	return NETDEV_TX_OK;
}

/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	cancel_work_sync(&priv->reset_task);
	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	gfar_free_irq(priv);

	return 0;
}

/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}

static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	struct gfar_private *priv = netdev_priv(dev);
	int frame_size = new_mtu + ETH_HLEN;

	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
		netif_err(priv, drv, dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	if (dev->flags & IFF_UP)
		stop_gfar(dev);

	dev->mtu = new_mtu;

	if (dev->flags & IFF_UP)
		startup_gfar(dev);

	clear_bit_unlock(GFAR_RESETTING, &priv->state);

	return 0;
}

void reset_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	stop_gfar(ndev);
	startup_gfar(ndev);

	clear_bit_unlock(GFAR_RESETTING, &priv->state);
}

/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
	struct gfar_private *priv = container_of(work, struct gfar_private,
						 reset_task);
	reset_gfar(priv->ndev);
}

static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);
}

static void gfar_align_skb(struct sk_buff *skb)
{
	/* We need the data buffer to be aligned properly.  We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb, RXBUF_ALIGNMENT -
		    (((unsigned long)skb->data) & (RXBUF_ALIGNMENT - 1)));
}

/* Interrupt Handler for Transmit complete */
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
	struct net_device *dev = tx_queue->dev;
	struct netdev_queue *txq;
	struct gfar_private *priv = netdev_priv(dev);
	struct txbd8 *bdp, *next = NULL;
	struct txbd8 *lbdp = NULL;
	struct txbd8 *base = tx_queue->tx_bd_base;
	struct sk_buff *skb;
	int skb_dirtytx;
	int tx_ring_size = tx_queue->tx_ring_size;
	int frags = 0, nr_txbds = 0;
	int i;
	int howmany = 0;
	int tqi = tx_queue->qindex;
	unsigned int bytes_sent = 0;
	u32 lstatus;
	size_t buflen;

	txq = netdev_get_tx_queue(dev, tqi);
	bdp = tx_queue->dirty_tx;
	skb_dirtytx = tx_queue->skb_dirtytx;

	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
		unsigned long flags;

		frags = skb_shinfo(skb)->nr_frags;

		/* When time stamping, one additional TxBD must be freed.
		 * Also, we need to dma_unmap_single() the TxPAL.
		 */
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
			nr_txbds = frags + 2;
		else
			nr_txbds = frags + 1;

		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);

		lstatus = lbdp->lstatus;

		/* Only clean completed frames */
		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
		    (lstatus & BD_LENGTH_MASK))
			break;

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			next = next_txbd(bdp, base, tx_ring_size);
			buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN;
		} else
			buflen = bdp->length;

		dma_unmap_single(priv->dev, bdp->bufPtr,
				 buflen, DMA_TO_DEVICE);

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			struct skb_shared_hwtstamps shhwtstamps;
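			/* the controller inserts the Tx timestamp in the
			 * TxPAL padding in front of the frame, at an
			 * 8-byte-aligned slot past skb->data (0x10 here)
			 */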
			u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) & ~0x7);

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
			skb_tstamp_tx(skb, &shhwtstamps);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next;
		}

		bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
		bdp = next_txbd(bdp, base, tx_ring_size);

		for (i = 0; i < frags; i++) {
			dma_unmap_page(priv->dev, bdp->bufPtr,
				       bdp->length, DMA_TO_DEVICE);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next_txbd(bdp, base, tx_ring_size);
		}

		bytes_sent += GFAR_CB(skb)->bytes_sent;

		dev_kfree_skb_any(skb);

		tx_queue->tx_skbuff[skb_dirtytx] = NULL;

		skb_dirtytx = (skb_dirtytx + 1) &
			      TX_RING_MOD_MASK(tx_ring_size);

		howmany++;
		spin_lock_irqsave(&tx_queue->txlock, flags);
		tx_queue->num_txbdfree += nr_txbds;
		spin_unlock_irqrestore(&tx_queue->txlock, flags);
	}

	/* If we freed a buffer, we can restart transmission, if necessary */
	if (tx_queue->num_txbdfree &&
	    netif_tx_queue_stopped(txq) &&
	    !(test_bit(GFAR_DOWN, &priv->state)))
		netif_wake_subqueue(priv->ndev, tqi);

	/* Update dirty indicators */
	tx_queue->skb_dirtytx = skb_dirtytx;
	tx_queue->dirty_tx = bdp;

	netdev_tx_completed_queue(txq, howmany, bytes_sent);
}

static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			   struct sk_buff *skb)
{
	struct net_device *dev = rx_queue->dev;
	struct gfar_private *priv = netdev_priv(dev);
	dma_addr_t buf;

	buf = dma_map_single(priv->dev, skb->data,
			     priv->rx_buffer_size, DMA_FROM_DEVICE);
	gfar_init_rxbdp(rx_queue, bdp, buf);
}

static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
	if (!skb)
		return NULL;

	gfar_align_skb(skb);

	return skb;
}

struct sk_buff *gfar_new_skb(struct net_device *dev)
{
	return gfar_alloc_skb(dev);
}

static inline void count_errors(unsigned short status, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;

		atomic64_inc(&estats->rx_trunc);

		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			atomic64_inc(&estats->rx_large);
		else
			atomic64_inc(&estats->rx_short);
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		atomic64_inc(&estats->rx_nonoctet);
	}
	if (status & RXBD_CRCERR) {
		atomic64_inc(&estats->rx_crcerr);
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		atomic64_inc(&estats->rx_overrun);
		stats->rx_over_errors++;
	}
}

irqreturn_t gfar_receive(int irq, void *grp_id)
{
	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
	unsigned long flags;
	u32 imask;

	if (likely(napi_schedule_prep(&grp->napi_rx))) {
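		/* mask further Rx interrupts for this group; the Rx poll
		 * routines re-enable them once a poll completes under
		 * budget
		 */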
		spin_lock_irqsave(&grp->grplock, flags);
		imask = gfar_read(&grp->regs->imask);
		imask &= IMASK_RX_DISABLED;
		gfar_write(&grp->regs->imask, imask);
		spin_unlock_irqrestore(&grp->grplock, flags);
		__napi_schedule(&grp->napi_rx);
	} else {
		/* Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
	}

	return IRQ_HANDLED;
}

/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *grp_id)
{
	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
	unsigned long flags;
	u32 imask;

	if (likely(napi_schedule_prep(&grp->napi_tx))) {
		spin_lock_irqsave(&grp->grplock, flags);
		imask = gfar_read(&grp->regs->imask);
		imask &= IMASK_TX_DISABLED;
		gfar_write(&grp->regs->imask, imask);
		spin_unlock_irqrestore(&grp->grplock, flags);
		__napi_schedule(&grp->napi_tx);
	} else {
		/* Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
	}

	return IRQ_HANDLED;
}

static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary.  Otherwise, we leave the
	 * checksum unverified and let the stack check it.
	 */
	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);
}


/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			       int amount_pull, struct napi_struct *napi)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;

	/* fcb is at the beginning if it exists */
	fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb
	 * Remove the padded bytes, if there are any
	 */
	if (amount_pull) {
		skb_record_rx_queue(skb, fcb->rq);
		skb_pull(skb, amount_pull);
	}

	/* Get receive timestamp from the skb */
	if (priv->hwts_rx_en) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		u64 *ns = (u64 *)skb->data;

		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
		shhwtstamps->hwtstamp = ns_to_ktime(*ns);
	}

	if (priv->padding)
		skb_pull(skb, priv->padding);

	if (dev->features & NETIF_F_RXCSUM)
		gfar_rx_checksum(skb, fcb);

	/* Tell the skb what kind of packet this is */
	skb->protocol = eth_type_trans(skb, dev);

	/* We need to check for NETIF_F_HW_VLAN_CTAG_RX here.
	 * Even if vlan rx accel is disabled, on some chips
	 * RXFCB_VLN is pseudo randomly set.
	 */
	if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
	    fcb->flags & RXFCB_VLN)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), fcb->vlctl);

	/* Send the packet up the stack */
	napi_gro_receive(napi, skb);
}

/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached. Returns the number
 * of frames handled
 */
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
	struct net_device *dev = rx_queue->dev;
	struct rxbd8 *bdp, *base;
	struct sk_buff *skb;
	int pkt_len;
	int amount_pull;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = rx_queue->cur_rx;
	base = rx_queue->rx_bd_base;

	amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;

	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		struct sk_buff *newskb;

		rmb();

		/* Add another skb for the future */
		newskb = gfar_new_skb(dev);

		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];

		dma_unmap_single(priv->dev, bdp->bufPtr,
				 priv->rx_buffer_size, DMA_FROM_DEVICE);

		if (unlikely(!(bdp->status & RXBD_ERR) &&
			     bdp->length > priv->rx_buffer_size))
			bdp->status = RXBD_LARGE;

		/* We drop the frame if we failed to allocate a new buffer */
		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
			     bdp->status & RXBD_ERR)) {
			count_errors(bdp->status, dev);

			if (unlikely(!newskb))
				newskb = skb;
			else if (skb)
				dev_kfree_skb(skb);
		} else {
			/* Increment the number of packets */
			rx_queue->stats.rx_packets++;
			howmany++;

			if (likely(skb)) {
				pkt_len = bdp->length - ETH_FCS_LEN;
				/* Remove the FCS from the packet length */
				skb_put(skb, pkt_len);
				rx_queue->stats.rx_bytes += pkt_len;
				skb_record_rx_queue(skb, rx_queue->qindex);
				gfar_process_frame(dev, skb, amount_pull,
						   &rx_queue->grp->napi_rx);

			} else {
				netif_warn(priv, rx_err, dev, "Missing skb!\n");
				rx_queue->stats.rx_dropped++;
				atomic64_inc(&priv->extra_stats.rx_skbmissing);
			}

		}

		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;

		/* Setup the new bdp */
		gfar_new_rxbdp(rx_queue, bdp, newskb);

		/* Update to the next pointer */
		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);

		/* update to point at the next skb */
		rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
				      RX_RING_MOD_MASK(rx_queue->rx_ring_size);
	}

	/* Update the current rxbd pointer to be the next one */
	rx_queue->cur_rx = bdp;

	return howmany;
}

static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_rx);
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
	int work_done = 0;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_RX_MASK);

	work_done = gfar_clean_rx_ring(rx_queue, budget);

	if (work_done < budget) {
		u32 imask;
		napi_complete(napi);
		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		spin_lock_irq(&gfargrp->grplock);
		imask = gfar_read(&regs->imask);
		imask |= IMASK_RX_DEFAULT;
		gfar_write(&regs->imask, imask);
		spin_unlock_irq(&gfargrp->grplock);
	}

	return work_done;
}

static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_tx);
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
	u32 imask;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_TX_MASK);

	/* run Tx cleanup to completion */
	if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
		gfar_clean_tx_ring(tx_queue);

	napi_complete(napi);

	spin_lock_irq(&gfargrp->grplock);
	imask = gfar_read(&regs->imask);
	imask |= IMASK_TX_DEFAULT;
	gfar_write(&regs->imask, imask);
	spin_unlock_irq(&gfargrp->grplock);

	return 0;
}

static int gfar_poll_rx(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_rx);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int work_done = 0, work_done_per_q = 0;
	int i, budget_per_q = 0;
	unsigned long rstat_rxf;
	int num_act_queues;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_RX_MASK);

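	/* each Rx queue with frames pending (RXF bit set in RSTAT) gets an
	 * equal share of the NAPI budget
	 */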
	rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;

	num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
	if (num_act_queues)
		budget_per_q = budget/num_act_queues;

	for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
		/* skip queue if not active */
		if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
			continue;

		rx_queue = priv->rx_queue[i];
		work_done_per_q =
			gfar_clean_rx_ring(rx_queue, budget_per_q);
		work_done += work_done_per_q;

		/* finished processing this queue */
		if (work_done_per_q < budget_per_q) {
			/* clear active queue hw indication */
			gfar_write(&regs->rstat,
				   RSTAT_CLEAR_RXF0 >> i);
			num_act_queues--;

			if (!num_act_queues)
				break;
		}
	}

	if (!num_act_queues) {
		u32 imask;
		napi_complete(napi);

		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		spin_lock_irq(&gfargrp->grplock);
		imask = gfar_read(&regs->imask);
		imask |= IMASK_RX_DEFAULT;
		gfar_write(&regs->imask, imask);
		spin_unlock_irq(&gfargrp->grplock);
	}

	return work_done;
}

static int gfar_poll_tx(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi_tx);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	int has_tx_work = 0;
	int i;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_TX_MASK);

	for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
		tx_queue = priv->tx_queue[i];
		/* run Tx cleanup to completion */
		if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
			gfar_clean_tx_ring(tx_queue);
			has_tx_work = 1;
		}
	}

	if (!has_tx_work) {
		u32 imask;
		napi_complete(napi);

		spin_lock_irq(&gfargrp->grplock);
		imask = gfar_read(&regs->imask);
		imask |= IMASK_TX_DEFAULT;
		gfar_write(&regs->imask, imask);
		spin_unlock_irq(&gfargrp->grplock);
	}

	return 0;
}


#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i;

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++) {
			struct gfar_priv_grp *grp = &priv->gfargrp[i];

			disable_irq(gfar_irq(grp, TX)->irq);
			disable_irq(gfar_irq(grp, RX)->irq);
			disable_irq(gfar_irq(grp, ER)->irq);
			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
			enable_irq(gfar_irq(grp, ER)->irq);
			enable_irq(gfar_irq(grp, RX)->irq);
			enable_irq(gfar_irq(grp, TX)->irq);
		}
	} else {
		for (i = 0; i < priv->num_grps; i++) {
			struct gfar_priv_grp *grp = &priv->gfargrp[i];

			disable_irq(gfar_irq(grp, TX)->irq);
			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
			enable_irq(gfar_irq(grp, TX)->irq);
		}
	}
}
#endif

/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;

	/* Save ievent for future reference */
	u32 events = gfar_read(&gfargrp->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, grp_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, grp_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, grp_id);

	return IRQ_HANDLED;
}

/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;

	if (unlikely(phydev->link != priv->oldlink ||
		     phydev->duplex != priv->oldduplex ||
		     phydev->speed != priv->oldspeed))
		gfar_update_link_state(priv);
}

/* Update the hash table based on the current list of multicast
 * addresses we subscribe to.  Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed)
 */
static void gfar_set_multi(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	if (dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting them
		 */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if (netdev_mc_empty(dev))
			return;

		/* Parse the list, and set the appropriate bits */
		netdev_for_each_mc_addr(ha, dev) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx, ha->addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, ha->addr);
		}
	}
}

/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception
 */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};

	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
		gfar_set_mac_for_addr(dev, idx, zero_arr);
}

/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table.  The table is controlled through 8 32-bit registers:
 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255.  This means that the 3 most significant bits in the
 * hash index which gaddr register to use, and the 5 other bits
 * indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the entry.
 */
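/* Worked example, assuming the non-extended 8-bit hash width: a CRC
 * result of 0xa5000000 has 0xa5 = 0b10100101 as its top byte, so the
 * top three bits (0b101 = 5) select gaddr5 and the next five bits
 * (0b00101 = 5) select bit 5 counting from the MSB, i.e. the
 * 1 << (31 - 5) position set below.
 */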
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(ETH_ALEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31-whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
}

/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;

	macptr += num*2;

	/* For a station address of 0x12345678ABCD in transmission
	 * order (BE), MACnADDR1 is set to 0xCDAB7856 and
	 * MACnADDR2 is set to 0x34120000.
	 */
	tempval = (addr[5] << 24) | (addr[4] << 16) |
		  (addr[3] << 8)  |  addr[2];

	gfar_write(macptr, tempval);

	tempval = (addr[1] << 24) | (addr[0] << 16);

	gfar_write(macptr+1, tempval);
}

/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_private *priv = gfargrp->priv;
	struct net_device *dev = priv->ndev;

	/* Save ievent for future reference */
	u32 events = gfar_read(&regs->ievent);

	/* Clear IEVENT */
	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	/* Hmm... */
	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		netdev_dbg(dev,
			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
			   events, gfar_read(&regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			unsigned long flags;

			netif_dbg(priv, tx_err, dev,
				  "TX FIFO underrun, packet dropped\n");
			dev->stats.tx_dropped++;
			atomic64_inc(&priv->extra_stats.tx_underrun);

			local_irq_save(flags);
			lock_tx_qs(priv);

			/* Reactivate the Tx Queues */
			gfar_write(&regs->tstat, gfargrp->tstat);

			unlock_tx_qs(priv);
			local_irq_restore(flags);
		}
		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_errors++;
		atomic64_inc(&priv->extra_stats.rx_bsy);

		gfar_receive(irq, grp_id);

		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
			  gfar_read(&regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		atomic64_inc(&priv->extra_stats.rx_babr);

		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
	}
	if (events & IEVENT_EBERR) {
		atomic64_inc(&priv->extra_stats.eberr);
		netif_dbg(priv, rx_err, dev, "bus error\n");
	}
	if (events & IEVENT_RXC)
		netif_dbg(priv, rx_status, dev, "control frame\n");

	if (events & IEVENT_BABT) {
		atomic64_inc(&priv->extra_stats.tx_babt);
		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
	}
	return IRQ_HANDLED;
}

static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
{
	struct phy_device *phydev = priv->phydev;
	u32 val = 0;
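	/* PAUSE flow control is only meaningful in full duplex */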

	if (!phydev->duplex)
		return val;

	if (!priv->pause_aneg_en) {
		if (priv->tx_pause_en)
			val |= MACCFG1_TX_FLOW;
		if (priv->rx_pause_en)
			val |= MACCFG1_RX_FLOW;
	} else {
		u16 lcl_adv, rmt_adv;
		u8 flowctrl;
		/* get link partner capabilities */
		rmt_adv = 0;
		if (phydev->pause)
			rmt_adv = LPA_PAUSE_CAP;
		if (phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		lcl_adv = mii_advertise_flowctrl(phydev->advertising);

		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
		if (flowctrl & FLOW_CTRL_TX)
			val |= MACCFG1_TX_FLOW;
		if (flowctrl & FLOW_CTRL_RX)
			val |= MACCFG1_RX_FLOW;
	}

	return val;
}

static noinline void gfar_update_link_state(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	struct phy_device *phydev = priv->phydev;

	if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
		return;

	if (phydev->link) {
		u32 tempval1 = gfar_read(&regs->maccfg1);
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);

		if (phydev->duplex != priv->oldduplex) {
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);

				ecntrl &= ~(ECNTRL_R100);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100
				 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				netif_warn(priv, link, priv->ndev,
					   "Ack!  Speed (%d) is not 10/100/1000!\n",
					   phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
		tempval1 |= gfar_get_flowctrl_cfg(priv);

		gfar_write(&regs->maccfg1, tempval1);
		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink)
			priv->oldlink = 1;

	} else if (priv->oldlink) {
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (netif_msg_link(priv))
		phy_print_status(phydev);
}

static struct of_device_id gfar_match[] =
{
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{
		.compatible = "fsl,etsec2",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gfar_match);

/* Structure for a device driver */
static struct platform_driver gfar_driver = {
	.driver = {
		.name = "fsl-gianfar",
		.owner = THIS_MODULE,
		.pm = GFAR_PM_OPS,
		.of_match_table = gfar_match,
	},
	.probe = gfar_probe,
	.remove = gfar_remove,
};

module_platform_driver(gfar_driver);