/*
 * drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 *  Gianfar:  AKA Lambda Draconis, "Dragon"
 *  RA 11 31 24.2
 *  Dec +69 19 52
 *  V 3.84
 *  B-V +1.62
 *
 *  Theory of operation
 *
 *  The driver is initialized through of_device. Configuration information
 *  is therefore conveyed through an OF-style device tree.
 *
 *  The Gianfar Ethernet Controller uses a ring of buffer
 *  descriptors.  The beginning is indicated by a register
 *  pointing to the physical address of the start of the ring.
 *  The end is determined by a "wrap" bit being set in the
 *  last descriptor of the ring.
 *
 *  When a packet is received, the RXF bit in the
 *  IEVENT register is set, triggering an interrupt when the
 *  corresponding bit in the IMASK register is also set (if
 *  interrupt coalescing is active, then the interrupt may not
 *  happen immediately, but will wait until either a set number
 *  of frames or amount of time have passed).  In NAPI, the
 *  interrupt handler will signal there is work to be done, and
 *  exit. This method will start at the last known empty
 *  descriptor, and process every subsequent descriptor until there
 *  are none left with data (NAPI will stop after a set number of
 *  packets to give time to other tasks, but will eventually
 *  process all the packets).  The data arrives inside a
 *  pre-allocated skb, and so after the skb is passed up to the
 *  stack, a new skb must be allocated, and the address field in
 *  the buffer descriptor must be updated to indicate this new
 *  skb.
 *
 *  When the kernel requests that a packet be transmitted, the
 *  driver starts where it left off last time, and points the
 *  descriptor at the buffer which was passed in.  The driver
 *  then informs the DMA engine that there are packets ready to
 *  be transmitted.  Once the controller is finished transmitting
 *  the packet, an interrupt may be triggered (under the same
 *  conditions as for reception, but depending on the TXF bit).
 *  The driver then cleans up the buffer.
 */
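
/* An illustrative sketch (not the driver's actual RX path) of walking a
 * descriptor ring that uses an empty flag and a wrap bit, as described
 * above.  The names here are generic placeholders, not gianfar types:
 *
 *	struct bd { u32 lstatus; u32 bufPtr; };
 *	struct bd *bd = ring_base;
 *
 *	while (!(bd->lstatus & BD_EMPTY)) {	// descriptor holds data
 *		process(bd);			// hand the skb to the stack
 *		bd->lstatus |= BD_EMPTY;	// give it back to the HW
 *		bd = (bd->lstatus & BD_WRAP) ? ring_base : bd + 1;
 *	}
 */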

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DEBUG

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#include <asm/reg.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "gianfar.h"
#include "fsl_pq_mdio.h"

#define TX_TIMEOUT      (1*HZ)

const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
		struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			      int amount_pull);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = buf;

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	eieio();

	bdp->lstatus = lstatus;
}
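
/* Note: the eieio() above is the PowerPC I/O ordering barrier; it orders
 * the bufPtr store before the lstatus store, so the controller never
 * sees a descriptor marked empty while it still points at a stale buffer.
 */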

static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status |= TXBD_WRAP;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->cur_rx = rx_queue->rx_bd_base;
		rx_queue->skb_currx = 0;
		rxbdp = rx_queue->rx_bd_base;

		for (j = 0; j < rx_queue->rx_ring_size; j++) {
			struct sk_buff *skb = rx_queue->rx_skbuff[j];

			if (skb) {
				gfar_init_rxbdp(rx_queue, rxbdp,
						rxbdp->bufPtr);
			} else {
				skb = gfar_new_skb(ndev);
				if (!skb) {
					netdev_err(ndev, "Can't allocate RX buffers\n");
					goto err_rxalloc_fail;
				}
				rx_queue->rx_skbuff[j] = skb;

				gfar_new_rxbdp(rx_queue, rxbdp, skb);
			}

			rxbdp++;
		}
	}

	return 0;

err_rxalloc_fail:
	free_skb_resources(priv);
	return -ENOMEM;
}

static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j, k;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = &priv->ofdev->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
			sizeof(struct txbd8) * priv->total_tx_ring_size +
			sizeof(struct rxbd8) * priv->total_rx_ring_size,
			&addr, GFP_KERNEL);
	if (!vaddr) {
		netif_err(priv, ifup, ndev,
			  "Could not allocate buffer descriptors!\n");
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr	+= sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr	+= sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->dev = ndev;
		addr	+= sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr	+= sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
				  tx_queue->tx_ring_size, GFP_KERNEL);
		if (!tx_queue->tx_skbuff) {
			netif_err(priv, ifup, ndev,
				  "Could not allocate tx_skbuff\n");
			goto cleanup;
		}

		for (k = 0; k < tx_queue->tx_ring_size; k++)
			tx_queue->tx_skbuff[k] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
				  rx_queue->rx_ring_size, GFP_KERNEL);
		if (!rx_queue->rx_skbuff) {
			netif_err(priv, ifup, ndev,
				  "Could not allocate rx_skbuff\n");
			goto cleanup;
		}

		for (j = 0; j < rx_queue->rx_ring_size; j++)
			rx_queue->rx_skbuff[j] = NULL;
	}

	if (gfar_init_bds(ndev))
		goto cleanup;

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}

static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}
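
/* Note: baddr += 2 advances two u32 registers (8 bytes) per queue, which
 * matches the spacing of the per-queue tbase0..7/rbase0..7 registers in
 * the MAC register map.
 */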

static void gfar_init_mac(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;
	u32 tctrl = 0;
	u32 attrs = 0;

	/* write the tx/rx base registers */
	gfar_init_tx_rx_base(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing(priv, 0xFF, 0xFF);

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN;
		/* Program the RIR0 reg with the required distribution */
		gfar_write(&regs->rir0, DEFAULT_RIR0);
	}

	if (ndev->features & NETIF_F_RXCSUM)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(ndev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Insert receive time stamps into padding alignment bytes */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(8);
		priv->padding = 8;
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	if (ndev->features & NETIF_F_HW_VLAN_RX)
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);

	if (ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	tctrl |= TCTRL_TXSCHED_PRIO;

	gfar_write(&regs->tctrl, tctrl);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
}

static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i = 0;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}

static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_features = gfar_set_features,
	.ndo_set_rx_mode = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};

void lock_rx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_lock(&priv->rx_queue[i]->rxlock);
}

void lock_tx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_rx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_unlock(&priv->rx_queue[i]->rxlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_unlock(&priv->tx_queue[i]->txlock);
}
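
/* These helpers take or release every per-queue lock in ascending queue
 * order; callers such as stop_gfar() and gfar_suspend() below bracket
 * them with local_irq_save()/restore() to quiesce all queues at once.
 */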

static bool gfar_is_vlan_on(struct gfar_private *priv)
{
	return (priv->ndev->features & NETIF_F_HW_VLAN_RX) ||
	       (priv->ndev->features & NETIF_F_HW_VLAN_TX);
}

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
	return gfar_is_vlan_on(priv) ||
		(priv->ndev->features & NETIF_F_RXCSUM) ||
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
}

static void free_tx_pointers(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void free_rx_pointers(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void disable_napi(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_grps; i++)
		napi_disable(&priv->gfargrp[i].napi);
}

static void enable_napi(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_grps; i++)
		napi_enable(&priv->gfargrp[i].napi);
}

static int gfar_parse_group(struct device_node *np,
		struct gfar_private *priv, const char *model)
{
	u32 *queue_mask;

	priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0);
	if (!priv->gfargrp[priv->num_grps].regs)
		return -ENOMEM;

	priv->gfargrp[priv->num_grps].interruptTransmit =
			irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		priv->gfargrp[priv->num_grps].interruptReceive =
			irq_of_parse_and_map(np, 1);
		priv->gfargrp[priv->num_grps].interruptError =
			irq_of_parse_and_map(np, 2);

		if (priv->gfargrp[priv->num_grps].interruptTransmit == NO_IRQ ||
		    priv->gfargrp[priv->num_grps].interruptReceive  == NO_IRQ ||
		    priv->gfargrp[priv->num_grps].interruptError    == NO_IRQ)
			return -EINVAL;
	}

	priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
	priv->gfargrp[priv->num_grps].priv = priv;
	spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
	if (priv->mode == MQ_MG_MODE) {
		queue_mask = (u32 *)of_get_property(np,
					"fsl,rx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].rx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
		queue_mask = (u32 *)of_get_property(np,
					"fsl,tx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].tx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
	} else {
		priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
		priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
	}
	priv->num_grps++;

	return 0;
}
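
/* Illustrative device-tree fragment (node names and values are made up)
 * showing the properties gfar_parse_group() and gfar_of_init() below
 * consume:
 *
 *	ethernet@24000 {
 *		compatible = "fsl,etsec2";
 *		fsl,num_tx_queues = <8>;
 *		fsl,num_rx_queues = <8>;
 *		queue-group@24000 {
 *			fsl,rx-bit-map = <0xff>;
 *			fsl,tx-bit-map = <0xff>;
 *		};
 *	};
 */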

static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	const u32 *stash;
	const u32 *stash_len;
	const u32 *stash_idx;
	unsigned int num_tx_qs, num_rx_qs;
	u32 *tx_queues, *rx_queues;

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	/* parse the num of tx and rx queues */
	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
	num_tx_qs = tx_queues ? *tx_queues : 1;

	if (num_tx_qs > MAX_TX_QS) {
		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
		       num_tx_qs, MAX_TX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
	num_rx_qs = rx_queues ? *rx_queues : 1;

	if (num_rx_qs > MAX_RX_QS) {
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
		       num_rx_qs, MAX_RX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->node = ofdev->dev.of_node;
	priv->ndev = dev;

	priv->num_tx_queues = num_tx_qs;
	netif_set_real_num_rx_queues(dev, num_rx_qs);
	priv->num_rx_queues = num_rx_qs;
	priv->num_grps = 0x0;

	/* Init Rx queue filer rule set linked list */
	INIT_LIST_HEAD(&priv->rx_list.list);
	priv->rx_list.count = 0;
	mutex_init(&priv->rx_queue_access);

	model = of_get_property(np, "model", NULL);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (of_device_is_compatible(np, "fsl,etsec2")) {
		priv->mode = MQ_MG_MODE;
		for_each_child_of_node(np, child) {
			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else {
		priv->mode = SQ_SG_MODE;
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	for (i = 0; i < priv->num_tx_queues; i++)
		priv->tx_queue[i] = NULL;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->rx_queue[i] = NULL;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i]) {
			err = -ENOMEM;
			goto tx_alloc_failed;
		}
		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = dev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i]) {
			err = -ENOMEM;
			goto rx_alloc_failed;
		}
		priv->rx_queue[i]->rx_skbuff = NULL;
		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->dev = dev;
		spin_lock_init(&(priv->rx_queue[i]->rxlock));
	}

	stash = of_get_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	stash_len = of_get_property(np, "rx-stash-len", NULL);

	if (stash_len)
		priv->rx_stash_size = *stash_len;

	stash_idx = of_get_property(np, "rx-stash-idx", NULL);

	if (stash_idx)
		priv->rx_stash_index = *stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR;
	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR |
			FSL_GIANFAR_DEV_HAS_PADDING |
			FSL_GIANFAR_DEV_HAS_CSUM |
			FSL_GIANFAR_DEV_HAS_VLAN |
			FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
			FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
			FSL_GIANFAR_DEV_HAS_TIMER;

	ctype = of_get_property(np, "phy-connection-type", NULL);

	/* We only care about rgmii-id.  The rest are autodetected */
	if (ctype && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* Find the TBI PHY.  If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

rx_alloc_failed:
	free_rx_pointers(priv);
tx_alloc_failed:
	free_tx_pointers(priv);
err_grp_init:
	unmap_group_regs(priv);
	free_netdev(dev);
	return err;
}

static int gfar_hwtstamp_ioctl(struct net_device *netdev,
			struct ifreq *ifr, int cmd)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		if (priv->hwts_rx_en) {
			stop_gfar(netdev);
			priv->hwts_rx_en = 0;
			startup_gfar(netdev);
		}
		break;
	default:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		if (!priv->hwts_rx_en) {
			stop_gfar(netdev);
			priv->hwts_rx_en = 1;
			startup_gfar(netdev);
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
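
/* Illustrative userspace sketch (not part of this file) of how an
 * application reaches the handler above.  The device name and socket fd
 * are assumptions; error checking is omitted.
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 */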

/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCSHWTSTAMP)
		return gfar_hwtstamp_ioctl(dev, rq, cmd);

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, rq, cmd);
}

static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
	unsigned int new_bit_map = 0x0;
	int mask = 0x1 << (max_qs - 1), i;

	for (i = 0; i < max_qs; i++) {
		if (bit_map & mask)
			new_bit_map = new_bit_map + (1 << i);
		mask = mask >> 0x1;
	}
	return new_bit_map;
}
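
/* For example, reverse_bitmap(0x01, 8) returns 0x80: the MSB-first bit
 * map used by the hardware is mirrored so that queue 0 lands in bit 0,
 * which is what for_each_set_bit() expects (see gfar_probe() below).
 */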

static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
				   u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}

static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		priv->ftp_rqfcr[i] = rqfcr;
		priv->ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}

static void gfar_detect_errata(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
	unsigned int rev = svr & 0xffff;

	/* MPC8313 Rev 2.0 and higher; All MPC837x */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
			(pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_74;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
			(pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_76;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
			(pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_A002;

	/* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
			(pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
		priv->errata |= GFAR_ERRATA_12;

	if (priv->errata)
		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
			 priv->errata);
}
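
/* Worked example (hypothetical register values): with PVR = 0x80850010
 * and SVR = 0x80b00020, mod = (0x80b0 & 0xfff6) = 0x80b0 and
 * rev = 0x0020, so an MPC8313 at rev 2.0 picks up GFAR_ERRATA_74, _76
 * and _A002, but not GFAR_ERRATA_12 (which requires rev < 2.0 there).
 */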

/* Set up the ethernet device structure, private data,
 * and anything else we need before we start */
static int gfar_probe(struct platform_device *ofdev)
{
	u32 tempval;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct gfar __iomem *regs = NULL;
	int err = 0, i, grp_idx = 0;
	int len_devname;
	u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
	u32 isrg = 0;
	u32 __iomem *baddr;

	err = gfar_of_init(ofdev, &dev);

	if (err)
		return err;

	priv = netdev_priv(dev);
	priv->ndev = dev;
	priv->ofdev = ofdev;
	priv->node = ofdev->dev.of_node;
	SET_NETDEV_DEV(dev, &ofdev->dev);

	spin_lock_init(&priv->bflock);
	INIT_WORK(&priv->reset_task, gfar_reset_task);

	dev_set_drvdata(&ofdev->dev, priv);
	regs = priv->gfargrp[0].regs;

	gfar_detect_errata(priv);

	/* Stop the DMA engine now, in case it was running before */
	/* (The firmware could have used it, and left it running). */
	gfar_halt(dev);

	/* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);

	/* We need to delay at least 3 TX clocks */
	udelay(2);

	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize MACCFG2. */
	tempval = MACCFG2_INIT_SETTINGS;
	if (gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
	gfar_write(&regs->maccfg2, tempval);

	/* Initialize ECNTRL */
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);

	/* Set the dev->base_addr to the gfar reg region */
	dev->base_addr = (unsigned long) regs;

	SET_NETDEV_DEV(dev, &ofdev->dev);

	/* Fill in the dev structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->mtu = 1500;
	dev->netdev_ops = &gfar_netdev_ops;
	dev->ethtool_ops = &gfar_ethtool_ops;

	/* Register for napi ...We are registering NAPI for each grp */
	for (i = 0; i < priv->num_grps; i++)
		netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT);

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
			NETIF_F_RXCSUM;
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
			NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
		dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;

	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
		priv->padding = DEFAULT_PADDING;
	else
		priv->padding = 0;

	if (dev->features & NETIF_F_IP_CSUM ||
			priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
		dev->hard_header_len += GMAC_FCB_LEN;

	/* Program the isrg regs only if number of grps > 1 */
	if (priv->num_grps > 1) {
		baddr = &regs->isrg0;
		for (i = 0; i < priv->num_grps; i++) {
			isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
			isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
			gfar_write(baddr, isrg);
			baddr++;
			isrg = 0x0;
		}
	}

	/* Need to reverse the bit maps as bit_map's MSB is q0, but
	 * for_each_set_bit parses from right to left, which basically
	 * reverses the queue numbers */
	for (i = 0; i < priv->num_grps; i++) {
		priv->gfargrp[i].tx_bit_map = reverse_bitmap(
				priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
		priv->gfargrp[i].rx_bit_map = reverse_bitmap(
				priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
	}

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups */
	for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
		priv->gfargrp[grp_idx].num_rx_queues = 0x0;
		for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
				priv->num_rx_queues) {
			priv->gfargrp[grp_idx].num_rx_queues++;
			priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
			rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
			rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		}
		priv->gfargrp[grp_idx].num_tx_queues = 0x0;
		for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
				priv->num_tx_queues) {
			priv->gfargrp[grp_idx].num_tx_queues++;
			priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
			tstat = tstat | (TSTAT_CLEAR_THALT >> i);
			tqueue = tqueue | (TQUEUE_EN0 >> i);
		}
		priv->gfargrp[grp_idx].rstat = rstat;
		priv->gfargrp[grp_idx].tstat = tstat;
		rstat = tstat = 0;
	}

	gfar_write(&regs->rqueue, rqueue);
	gfar_write(&regs->tqueue, tqueue);

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;

	/* Initializing some of the rx/tx queue level parameters */
	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
		priv->tx_queue[i]->txic = DEFAULT_TXIC;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
	}

	/* always enable rx filer */
	priv->rx_filer_enable = 1;
	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

	err = register_netdev(dev);

	if (err) {
		pr_err("%s: Cannot register net device, aborting\n", dev->name);
		goto register_fail;
	}

	device_init_wakeup(&dev->dev,
		priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	/* fill out IRQ number and name fields */
	len_devname = strlen(dev->name);
	for (i = 0; i < priv->num_grps; i++) {
		strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name,
				len_devname);
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
			strncpy(&priv->gfargrp[i].int_name_tx[len_devname],
				"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_tx[
				strlen(priv->gfargrp[i].int_name_tx)] = i+48;
			strncpy(&priv->gfargrp[i].int_name_tx[strlen(
				priv->gfargrp[i].int_name_tx)],
				"_tx", sizeof("_tx") + 1);

			strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name,
					len_devname);
			strncpy(&priv->gfargrp[i].int_name_rx[len_devname],
					"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_rx[
				strlen(priv->gfargrp[i].int_name_rx)] = i+48;
			strncpy(&priv->gfargrp[i].int_name_rx[strlen(
				priv->gfargrp[i].int_name_rx)],
				"_rx", sizeof("_rx") + 1);

			strncpy(&priv->gfargrp[i].int_name_er[0], dev->name,
					len_devname);
			strncpy(&priv->gfargrp[i].int_name_er[len_devname],
				"_g", sizeof("_g"));
			priv->gfargrp[i].int_name_er[strlen(
					priv->gfargrp[i].int_name_er)] = i+48;
			strncpy(&priv->gfargrp[i].int_name_er[strlen(
				priv->gfargrp[i].int_name_er)],
				"_er", sizeof("_er") + 1);
		} else
			priv->gfargrp[i].int_name_tx[len_devname] = '\0';
	}

	/* Initialize the filer table */
	gfar_init_filer_table(priv);

	/* Create all the sysfs files */
	gfar_init_sysfs(dev);

	/* Print out the device info */
	netdev_info(dev, "mac: %pM\n", dev->dev_addr);

	/* Even more device info helps when determining which kernel */
	/* provided which set of benchmarks. */
	netdev_info(dev, "Running with NAPI enabled\n");
	for (i = 0; i < priv->num_rx_queues; i++)
		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
			    i, priv->rx_queue[i]->rx_ring_size);
	for (i = 0; i < priv->num_tx_queues; i++)
		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
			    i, priv->tx_queue[i]->tx_ring_size);

	return 0;

register_fail:
	unmap_group_regs(priv);
	free_tx_pointers(priv);
	free_rx_pointers(priv);
	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);
	free_netdev(dev);
	return err;
}

static int gfar_remove(struct platform_device *ofdev)
{
	struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);

	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);

	dev_set_drvdata(&ofdev->dev, NULL);

	unregister_netdev(priv->ndev);
	unmap_group_regs(priv);
	free_netdev(priv->ndev);

	return 0;
}

#ifdef CONFIG_PM

static int gfar_suspend(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;

	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	netif_device_detach(ndev);

	if (netif_running(ndev)) {

		local_irq_save(flags);
		lock_tx_qs(priv);
		lock_rx_qs(priv);

		gfar_halt_nodisable(ndev);

		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
		tempval = gfar_read(&regs->maccfg1);

		tempval &= ~MACCFG1_TX_EN;

		if (!magic_packet)
			tempval &= ~MACCFG1_RX_EN;

		gfar_write(&regs->maccfg1, tempval);

		unlock_rx_qs(priv);
		unlock_tx_qs(priv);
		local_irq_restore(flags);

		disable_napi(priv);

		if (magic_packet) {
			/* Enable interrupt on Magic Packet */
			gfar_write(&regs->imask, IMASK_MAG);

			/* Enable Magic Packet mode */
			tempval = gfar_read(&regs->maccfg2);
			tempval |= MACCFG2_MPEN;
			gfar_write(&regs->maccfg2, tempval);
		} else {
			phy_stop(priv->phydev);
		}
	}

	return 0;
}

static int gfar_resume(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;
	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);
		return 0;
	}

	if (!magic_packet && priv->phydev)
		phy_start(priv->phydev);

	/* Disable Magic Packet mode, in case something
	 * else woke us up.
	 */
	local_irq_save(flags);
	lock_tx_qs(priv);
	lock_rx_qs(priv);

	tempval = gfar_read(&regs->maccfg2);
	tempval &= ~MACCFG2_MPEN;
	gfar_write(&regs->maccfg2, tempval);

	gfar_start(ndev);

	unlock_rx_qs(priv);
	unlock_tx_qs(priv);
	local_irq_restore(flags);

	netif_device_attach(ndev);

	enable_napi(priv);

	return 0;
}

static int gfar_restore(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!netif_running(ndev))
		return 0;

	gfar_init_bds(ndev);
	init_registers(ndev);
	gfar_set_mac_address(ndev);
	gfar_init_mac(ndev);
	gfar_start(ndev);

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	if (priv->phydev)
		phy_start(priv->phydev);

	netif_device_attach(ndev);
	enable_napi(priv);

	return 0;
}

static struct dev_pm_ops gfar_pm_ops = {
	.suspend = gfar_suspend,
	.resume = gfar_resume,
	.freeze = gfar_suspend,
	.thaw = gfar_resume,
	.restore = gfar_restore,
};

#define GFAR_PM_OPS (&gfar_pm_ops)

#else

#define GFAR_PM_OPS NULL

#endif

/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 ecntrl;

	ecntrl = gfar_read(&regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE)
			return PHY_INTERFACE_MODE_RMII;
		else {
			phy_interface_t interface = priv->interface;

			/*
			 * This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}

/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	uint gigabit_support =
		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
		SUPPORTED_1000baseT_Full : 0;
	phy_interface_t interface;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	interface = gfar_get_interface(dev);

	priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
				      interface);
	if (!priv->phydev)
		priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
							 interface);
	if (!priv->phydev) {
		dev_err(&dev->dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

	/* Remove any features not supported by the controller */
	priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
	priv->phydev->advertising = priv->phydev->supported;

	return 0;
}

/*
 * Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip.  We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register.  We assume
 * that the TBIPA register is valid.  Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *tbiphy;

	if (!priv->tbi_node) {
		dev_warn(&dev->dev, "error: SGMII mode requires that the "
				    "device tree specify a tbi-handle\n");
		return;
	}

	tbiphy = of_phy_find_device(priv->tbi_node);
	if (!tbiphy) {
		dev_err(&dev->dev, "error: Could not get TBI device\n");
		return;
	}

	/*
	 * If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
	 * everything for us?  Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
		return;

	/* Single clk mode, mii mode off (for serdes communication) */
	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);

	phy_write(tbiphy, MII_ADVERTISE,
			ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
			ADVERTISE_1000XPSE_ASYM);

	phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE |
			BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
}

static void init_registers(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	int i = 0;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	/* Init hash registers to zero */
	gfar_write(&regs->igaddr0, 0);
	gfar_write(&regs->igaddr1, 0);
	gfar_write(&regs->igaddr2, 0);
	gfar_write(&regs->igaddr3, 0);
	gfar_write(&regs->igaddr4, 0);
	gfar_write(&regs->igaddr5, 0);
	gfar_write(&regs->igaddr6, 0);
	gfar_write(&regs->igaddr7, 0);

	gfar_write(&regs->gaddr0, 0);
	gfar_write(&regs->gaddr1, 0);
	gfar_write(&regs->gaddr2, 0);
	gfar_write(&regs->gaddr3, 0);
	gfar_write(&regs->gaddr4, 0);
	gfar_write(&regs->gaddr5, 0);
	gfar_write(&regs->gaddr6, 0);
	gfar_write(&regs->gaddr7, 0);

	/* Zero out the rmon mib registers if it has them */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));

		/* Mask off the CAM interrupts */
		gfar_write(&regs->rmon.cam1, 0xffffffff);
		gfar_write(&regs->rmon.cam2, 0xffffffff);
	}

	/* Initialize the max receive buffer length */
	gfar_write(&regs->mrblr, priv->rx_buffer_size);

	/* Initialize the Minimum Frame Length Register */
	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
}

static int __gfar_is_rx_idle(struct gfar_private *priv)
{
	u32 res;

	/*
	 * Normally TSEC should not hang on GRS commands, so we should
	 * actually wait for IEVENT_GRSC flag.
	 */
	if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002)))
		return 0;

	/*
	 * Read the eTSEC register at offset 0xD1C. If bits 7-14 are
	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
	 * and the Rx can be safely reset.
	 */
	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
	res &= 0x7f807f80;
	if ((res & 0xffff) == (res >> 16))
		return 1;

	return 0;
}

/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	u32 tempval;
	int i = 0;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Mask all interrupts */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);

		/* Clear all interrupts */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&regs->dmactrl);
	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
	    != (DMACTRL_GRS | DMACTRL_GTS)) {
		int ret;

		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
		gfar_write(&regs->dmactrl, tempval);

		do {
			ret = spin_event_timeout(((gfar_read(&regs->ievent) &
				 (IEVENT_GRSC | IEVENT_GTSC)) ==
				 (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
			if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
				ret = __gfar_is_rx_idle(priv);
		} while (!ret);
	}
}

/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	gfar_halt_nodisable(dev);

	/* Disable Rx and Tx */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}

static void free_grp_irqs(struct gfar_priv_grp *grp)
{
	free_irq(grp->interruptError, grp);
	free_irq(grp->interruptTransmit, grp);
	free_irq(grp->interruptReceive, grp);
}

void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
	int i;

	phy_stop(priv->phydev);

	/* Lock it down */
	local_irq_save(flags);
	lock_tx_qs(priv);
	lock_rx_qs(priv);

	gfar_halt(dev);

	unlock_rx_qs(priv);
	unlock_tx_qs(priv);
	local_irq_restore(flags);

	/* Free the IRQs */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++)
			free_grp_irqs(&priv->gfargrp[i]);
	} else {
		for (i = 0; i < priv->num_grps; i++)
			free_irq(priv->gfargrp[i].interruptTransmit,
					&priv->gfargrp[i]);
	}

	free_skb_resources(priv);
}
static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
{
	struct txbd8 *txbdp;
	struct gfar_private *priv = netdev_priv(tx_queue->dev);
	int i, j;

	txbdp = tx_queue->tx_bd_base;

	for (i = 0; i < tx_queue->tx_ring_size; i++) {
		if (!tx_queue->tx_skbuff[i])
			continue;

		dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
				txbdp->length, DMA_TO_DEVICE);
		txbdp->lstatus = 0;
		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
				j++) {
			txbdp++;
			dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
					txbdp->length, DMA_TO_DEVICE);
		}
		txbdp++;
		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
		tx_queue->tx_skbuff[i] = NULL;
	}
	kfree(tx_queue->tx_skbuff);
}

static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
{
	struct rxbd8 *rxbdp;
	struct gfar_private *priv = netdev_priv(rx_queue->dev);
	int i;

	rxbdp = rx_queue->rx_bd_base;

	for (i = 0; i < rx_queue->rx_ring_size; i++) {
		if (rx_queue->rx_skbuff[i]) {
			dma_unmap_single(&priv->ofdev->dev,
					rxbdp->bufPtr, priv->rx_buffer_size,
					DMA_FROM_DEVICE);
			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
			rx_queue->rx_skbuff[i] = NULL;
		}
		rxbdp->lstatus = 0;
		rxbdp->bufPtr = 0;
		rxbdp++;
	}
	kfree(rx_queue->rx_skbuff);
}

/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff */
static void free_skb_resources(struct gfar_private *priv)
{
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	for (i = 0; i < priv->num_tx_queues; i++) {
		struct netdev_queue *txq;
		tx_queue = priv->tx_queue[i];
		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
		if (tx_queue->tx_skbuff)
			free_skb_tx_queue(tx_queue);
		netdev_tx_reset_queue(txq);
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		if (rx_queue->rx_skbuff)
			free_skb_rx_queue(rx_queue);
	}

	dma_free_coherent(&priv->ofdev->dev,
			sizeof(struct txbd8) * priv->total_tx_ring_size +
			sizeof(struct rxbd8) * priv->total_rx_ring_size,
			priv->tx_queue[0]->tx_bd_base,
			priv->tx_queue[0]->tx_bd_dma_base);
	skb_queue_purge(&priv->rx_recycle);
}
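
/*
 * Note: the single dma_free_coherent() above is enough because every
 * TX and RX ring lives in one coherent region anchored at
 * tx_queue[0]->tx_bd_base, sized as the sum of all ring sizes.  As a
 * rough worked example, assuming 8-byte descriptors, four 256-entry
 * rings amount to 4 * 256 * 8 = 8 KiB of coherent memory.
 */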

void gfar_start(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	int i = 0;

	/* Enable Rx and Tx in MACCFG1 */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear THLT/RHLT, so that the DMA starts polling now */
		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}

	dev->trans_start = jiffies; /* prevent tx timeout */
}

void gfar_configure_coalescing(struct gfar_private *priv,
	unsigned long tx_mask, unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i = 0;

	/* Backward compatible case ---- even if we enable
	 * multiple queues, there's only single reg to program
	 */
	gfar_write(&regs->txic, 0);
	if (likely(priv->tx_queue[0]->txcoalescing))
		gfar_write(&regs->txic, priv->tx_queue[0]->txic);

	gfar_write(&regs->rxic, 0);
	if (unlikely(priv->rx_queue[0]->rxcoalescing))
		gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);

	if (priv->mode == MQ_MG_MODE) {
		baddr = &regs->txic0;
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
			if (likely(priv->tx_queue[i]->txcoalescing)) {
				gfar_write(baddr + i, 0);
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
			}
		}

		baddr = &regs->rxic0;
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
			if (likely(priv->rx_queue[i]->rxcoalescing)) {
				gfar_write(baddr + i, 0);
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
			}
		}
	}
}
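
/*
 * Worked example: in MQ_MG_MODE each queue i owns the register at
 * txic0 + i (likewise rxic0 + i), so tx_mask = 0x3 programs txic0
 * and txic1 from tx_queue[0]->txic and tx_queue[1]->txic, while the
 * legacy txic/rxic pair is still written for the single-queue case.
 */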

static int register_grp_irqs(struct gfar_priv_grp *grp)
{
	struct gfar_private *priv = grp->priv;
	struct net_device *dev = priv->ndev;
	int err;

	/* If the device has multiple interrupts, register for
	 * them.  Otherwise, only register for the one */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive */
		if ((err = request_irq(grp->interruptError, gfar_error, 0,
				grp->int_name_er, grp)) < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  grp->interruptError);

			goto err_irq_fail;
		}

		if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
				0, grp->int_name_tx, grp)) < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  grp->interruptTransmit);
			goto tx_irq_fail;
		}

		if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
				grp->int_name_rx, grp)) < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  grp->interruptReceive);
			goto rx_irq_fail;
		}
	} else {
		if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
				grp->int_name_tx, grp)) < 0) {
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
				  grp->interruptTransmit);
			goto err_irq_fail;
		}
	}

	return 0;

rx_irq_fail:
	free_irq(grp->interruptTransmit, grp);
tx_irq_fail:
	free_irq(grp->interruptError, grp);
err_irq_fail:
	return err;
}

/* Bring the controller up and running */
int startup_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = NULL;
	int err, i, j;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	err = gfar_alloc_skb_resources(ndev);
	if (err)
		return err;

	gfar_init_mac(ndev);

	for (i = 0; i < priv->num_grps; i++) {
		err = register_grp_irqs(&priv->gfargrp[i]);
		if (err) {
			for (j = 0; j < i; j++)
				free_grp_irqs(&priv->gfargrp[j]);
			goto irq_fail;
		}
	}

	/* Start the controller */
	gfar_start(ndev);

	phy_start(priv->phydev);

	gfar_configure_coalescing(priv, 0xFF, 0xFF);

	return 0;

irq_fail:
	free_skb_resources(priv);
	return err;
}

/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err;

	enable_napi(priv);

	skb_queue_head_init(&priv->rx_recycle);

	/* Initialize a bunch of registers */
	init_registers(dev);

	gfar_set_mac_address(dev);

	err = init_phy(dev);
	if (err) {
		disable_napi(priv);
		return err;
	}

	err = startup_gfar(dev);
	if (err) {
		disable_napi(priv);
		return err;
	}

	netif_tx_start_all_queues(dev);

	device_set_wakeup_enable(&dev->dev, priv->wol_en);

	return err;
}

static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;
}

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
		int fcb_length)
{
	u8 flags = 0;

	/* If we're here, it's an IP packet with a TCP or UDP
	 * payload.  We set it to checksum, using a pseudo-header
	 * we provide
	 */
	flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is */
	/* And provide the already calculated phcs */
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = udp_hdr(skb)->check;
	} else
		fcb->phcs = tcp_hdr(skb)->check;

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr */
	fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
	fcb->l4os = skb_network_header_len(skb);

	fcb->flags = flags;
}
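
/*
 * Worked example for the offsets above: in an untagged IPv4/TCP frame
 * the IP header starts ETH_HLEN (14) bytes into the frame, so
 * l3os = 14, and with a plain 20-byte IP header l4os = 20.  The FCB
 * (and TxPAL, when time stamping) is excluded, since fcb_length is
 * subtracted from the network offset.
 */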

inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = vlan_tx_tag_get(skb);
}

static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
			       struct txbd8 *base, int ring_size)
{
	struct txbd8 *new_bd = bdp + stride;

	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
}

static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
		int ring_size)
{
	return skip_txbd(bdp, 1, base, ring_size);
}
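
/*
 * Worked example: in an 8-entry ring, skip_txbd(base + 6, 3, base, 8)
 * computes base + 9, detects it is past the end, and wraps to
 * base + 1 -- BD indexing is simply modulo ring_size.
 */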

/* This is called by the kernel when a frame is ready for transmission. */
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct netdev_queue *txq;
	struct gfar __iomem *regs = NULL;
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
	u32 lstatus;
	int i, rq = 0, do_tstamp = 0;
	u32 bufaddr;
	unsigned long flags;
	unsigned int nr_frags, nr_txbds, length, fcb_length = GMAC_FCB_LEN;

	/*
	 * TOE=1 frames larger than 2500 bytes may see excess delays
	 * before start of transmission.
	 */
	if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
			skb->ip_summed == CHECKSUM_PARTIAL &&
			skb->len > 2500)) {
		int ret;

		ret = skb_checksum_help(skb);
		if (ret)
			return ret;
	}

	rq = skb->queue_mapping;
	tx_queue = priv->tx_queue[rq];
	txq = netdev_get_tx_queue(dev, rq);
	base = tx_queue->tx_bd_base;
	regs = tx_queue->grp->regs;

	/* check if time stamp should be generated */
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
			priv->hwts_tx_en)) {
		do_tstamp = 1;
		fcb_length = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
	}

	/* make space for additional header when fcb is needed */
	if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
			vlan_tx_tag_present(skb) ||
			unlikely(do_tstamp)) &&
			(skb_headroom(skb) < fcb_length)) {
		struct sk_buff *skb_new;

		skb_new = skb_realloc_headroom(skb, fcb_length);
		if (!skb_new) {
			dev->stats.tx_errors++;
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		/* Steal sock reference for processing TX time stamps */
		swap(skb_new->sk, skb->sk);
		swap(skb_new->destructor, skb->destructor);
		kfree_skb(skb);
		skb = skb_new;
	}

	/* total number of fragments in the SKB */
	nr_frags = skb_shinfo(skb)->nr_frags;

	/* calculate the required number of TxBDs for this skb */
	if (unlikely(do_tstamp))
		nr_txbds = nr_frags + 2;
	else
		nr_txbds = nr_frags + 1;

	/* check if there is space to queue this packet */
	if (nr_txbds > tx_queue->num_txbdfree) {
		/* no space, stop the queue */
		netif_tx_stop_queue(txq);
		dev->stats.tx_fifo_errors++;
		return NETDEV_TX_BUSY;
	}

	/* Update transmit stats */
	tx_queue->stats.tx_bytes += skb->len;
	tx_queue->stats.tx_packets++;

	txbdp = txbdp_start = tx_queue->cur_tx;
	lstatus = txbdp->lstatus;

	/* Time stamp insertion requires one additional TxBD */
	if (unlikely(do_tstamp))
		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
				tx_queue->tx_ring_size);

	if (nr_frags == 0) {
		if (unlikely(do_tstamp))
			txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
					TXBD_INTERRUPT);
		else
			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
	} else {
		/* Place the fragment addresses and lengths into the TxBDs */
		for (i = 0; i < nr_frags; i++) {
			/* Point at the next BD, wrapping as needed */
			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);

			length = skb_shinfo(skb)->frags[i].size;

			lstatus = txbdp->lstatus | length |
				BD_LFLAG(TXBD_READY);

			/* Handle the last BD specially */
			if (i == nr_frags - 1)
				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

			bufaddr = skb_frag_dma_map(&priv->ofdev->dev,
						   &skb_shinfo(skb)->frags[i],
						   0,
						   length,
						   DMA_TO_DEVICE);

			/* set the TxBD length and buffer pointer */
			txbdp->bufPtr = bufaddr;
			txbdp->lstatus = lstatus;
		}

		lstatus = txbdp_start->lstatus;
	}

	/* Add TxPAL between FCB and frame if required */
	if (unlikely(do_tstamp)) {
		skb_push(skb, GMAC_TXPAL_LEN);
		memset(skb->data, 0, GMAC_TXPAL_LEN);
	}

	/* Set up checksumming */
	if (CHECKSUM_PARTIAL == skb->ip_summed) {
		fcb = gfar_add_fcb(skb);
		/* as specified by errata */
		if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12)
			     && ((unsigned long)fcb % 0x20) > 0x18)) {
			__skb_pull(skb, GMAC_FCB_LEN);
			skb_checksum_help(skb);
		} else {
			lstatus |= BD_LFLAG(TXBD_TOE);
			gfar_tx_checksum(skb, fcb, fcb_length);
		}
	}

	if (vlan_tx_tag_present(skb)) {
		if (unlikely(NULL == fcb)) {
			fcb = gfar_add_fcb(skb);
			lstatus |= BD_LFLAG(TXBD_TOE);
		}

		gfar_tx_vlan(skb, fcb);
	}

	/* Setup tx hardware time stamping if requested */
	if (unlikely(do_tstamp)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		if (fcb == NULL)
			fcb = gfar_add_fcb(skb);
		fcb->ptp = 1;
		lstatus |= BD_LFLAG(TXBD_TOE);
	}

	txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
			skb_headlen(skb), DMA_TO_DEVICE);

	/*
	 * If time stamping is requested one additional TxBD must be set up. The
	 * first TxBD points to the FCB and must have a data length of
	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
	 * the full frame length.
	 */
	if (unlikely(do_tstamp)) {
		txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_length;
		txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
				(skb_headlen(skb) - fcb_length);
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
	} else {
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
	}

	netdev_tx_sent_queue(txq, skb->len);

	/*
	 * We can work in parallel with gfar_clean_tx_ring(), except
	 * when modifying num_txbdfree. Note that we didn't grab the lock
	 * when we were reading the num_txbdfree and checking for available
	 * space, that's because outside of this function it can only grow,
	 * and once we've got needed space, it cannot suddenly disappear.
	 *
	 * The lock also protects us from gfar_error(), which can modify
	 * regs->tstat and thus retrigger the transfers, which is why we
	 * also must grab the lock before setting ready bit for the first
	 * to be transmitted BD.
	 */
	spin_lock_irqsave(&tx_queue->txlock, flags);

	/*
	 * The powerpc-specific eieio() is used, as wmb() has too strong
	 * semantics (it requires synchronization between cacheable and
	 * uncacheable mappings, which eieio doesn't provide and which we
	 * don't need), thus requiring a more expensive sync instruction.  At
	 * some point, the set of architecture-independent barrier functions
	 * should be expanded to include weaker barriers.
	 */
	eieio();

	txbdp_start->lstatus = lstatus;

	eieio(); /* force lstatus write before tx_skbuff */

	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;

	/* Update the current skb pointer to the next entry we will use
	 * (wrapping if necessary) */
	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
		TX_RING_MOD_MASK(tx_queue->tx_ring_size);

	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);

	/* reduce TxBD free count */
	tx_queue->num_txbdfree -= (nr_txbds);

	/* If the next BD still needs to be cleaned up, then the bds
	   are full.  We need to tell the kernel to stop sending us stuff. */
	if (!tx_queue->num_txbdfree) {
		netif_tx_stop_queue(txq);

		dev->stats.tx_fifo_errors++;
	}

	/* Tell the DMA to go go go */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);

	/* Unlock priv */
	spin_unlock_irqrestore(&tx_queue->txlock, flags);

	return NETDEV_TX_OK;
}
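
/*
 * Worked example of the TxBD accounting in gfar_start_xmit(): a
 * linear skb needs one BD, an skb with two page fragments needs
 * three (nr_frags + 1), and requesting a hardware timestamp adds one
 * more FCB-only descriptor (nr_frags + 2), so the same two-fragment
 * skb then consumes four ring entries from num_txbdfree.
 */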

/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	disable_napi(priv);

	cancel_work_sync(&priv->reset_task);
	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	netif_tx_stop_all_queues(dev);

	return 0;
}

/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}

/* Check if rx parser should be activated */
void gfar_check_rx_parser_mode(struct gfar_private *priv)
{
	struct gfar __iomem *regs;
	u32 tempval;

	regs = priv->gfargrp[0].regs;

	tempval = gfar_read(&regs->rctrl);
	/* If parse is no longer required, then disable parser */
	if (tempval & RCTRL_REQ_PARSER)
		tempval |= RCTRL_PRSDEP_INIT;
	else
		tempval &= ~RCTRL_PRSDEP_INIT;
	gfar_write(&regs->rctrl, tempval);
}

/* Enables and disables VLAN insertion/extraction */
void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	unsigned long flags;
	u32 tempval;

	regs = priv->gfargrp[0].regs;
	local_irq_save(flags);
	lock_rx_qs(priv);

	if (features & NETIF_F_HW_VLAN_TX) {
		/* Enable VLAN tag insertion */
		tempval = gfar_read(&regs->tctrl);
		tempval |= TCTRL_VLINS;
		gfar_write(&regs->tctrl, tempval);
	} else {
		/* Disable VLAN tag insertion */
		tempval = gfar_read(&regs->tctrl);
		tempval &= ~TCTRL_VLINS;
		gfar_write(&regs->tctrl, tempval);
	}

	if (features & NETIF_F_HW_VLAN_RX) {
		/* Enable VLAN tag extraction */
		tempval = gfar_read(&regs->rctrl);
		tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Disable VLAN tag extraction */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~RCTRL_VLEX;
		gfar_write(&regs->rctrl, tempval);

		gfar_check_rx_parser_mode(priv);
	}

	gfar_change_mtu(dev, dev->mtu);

	unlock_rx_qs(priv);
	local_irq_restore(flags);
}

static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	int tempsize, tempval;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int oldsize = priv->rx_buffer_size;
	int frame_size = new_mtu + ETH_HLEN;

	if (gfar_is_vlan_on(priv))
		frame_size += VLAN_HLEN;

	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
		netif_err(priv, drv, dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

	if (gfar_uses_fcb(priv))
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

	tempsize =
	    (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
	    INCREMENTAL_BUFFER_SIZE;

	/* Only stop and start the controller if it isn't already
	 * stopped, and we changed something */
	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		stop_gfar(dev);

	priv->rx_buffer_size = tempsize;

	dev->mtu = new_mtu;

	gfar_write(&regs->mrblr, priv->rx_buffer_size);
	gfar_write(&regs->maxfrm, priv->rx_buffer_size);

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length */
	tempval = gfar_read(&regs->maccfg2);

	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
			gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
	else
		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

	gfar_write(&regs->maccfg2, tempval);

	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		startup_gfar(dev);

	return 0;
}

/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
	struct gfar_private *priv = container_of(work, struct gfar_private,
			reset_task);
	struct net_device *dev = priv->ndev;

	if (dev->flags & IFF_UP) {
		netif_tx_stop_all_queues(dev);
		stop_gfar(dev);
		startup_gfar(dev);
		netif_tx_start_all_queues(dev);
	}

	netif_tx_schedule_all(dev);
}

static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);
}

static void gfar_align_skb(struct sk_buff *skb)
{
	/* We need the data buffer to be aligned properly.  We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb, RXBUF_ALIGNMENT -
		(((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
}

/* Interrupt Handler for Transmit complete */
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
	struct net_device *dev = tx_queue->dev;
	struct netdev_queue *txq;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *bdp, *next = NULL;
	struct txbd8 *lbdp = NULL;
	struct txbd8 *base = tx_queue->tx_bd_base;
	struct sk_buff *skb;
	int skb_dirtytx;
	int tx_ring_size = tx_queue->tx_ring_size;
	int frags = 0, nr_txbds = 0;
	int i;
	int howmany = 0;
	int tqi = tx_queue->qindex;
	unsigned int bytes_sent = 0;
	u32 lstatus;
	size_t buflen;

	rx_queue = priv->rx_queue[tqi];
	txq = netdev_get_tx_queue(dev, tqi);
	bdp = tx_queue->dirty_tx;
	skb_dirtytx = tx_queue->skb_dirtytx;

	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
		unsigned long flags;

		frags = skb_shinfo(skb)->nr_frags;

		/*
		 * When time stamping, one additional TxBD must be freed.
		 * Also, we need to dma_unmap_single() the TxPAL.
		 */
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
			nr_txbds = frags + 2;
		else
			nr_txbds = frags + 1;

		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);

		lstatus = lbdp->lstatus;

		/* Only clean completed frames */
		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
				(lstatus & BD_LENGTH_MASK))
			break;

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			next = next_txbd(bdp, base, tx_ring_size);
			buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN;
		} else
			buflen = bdp->length;

		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
				buflen, DMA_TO_DEVICE);

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			struct skb_shared_hwtstamps shhwtstamps;
			u64 *ns = (u64 *)(((u32)skb->data + 0x10) & ~0x7);
			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
			skb_tstamp_tx(skb, &shhwtstamps);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next;
		}

		bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
		bdp = next_txbd(bdp, base, tx_ring_size);

		for (i = 0; i < frags; i++) {
			dma_unmap_page(&priv->ofdev->dev,
					bdp->bufPtr,
					bdp->length,
					DMA_TO_DEVICE);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next_txbd(bdp, base, tx_ring_size);
		}

		bytes_sent += skb->len;

		/*
		 * If there's room in the queue (limit it to rx_buffer_size)
		 * we add this skb back into the pool, if it's the right size
		 */
		if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
				skb_recycle_check(skb, priv->rx_buffer_size +
					RXBUF_ALIGNMENT)) {
			gfar_align_skb(skb);
			skb_queue_head(&priv->rx_recycle, skb);
		} else
			dev_kfree_skb_any(skb);

		tx_queue->tx_skbuff[skb_dirtytx] = NULL;

		skb_dirtytx = (skb_dirtytx + 1) &
			TX_RING_MOD_MASK(tx_ring_size);

		howmany++;
		spin_lock_irqsave(&tx_queue->txlock, flags);
		tx_queue->num_txbdfree += nr_txbds;
		spin_unlock_irqrestore(&tx_queue->txlock, flags);
	}

	/* If we freed a buffer, we can restart transmission, if necessary */
	if (__netif_subqueue_stopped(dev, tqi) && tx_queue->num_txbdfree)
		netif_wake_subqueue(dev, tqi);

	/* Update dirty indicators */
	tx_queue->skb_dirtytx = skb_dirtytx;
	tx_queue->dirty_tx = bdp;

	netdev_tx_completed_queue(txq, howmany, bytes_sent);

	return howmany;
}

static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
{
	unsigned long flags;

	spin_lock_irqsave(&gfargrp->grplock, flags);
	if (napi_schedule_prep(&gfargrp->napi)) {
		gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
		__napi_schedule(&gfargrp->napi);
	} else {
		/*
		 * Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
	}
	spin_unlock_irqrestore(&gfargrp->grplock, flags);
}

/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *grp_id)
{
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
	return IRQ_HANDLED;
}

static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
		struct sk_buff *skb)
{
	struct net_device *dev = rx_queue->dev;
	struct gfar_private *priv = netdev_priv(dev);
	dma_addr_t buf;

	buf = dma_map_single(&priv->ofdev->dev, skb->data,
			     priv->rx_buffer_size, DMA_FROM_DEVICE);
	gfar_init_rxbdp(rx_queue, bdp, buf);
}

static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;

	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
	if (!skb)
		return NULL;

	gfar_align_skb(skb);

	return skb;
}

struct sk_buff *gfar_new_skb(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;

	skb = skb_dequeue(&priv->rx_recycle);
	if (!skb)
		skb = gfar_alloc_skb(dev);

	return skb;
}

static inline void count_errors(unsigned short status, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors
	 * matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;

		estats->rx_trunc++;

		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			estats->rx_large++;
		else
			estats->rx_short++;
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		estats->rx_nonoctet++;
	}
	if (status & RXBD_CRCERR) {
		estats->rx_crcerr++;
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		estats->rx_overrun++;
		stats->rx_crc_errors++;
	}
}

irqreturn_t gfar_receive(int irq, void *grp_id)
{
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
	return IRQ_HANDLED;
}

static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary.  Otherwise, it is */
	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);
}
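
/*
 * Note: gfar_rx_checksum() above demands both RXFCB_CIP and RXFCB_CTU,
 * i.e. the parser found an IP header and verified a TCP/UDP checksum;
 * a frame with only one of the two flags set is left for the stack to
 * verify.
 */
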
/* gfar_process_frame() -- handle one incoming packet if skb
 * isn't NULL.  */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			      int amount_pull)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;

	int ret;

	/* fcb is at the beginning if exists */
	fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb */
	/* Remove the padded bytes, if there are any */
	if (amount_pull) {
		skb_record_rx_queue(skb, fcb->rq);
		skb_pull(skb, amount_pull);
	}

	/* Get receive timestamp from the skb */
	if (priv->hwts_rx_en) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		u64 *ns = (u64 *) skb->data;
		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
		shhwtstamps->hwtstamp = ns_to_ktime(*ns);
	}

	if (priv->padding)
		skb_pull(skb, priv->padding);

	if (dev->features & NETIF_F_RXCSUM)
		gfar_rx_checksum(skb, fcb);

	/* Tell the skb what kind of packet this is */
	skb->protocol = eth_type_trans(skb, dev);

	/*
	 * We need to check for NETIF_F_HW_VLAN_RX here.
	 * Even if vlan rx accel is disabled, on some chips
	 * RXFCB_VLN is pseudo randomly set.
	 */
	if (dev->features & NETIF_F_HW_VLAN_RX &&
	    fcb->flags & RXFCB_VLN)
		__vlan_hwaccel_put_tag(skb, fcb->vlctl);

	/* Send the packet up the stack */
	ret = netif_receive_skb(skb);

	if (NET_RX_DROP == ret)
		priv->extra_stats.kernel_dropped++;

	return 0;
}

/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 *   until the budget/quota has been reached. Returns the number
 *   of frames handled
 */
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
	struct net_device *dev = rx_queue->dev;
	struct rxbd8 *bdp, *base;
	struct sk_buff *skb;
	int pkt_len;
	int amount_pull;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = rx_queue->cur_rx;
	base = rx_queue->rx_bd_base;

	amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);

	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		struct sk_buff *newskb;
		rmb();

		/* Add another skb for the future */
		newskb = gfar_new_skb(dev);

		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];

		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
				priv->rx_buffer_size, DMA_FROM_DEVICE);

		if (unlikely(!(bdp->status & RXBD_ERR) &&
				bdp->length > priv->rx_buffer_size))
			bdp->status = RXBD_LARGE;

		/* We drop the frame if we failed to allocate a new buffer */
		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
				 bdp->status & RXBD_ERR)) {
			count_errors(bdp->status, dev);

			if (unlikely(!newskb))
				newskb = skb;
			else if (skb)
				skb_queue_head(&priv->rx_recycle, skb);
		} else {
			/* Increment the number of packets */
			rx_queue->stats.rx_packets++;
			howmany++;

			if (likely(skb)) {
				pkt_len = bdp->length - ETH_FCS_LEN;
				/* Remove the FCS from the packet length */
				skb_put(skb, pkt_len);
				rx_queue->stats.rx_bytes += pkt_len;
				skb_record_rx_queue(skb, rx_queue->qindex);
				gfar_process_frame(dev, skb, amount_pull);

			} else {
				netif_warn(priv, rx_err, dev, "Missing skb!\n");
				rx_queue->stats.rx_dropped++;
				priv->extra_stats.rx_skbmissing++;
			}

		}

		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;

		/* Setup the new bdp */
		gfar_new_rxbdp(rx_queue, bdp, newskb);

		/* Update to the next pointer */
		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);

		/* update to point at the next skb */
		rx_queue->skb_currx =
		    (rx_queue->skb_currx + 1) &
		    RX_RING_MOD_MASK(rx_queue->rx_ring_size);
	}

	/* Update the current rxbd pointer to be the next one */
	rx_queue->cur_rx = bdp;

	return howmany;
}
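
/*
 * Note: amount_pull above is GMAC_FCB_LEN whenever the hardware
 * prepends a frame control block (gfar_uses_fcb()); gfar_process_frame()
 * then pulls that many bytes so the stack never sees the FCB, and
 * strips any extra priv->padding separately.
 */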

static int gfar_poll(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp = container_of(napi,
			struct gfar_priv_grp, napi);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
	int tx_cleaned = 0, i, left_over_budget = budget;
	unsigned long serviced_queues = 0;
	int num_queues = 0;

	num_queues = gfargrp->num_rx_queues;
	budget_per_queue = budget/num_queues;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived */
	gfar_write(&regs->ievent, IEVENT_RTX_MASK);

	while (num_queues && left_over_budget) {
		budget_per_queue = left_over_budget/num_queues;
		left_over_budget = 0;

		for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
			if (test_bit(i, &serviced_queues))
				continue;
			rx_queue = priv->rx_queue[i];
			tx_queue = priv->tx_queue[rx_queue->qindex];

			tx_cleaned += gfar_clean_tx_ring(tx_queue);
			rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
							budget_per_queue);
			rx_cleaned += rx_cleaned_per_queue;
			if (rx_cleaned_per_queue < budget_per_queue) {
				left_over_budget = left_over_budget +
					(budget_per_queue - rx_cleaned_per_queue);
				set_bit(i, &serviced_queues);
				num_queues--;
			}
		}
	}

	if (tx_cleaned)
		return budget;

	if (rx_cleaned < budget) {
		napi_complete(napi);

		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		gfar_write(&regs->imask, IMASK_DEFAULT);

		/* If we are coalescing interrupts, update the timer */
		/* Otherwise, clear it */
		gfar_configure_coalescing(priv,
				gfargrp->rx_bit_map, gfargrp->tx_bit_map);
	}

	return rx_cleaned;
}
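
/*
 * Worked example of the budget split above: with budget = 64 and two
 * RX queues in the group, each queue initially gets 32.  If queue 0
 * cleans only 20 frames, the unused 12 go into left_over_budget, the
 * queue is marked serviced, and the next pass of the while loop hands
 * all 12 to the remaining queue.
 */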

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i = 0;

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++) {
			disable_irq(priv->gfargrp[i].interruptTransmit);
			disable_irq(priv->gfargrp[i].interruptReceive);
			disable_irq(priv->gfargrp[i].interruptError);
			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
						&priv->gfargrp[i]);
			enable_irq(priv->gfargrp[i].interruptError);
			enable_irq(priv->gfargrp[i].interruptReceive);
			enable_irq(priv->gfargrp[i].interruptTransmit);
		}
	} else {
		for (i = 0; i < priv->num_grps; i++) {
			disable_irq(priv->gfargrp[i].interruptTransmit);
			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
						&priv->gfargrp[i]);
			enable_irq(priv->gfargrp[i].interruptTransmit);
		}
	}
}
#endif

/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;

	/* Save ievent for future reference */
	u32 events = gfar_read(&gfargrp->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, grp_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, grp_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, grp_id);

	return IRQ_HANDLED;
}

/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	struct phy_device *phydev = priv->phydev;
	int new_state = 0;

	local_irq_save(flags);
	lock_tx_qs(priv);

	if (phydev->link) {
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);

				ecntrl &= ~(ECNTRL_R100);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				netif_warn(priv, link, dev,
					   "Ack!  Speed (%d) is not 10/100/1000!\n",
					   phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);
	unlock_tx_qs(priv);
	local_irq_restore(flags);
}

/* Update the hash table based on the current list of multicast
 * addresses we subscribe to.  Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed */
static void gfar_set_multi(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	if (dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting them */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if (netdev_mc_empty(dev))
			return;

		/* Parse the list, and set the appropriate bits */
		netdev_for_each_mc_addr(ha, dev) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx, ha->addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, ha->addr);
		}
	}
}

/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};

	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
		gfar_set_mac_for_addr(dev, idx, zero_arr);
}

/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table.  The table is controlled through 8 32-bit registers:
 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255.  This means that the 3 most significant bits in the
 * hash index indicate which gaddr register to use, and the 5 other bits
 * indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the entry. */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(ETH_ALEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31-whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
}
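
/*
 * Worked example: with hash_width = 8 the top eight CRC bits form the
 * hash index; its upper three bits (whichreg) select one of gaddr0-7
 * and the lower five (whichbit) select a bit within that register,
 * counted from the MSB -- hence value = 1 << (31 - whichbit).
 */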

/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int idx;
	char tmpbuf[ETH_ALEN];
	u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;

	macptr += num*2;

	/* Now copy it into the mac registers backwards, cuz */
	/* little endian is silly */
	for (idx = 0; idx < ETH_ALEN; idx++)
		tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];

	gfar_write(macptr, *((u32 *) (tmpbuf)));

	tempval = *((u32 *) (tmpbuf + 4));

	gfar_write(macptr+1, tempval);
}
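
/*
 * Worked example: for address 00:04:9f:00:01:02, tmpbuf ends up
 * byte-reversed as 02 01 00 9f 04 00, so the first 32-bit write
 * carries 02:01:00:9f and the second write the remaining 04:00 --
 * the byte order the little-endian MACSTNADDR register pair expects.
 */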

/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_private *priv = gfargrp->priv;
	struct net_device *dev = priv->ndev;

	/* Save ievent for future reference */
	u32 events = gfar_read(&regs->ievent);

	/* Clear IEVENT */
	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	/* Hmm... */
	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		netdev_dbg(dev, "error interrupt (ievent=0x%08x imask=0x%08x)\n",
			   events, gfar_read(&regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			unsigned long flags;

			netif_dbg(priv, tx_err, dev,
				  "TX FIFO underrun, packet dropped\n");
			dev->stats.tx_dropped++;
			priv->extra_stats.tx_underrun++;

			local_irq_save(flags);
			lock_tx_qs(priv);

			/* Reactivate the Tx Queues */
			gfar_write(&regs->tstat, gfargrp->tstat);

			unlock_tx_qs(priv);
			local_irq_restore(flags);
		}
		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_bsy++;

		gfar_receive(irq, grp_id);

		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
			  gfar_read(&regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_babr++;

		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
	}
	if (events & IEVENT_EBERR) {
		priv->extra_stats.eberr++;
		netif_dbg(priv, rx_err, dev, "bus error\n");
	}
	if (events & IEVENT_RXC)
		netif_dbg(priv, rx_status, dev, "control frame\n");

	if (events & IEVENT_BABT) {
		priv->extra_stats.tx_babt++;
		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
	}
	return IRQ_HANDLED;
}

static struct of_device_id gfar_match[] =
{
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{
		.compatible = "fsl,etsec2",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gfar_match);

/* Structure for a device driver */
static struct platform_driver gfar_driver = {
	.driver = {
		.name = "fsl-gianfar",
		.owner = THIS_MODULE,
		.pm = GFAR_PM_OPS,
		.of_match_table = gfar_match,
	},
	.probe = gfar_probe,
	.remove = gfar_remove,
};

module_platform_driver(gfar_driver);