/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 *  Gianfar:  AKA Lambda Draconis, "Dragon"
 *  RA 11 31 24.2
 *  Dec +69 19 52
 *  V 3.84
 *  B-V +1.62
 *
 *  Theory of operation
 *
 *  The driver is initialized through of_device. Configuration information
 *  is therefore conveyed through an OF-style device tree.
 *
 *  The Gianfar Ethernet Controller uses a ring of buffer
 *  descriptors.  The beginning is indicated by a register
 *  pointing to the physical address of the start of the ring.
 *  The end is determined by a "wrap" bit being set in the
 *  last descriptor of the ring.
 *
 *  When a packet is received, the RXF bit in the
 *  IEVENT register is set, triggering an interrupt when the
 *  corresponding bit in the IMASK register is also set (if
 *  interrupt coalescing is active, then the interrupt may not
 *  happen immediately, but will wait until either a set number
 *  of frames or amount of time have passed).  In NAPI, the
 *  interrupt handler will signal there is work to be done, and
 *  exit. This method will start at the last known empty
 *  descriptor, and process every subsequent descriptor until there
 *  are none left with data (NAPI will stop after a set number of
 *  packets to give time to other tasks, but will eventually
 *  process all the packets).  The data arrives inside a
 *  pre-allocated skb, and so after the skb is passed up to the
 *  stack, a new skb must be allocated, and the address field in
 *  the buffer descriptor must be updated to indicate this new
 *  skb.
 *
 *  When the kernel requests that a packet be transmitted, the
 *  driver starts where it left off last time, and points the
 *  descriptor at the buffer which was passed in.  The driver
 *  then informs the DMA engine that there are packets ready to
 *  be transmitted.  Once the controller is finished transmitting
 *  the packet, an interrupt may be triggered (under the same
 *  conditions as for reception, but depending on the TXF bit).
 *  The driver then cleans up the buffer.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DEBUG

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#include <asm/reg.h>
#include <asm/mpc85xx.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "gianfar.h"

#define TX_TIMEOUT      (1*HZ)

const char gfar_driver_version[] = "1.3";

/* Forward declarations for the driver's internal entry points */
static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			   struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
static int gfar_poll_sq(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			       int amount_pull, struct napi_struct *napi);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");
153
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
154 155 156 157 158 159 160
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = buf;

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
161
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
162 163 164 165 166 167 168
		lstatus |= BD_LFLAG(RXBD_WRAP);

	eieio();

	bdp->lstatus = lstatus;
}

169
static int gfar_init_bds(struct net_device *ndev)
170
{
171
	struct gfar_private *priv = netdev_priv(ndev);
172 173
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
174 175
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
176
	int i, j;
177

178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}
194

195 196 197
		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status |= TXBD_WRAP;
198 199
	}

200 201 202 203 204
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->cur_rx = rx_queue->rx_bd_base;
		rx_queue->skb_currx = 0;
		rxbdp = rx_queue->rx_bd_base;
205

206 207
		for (j = 0; j < rx_queue->rx_ring_size; j++) {
			struct sk_buff *skb = rx_queue->rx_skbuff[j];
208

209 210 211 212 213 214
			if (skb) {
				gfar_init_rxbdp(rx_queue, rxbdp,
						rxbdp->bufPtr);
			} else {
				skb = gfar_new_skb(ndev);
				if (!skb) {
215
					netdev_err(ndev, "Can't allocate RX buffers\n");
216
					return -ENOMEM;
217 218 219 220
				}
				rx_queue->rx_skbuff[j] = skb;

				gfar_new_rxbdp(rx_queue, rxbdp, skb);
221 222
			}

223
			rxbdp++;
224 225 226 227 228 229 230 231 232
		}

	}

	return 0;
}

static int gfar_alloc_skb_resources(struct net_device *ndev)
{
233
	void *vaddr;
234 235
	dma_addr_t addr;
	int i, j, k;
236
	struct gfar_private *priv = netdev_priv(ndev);
237
	struct device *dev = priv->dev;
238 239 240
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

241 242 243 244 245 246 247
	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
248 249

	/* Allocate memory for the buffer descriptors */
250
	vaddr = dma_alloc_coherent(dev,
251 252 253 254 255 256
				   (priv->total_tx_ring_size *
				    sizeof(struct txbd8)) +
				   (priv->total_rx_ring_size *
				    sizeof(struct rxbd8)),
				   &addr, GFP_KERNEL);
	if (!vaddr)
257 258
		return -ENOMEM;

259 260
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
261
		tx_queue->tx_bd_base = vaddr;
262 263 264
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
265 266
		addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
267
	}
268 269

	/* Start the rx descriptor ring where the tx ring leaves off */
270 271
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
272
		rx_queue->rx_bd_base = vaddr;
273 274
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->dev = ndev;
275 276
		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
277
	}
278 279

	/* Setup the skbuff rings */
280 281
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
282 283 284 285 286
		tx_queue->tx_skbuff =
			kmalloc_array(tx_queue->tx_ring_size,
				      sizeof(*tx_queue->tx_skbuff),
				      GFP_KERNEL);
		if (!tx_queue->tx_skbuff)
287
			goto cleanup;
288

289 290 291
		for (k = 0; k < tx_queue->tx_ring_size; k++)
			tx_queue->tx_skbuff[k] = NULL;
	}
292

293 294
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
295 296 297 298 299
		rx_queue->rx_skbuff =
			kmalloc_array(rx_queue->rx_ring_size,
				      sizeof(*rx_queue->rx_skbuff),
				      GFP_KERNEL);
		if (!rx_queue->rx_skbuff)
300 301 302 303 304
			goto cleanup;

		for (j = 0; j < rx_queue->rx_ring_size; j++)
			rx_queue->rx_skbuff[j] = NULL;
	}
305

306 307
	if (gfar_init_bds(ndev))
		goto cleanup;
308 309 310 311 312 313 314 315

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}

316 317
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
318
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
319
	u32 __iomem *baddr;
320 321 322
	int i;

	baddr = &regs->tbase0;
323
	for (i = 0; i < priv->num_tx_queues; i++) {
324
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
325
		baddr += 2;
326 327 328
	}

	baddr = &regs->rbase0;
329
	for (i = 0; i < priv->num_rx_queues; i++) {
330
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
331
		baddr += 2;
332 333 334
	}
}

335 336 337
static void gfar_init_mac(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
338
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
339 340 341 342
	u32 rctrl = 0;
	u32 tctrl = 0;
	u32 attrs = 0;

343 344
	/* write the tx/rx base registers */
	gfar_init_tx_rx_base(priv);
345

346
	/* Configure the coalescing support */
347
	gfar_configure_coalescing_all(priv);
348

349 350 351
	/* set this when rx hw offload (TOE) functions are being used */
	priv->uses_rxfcb = 0;

S
Sandeep Gopalpet 已提交
352
	if (priv->rx_filer_enable) {
353
		rctrl |= RCTRL_FILREN;
S
Sandeep Gopalpet 已提交
354 355 356
		/* Program the RIR0 reg with the required distribution */
		gfar_write(&regs->rir0, DEFAULT_RIR0);
	}
357

358 359 360 361
	/* Restore PROMISC mode */
	if (ndev->flags & IFF_PROMISC)
		rctrl |= RCTRL_PROM;

362
	if (ndev->features & NETIF_F_RXCSUM) {
363
		rctrl |= RCTRL_CHECKSUMMING;
364 365
		priv->uses_rxfcb = 1;
	}
366 367 368 369 370 371 372 373 374 375 376 377 378

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(ndev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

379 380 381
	/* Insert receive time stamps into padding alignment bytes */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
		rctrl &= ~RCTRL_PAL_MASK;
382
		rctrl |= RCTRL_PADDING(8);
383 384 385
		priv->padding = 8;
	}

386
	/* Enable HW time stamping if requested from user space */
387
	if (priv->hwts_rx_en) {
388
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
389 390
		priv->uses_rxfcb = 1;
	}
391

392
	if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
393
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
394 395
		priv->uses_rxfcb = 1;
	}
396 397 398 399 400 401 402

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);

	if (ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

403 404 405 406 407 408 409
	if (priv->prio_sched_en)
		tctrl |= TCTRL_TXSCHED_PRIO;
	else {
		tctrl |= TCTRL_TXSCHED_WRRS;
		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
	}
410

411 412 413 414 415 416 417 418 419
	gfar_write(&regs->tctrl, tctrl);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
J
Jan Ceuleers 已提交
420 421
	 * depending on the approprate variables
	 */
422 423 424 425 426 427 428 429 430 431 432 433 434 435 436
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
}

S
Sandeep Gopalpet 已提交
437 438 439 440 441
/* .ndo_get_stats: aggregate the per-queue Rx/Tx counters into the
 * device-wide net_device_stats structure and return it.
 */
static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes   = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes   = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}

465 466 467 468 469
static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
470
	.ndo_set_features = gfar_set_features,
471
	.ndo_set_rx_mode = gfar_set_multi,
472 473
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
S
Sandeep Gopalpet 已提交
474
	.ndo_get_stats = gfar_get_stats,
475 476
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
477 478 479 480 481
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};

482 483
void lock_rx_qs(struct gfar_private *priv)
{
484
	int i;
485 486 487 488 489 490 491

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_lock(&priv->rx_queue[i]->rxlock);
}

void lock_tx_qs(struct gfar_private *priv)
{
492
	int i;
493 494 495 496 497 498 499

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_rx_qs(struct gfar_private *priv)
{
500
	int i;
501 502 503 504 505 506 507

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_unlock(&priv->rx_queue[i]->rxlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
508
	int i;
509 510 511 512 513 514 515

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_unlock(&priv->tx_queue[i]->txlock);
}

static void free_tx_pointers(struct gfar_private *priv)
{
516
	int i;
517 518 519 520 521 522 523

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void free_rx_pointers(struct gfar_private *priv)
{
524
	int i;
525 526 527 528 529

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

530 531
static void unmap_group_regs(struct gfar_private *priv)
{
532
	int i;
533 534 535 536 537 538

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

539 540 541 542 543 544 545 546 547 548 549 550 551
/* Free the per-group irqinfo allocations and then the net_device itself */
static void free_gfar_dev(struct gfar_private *priv)
{
	int i, j;

	for (i = 0; i < priv->num_grps; i++)
		for (j = 0; j < GFAR_NUM_IRQS; j++) {
			kfree(priv->gfargrp[i].irqinfo[j]);
			priv->gfargrp[i].irqinfo[j] = NULL;
		}

	free_netdev(priv->ndev);
}

552 553
static void disable_napi(struct gfar_private *priv)
{
554
	int i;
555 556 557 558 559 560 561

	for (i = 0; i < priv->num_grps; i++)
		napi_disable(&priv->gfargrp[i].napi);
}

static void enable_napi(struct gfar_private *priv)
{
562
	int i;
563 564 565 566 567 568

	for (i = 0; i < priv->num_grps; i++)
		napi_enable(&priv->gfargrp[i].napi);
}

static int gfar_parse_group(struct device_node *np,
569
			    struct gfar_private *priv, const char *model)
570
{
571
	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
572
	u32 *queue_mask;
573 574
	int i;

575 576 577 578
	for (i = 0; i < GFAR_NUM_IRQS; i++) {
		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
					  GFP_KERNEL);
		if (!grp->irqinfo[i])
579 580
			return -ENOMEM;
	}
581

582 583
	grp->regs = of_iomap(np, 0);
	if (!grp->regs)
584 585
		return -ENOMEM;

586
	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);
587 588 589

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
590 591 592 593 594
		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
		if (gfar_irq(grp, TX)->irq == NO_IRQ ||
		    gfar_irq(grp, RX)->irq == NO_IRQ ||
		    gfar_irq(grp, ER)->irq == NO_IRQ)
595 596 597
			return -EINVAL;
	}

598 599
	grp->priv = priv;
	spin_lock_init(&grp->grplock);
600 601
	if (priv->mode == MQ_MG_MODE) {
		queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
602
		grp->rx_bit_map = queue_mask ?
603 604
			*queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
		queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
605
		grp->tx_bit_map = queue_mask ?
606
			*queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
607
	} else {
608 609
		grp->rx_bit_map = 0xFF;
		grp->tx_bit_map = 0xFF;
610 611 612 613 614 615
	}
	priv->num_grps++;

	return 0;
}

616
static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
617 618 619 620
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
621 622 623
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
624
	struct device_node *np = ofdev->dev.of_node;
625
	struct device_node *child = NULL;
A
Andy Fleming 已提交
626 627 628
	const u32 *stash;
	const u32 *stash_len;
	const u32 *stash_idx;
629 630
	unsigned int num_tx_qs, num_rx_qs;
	u32 *tx_queues, *rx_queues;
631 632 633 634

	if (!np || !of_device_is_available(np))
		return -ENODEV;

635 636 637 638 639
	/* parse the num of tx and rx queues */
	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
	num_tx_qs = tx_queues ? *tx_queues : 1;

	if (num_tx_qs > MAX_TX_QS) {
640 641 642
		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
		       num_tx_qs, MAX_TX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
643 644 645 646 647 648 649
		return -EINVAL;
	}

	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
	num_rx_qs = rx_queues ? *rx_queues : 1;

	if (num_rx_qs > MAX_RX_QS) {
650 651 652
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
		       num_rx_qs, MAX_RX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
653 654 655 656 657 658 659 660 661 662 663 664
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->ndev = dev;

	priv->num_tx_queues = num_tx_qs;
665
	netif_set_real_num_rx_queues(dev, num_rx_qs);
666
	priv->num_rx_queues = num_rx_qs;
667
	priv->num_grps = 0x0;
668

J
Jan Ceuleers 已提交
669
	/* Init Rx queue filer rule set linked list */
S
Sebastian Poehn 已提交
670 671 672 673
	INIT_LIST_HEAD(&priv->rx_list.list);
	priv->rx_list.count = 0;
	mutex_init(&priv->rx_queue_access);

674 675
	model = of_get_property(np, "model", NULL);

676 677
	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;
678

679 680 681 682 683 684 685
	/* Parse and initialize group specific information */
	if (of_device_is_compatible(np, "fsl,etsec2")) {
		priv->mode = MQ_MG_MODE;
		for_each_child_of_node(np, child) {
			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
686
		}
687 688 689
	} else {
		priv->mode = SQ_SG_MODE;
		err = gfar_parse_group(np, priv, model);
690
		if (err)
691
			goto err_grp_init;
692 693
	}

694
	for (i = 0; i < priv->num_tx_queues; i++)
695
		priv->tx_queue[i] = NULL;
696 697 698 699
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->rx_queue[i] = NULL;

	for (i = 0; i < priv->num_tx_queues; i++) {
700 701
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
702 703 704 705 706 707 708 709 710 711 712
		if (!priv->tx_queue[i]) {
			err = -ENOMEM;
			goto tx_alloc_failed;
		}
		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = dev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
713 714
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
715 716 717 718 719 720 721 722 723 724 725
		if (!priv->rx_queue[i]) {
			err = -ENOMEM;
			goto rx_alloc_failed;
		}
		priv->rx_queue[i]->rx_skbuff = NULL;
		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->dev = dev;
		spin_lock_init(&(priv->rx_queue[i]->rxlock));
	}


A
Andy Fleming 已提交
726 727
	stash = of_get_property(np, "bd-stash", NULL);

728
	if (stash) {
A
Andy Fleming 已提交
729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	stash_len = of_get_property(np, "rx-stash-len", NULL);

	if (stash_len)
		priv->rx_stash_size = *stash_len;

	stash_idx = of_get_property(np, "rx-stash-idx", NULL);

	if (stash_idx)
		priv->rx_stash_index = *stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

746
	mac_addr = of_get_mac_address(np);
747

748
	if (mac_addr)
749
		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
750 751

	if (model && !strcasecmp(model, "TSEC"))
752 753 754 755 756
		priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
				     FSL_GIANFAR_DEV_HAS_COALESCE |
				     FSL_GIANFAR_DEV_HAS_RMON |
				     FSL_GIANFAR_DEV_HAS_MULTI_INTR;

757
	if (model && !strcasecmp(model, "eTSEC"))
758 759 760 761 762 763 764 765 766 767
		priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
				     FSL_GIANFAR_DEV_HAS_COALESCE |
				     FSL_GIANFAR_DEV_HAS_RMON |
				     FSL_GIANFAR_DEV_HAS_MULTI_INTR |
				     FSL_GIANFAR_DEV_HAS_PADDING |
				     FSL_GIANFAR_DEV_HAS_CSUM |
				     FSL_GIANFAR_DEV_HAS_VLAN |
				     FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
				     FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
				     FSL_GIANFAR_DEV_HAS_TIMER;
768 769 770 771 772 773 774 775 776 777 778 779

	ctype = of_get_property(np, "phy-connection-type", NULL);

	/* We only care about rgmii-id.  The rest are autodetected */
	if (ctype && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

780
	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
781 782

	/* Find the TBI PHY.  If it's not there, we don't support SGMII */
783
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
784 785 786

	return 0;

787 788 789 790
rx_alloc_failed:
	free_rx_pointers(priv);
tx_alloc_failed:
	free_tx_pointers(priv);
791 792
err_grp_init:
	unmap_group_regs(priv);
793
	free_gfar_dev(priv);
794 795 796
	return err;
}

797
static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
798 799 800 801 802 803 804 805 806 807 808
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

809 810 811 812 813 814 815 816 817 818
	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_tx_en = 1;
		break;
	default:
819
		return -ERANGE;
820
	}
821 822 823

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
824 825 826 827 828
		if (priv->hwts_rx_en) {
			stop_gfar(netdev);
			priv->hwts_rx_en = 0;
			startup_gfar(netdev);
		}
829 830 831 832
		break;
	default:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
833 834 835 836 837
		if (!priv->hwts_rx_en) {
			stop_gfar(netdev);
			priv->hwts_rx_en = 1;
			startup_gfar(netdev);
		}
838 839 840 841 842 843 844 845
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

846 847 848 849 850 851 852 853 854 855 856 857 858 859
/* SIOCGHWTSTAMP handler: report the current hardware timestamping
 * configuration back to user space.
 */
static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	config.flags = 0;
	config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config.rx_filter = (priv->hwts_rx_en ?
			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

860 861 862 863 864 865 866
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

867
	if (cmd == SIOCSHWTSTAMP)
868 869 870
		return gfar_hwtstamp_set(dev, rq);
	if (cmd == SIOCGHWTSTAMP)
		return gfar_hwtstamp_get(dev, rq);
871

872 873 874
	if (!priv->phydev)
		return -ENODEV;

875
	return phy_mii_ioctl(priv->phydev, rq, cmd);
876 877
}

878 879 880 881
/* Mirror the low @max_qs bits of @bit_map: bit 0 swaps with bit
 * (max_qs - 1), bit 1 with bit (max_qs - 2), and so on.  Used to
 * translate between register bit order and queue index order.
 */
static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
	unsigned int new_bit_map = 0x0;
	int mask = 0x1 << (max_qs - 1), i;

	for (i = 0; i < max_qs; i++) {
		if (bit_map & mask)
			new_bit_map = new_bit_map + (1 << i);
		mask = mask >> 0x1;
	}
	return new_bit_map;
}
890

891 892
/* Write the four-entry filer cluster for one protocol @class (e.g.
 * IPv4/IPv6 with optional TCP/UDP) starting at index @rqfar and working
 * downward.  Each rule is mirrored into priv->ftp_rqfcr/ftp_rqfpr so the
 * software copy stays in sync with the hardware table.  Returns the next
 * free (lower) filer index.
 */
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
				   u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}

/* Initialize the hardware Rx filer table: a default match-all rule at the
 * top, protocol clusters for IPv6/IPv4 (plain, UDP, TCP) below it, and
 * no-match rules in every remaining (lower) slot.
 */
static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicated the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		priv->ftp_rqfcr[i] = rqfcr;
		priv->ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}

958
static void __gfar_detect_errata_83xx(struct gfar_private *priv)
959 960 961 962 963 964 965 966
{
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
	unsigned int rev = svr & 0xffff;

	/* MPC8313 Rev 2.0 and higher; All MPC837x */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
967
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
968 969
		priv->errata |= GFAR_ERRATA_74;

970 971
	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
972
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
973 974
		priv->errata |= GFAR_ERRATA_76;

975 976 977 978 979 980 981 982 983 984
	/* MPC8313 Rev < 2.0 */
	if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
		priv->errata |= GFAR_ERRATA_12;
}

static void __gfar_detect_errata_85xx(struct gfar_private *priv)
{
	unsigned int svr = mfspr(SPRN_SVR);

	if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
985
		priv->errata |= GFAR_ERRATA_12;
986 987 988
	if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
	    ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
		priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
989 990 991 992 993 994 995 996 997 998 999 1000 1001
}

/* Populate priv->errata with the workaround flags needed for the
 * silicon we are running on, and log the result if any are enabled.
 */
static void gfar_detect_errata(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;

	/* no plans to fix */
	priv->errata |= GFAR_ERRATA_A002;

	if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
		__gfar_detect_errata_85xx(priv);
	else /* non-mpc85xx parts, i.e. e300 core based */
		__gfar_detect_errata_83xx(priv);

	if (priv->errata)
		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
			 priv->errata);
}

1008
/* Set up the ethernet device structure, private data,
J
Jan Ceuleers 已提交
1009 1010
 * and anything else we need before we start
 */
1011
static int gfar_probe(struct platform_device *ofdev)
L
Linus Torvalds 已提交
1012 1013 1014 1015
{
	u32 tempval;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
1016
	struct gfar __iomem *regs = NULL;
1017
	int err = 0, i, grp_idx = 0;
1018
	u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
1019
	u32 isrg = 0;
1020
	u32 __iomem *baddr;
L
Linus Torvalds 已提交
1021

1022
	err = gfar_of_init(ofdev, &dev);
L
Linus Torvalds 已提交
1023

1024 1025
	if (err)
		return err;
L
Linus Torvalds 已提交
1026 1027

	priv = netdev_priv(dev);
1028 1029
	priv->ndev = dev;
	priv->ofdev = ofdev;
1030
	priv->dev = &ofdev->dev;
1031
	SET_NETDEV_DEV(dev, &ofdev->dev);
L
Linus Torvalds 已提交
1032

1033
	spin_lock_init(&priv->bflock);
1034
	INIT_WORK(&priv->reset_task, gfar_reset_task);
L
Linus Torvalds 已提交
1035

1036
	platform_set_drvdata(ofdev, priv);
1037
	regs = priv->gfargrp[0].regs;
L
Linus Torvalds 已提交
1038

1039 1040
	gfar_detect_errata(priv);

J
Jan Ceuleers 已提交
1041 1042 1043
	/* Stop the DMA engine now, in case it was running before
	 * (The firmware could have used it, and left it running).
	 */
1044
	gfar_halt(dev);
L
Linus Torvalds 已提交
1045 1046

	/* Reset MAC layer */
1047
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
L
Linus Torvalds 已提交
1048

1049 1050 1051
	/* We need to delay at least 3 TX clocks */
	udelay(2);

1052 1053 1054 1055 1056 1057 1058 1059
	tempval = 0;
	if (!priv->pause_aneg_en && priv->tx_pause_en)
		tempval |= MACCFG1_TX_FLOW;
	if (!priv->pause_aneg_en && priv->rx_pause_en)
		tempval |= MACCFG1_RX_FLOW;
	/* the soft reset bit is not self-resetting, so we need to
	 * clear it before resuming normal operation
	 */
1060
	gfar_write(&regs->maccfg1, tempval);
L
Linus Torvalds 已提交
1061 1062

	/* Initialize MACCFG2. */
1063 1064 1065 1066
	tempval = MACCFG2_INIT_SETTINGS;
	if (gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
	gfar_write(&regs->maccfg2, tempval);
L
Linus Torvalds 已提交
1067 1068

	/* Initialize ECNTRL */
1069
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
L
Linus Torvalds 已提交
1070 1071

	/* Set the dev->base_addr to the gfar reg region */
1072
	dev->base_addr = (unsigned long) regs;
L
Linus Torvalds 已提交
1073 1074 1075 1076

	/* Fill in the dev structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->mtu = 1500;
1077
	dev->netdev_ops = &gfar_netdev_ops;
1078 1079
	dev->ethtool_ops = &gfar_ethtool_ops;

1080
	/* Register for napi ...We are registering NAPI for each grp */
1081 1082
	if (priv->mode == SQ_SG_MODE)
		netif_napi_add(dev, &priv->gfargrp[0].napi, gfar_poll_sq,
1083
			       GFAR_DEV_WEIGHT);
1084 1085 1086 1087
	else
		for (i = 0; i < priv->num_grps; i++)
			netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
				       GFAR_DEV_WEIGHT);
1088

1089
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
1090
		dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
1091
				   NETIF_F_RXCSUM;
1092
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
1093
				 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
1094
	}
1095

J
Jiri Pirko 已提交
1096
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
1097 1098 1099
		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_HW_VLAN_CTAG_RX;
		dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
J
Jiri Pirko 已提交
1100
	}
1101

1102
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
1103 1104 1105
		priv->extended_hash = 1;
		priv->hash_width = 9;

1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121
		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;
1122 1123 1124 1125 1126

	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

1127 1128 1129 1130 1131 1132 1133 1134
		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
1135 1136
	}

1137
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
1138 1139 1140 1141
		priv->padding = DEFAULT_PADDING;
	else
		priv->padding = 0;

1142
	if (dev->features & NETIF_F_IP_CSUM ||
1143
	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1144
		dev->needed_headroom = GMAC_FCB_LEN;
L
Linus Torvalds 已提交
1145

1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157
	/* Program the isrg regs only if number of grps > 1 */
	if (priv->num_grps > 1) {
		baddr = &regs->isrg0;
		for (i = 0; i < priv->num_grps; i++) {
			isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
			isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
			gfar_write(baddr, isrg);
			baddr++;
			isrg = 0x0;
		}
	}

1158
	/* Need to reverse the bit maps as  bit_map's MSB is q0
1159
	 * but, for_each_set_bit parses from right to left, which
J
Jan Ceuleers 已提交
1160 1161
	 * basically reverses the queue numbers
	 */
1162
	for (i = 0; i< priv->num_grps; i++) {
1163 1164 1165 1166
		priv->gfargrp[i].tx_bit_map =
			reverse_bitmap(priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
		priv->gfargrp[i].rx_bit_map =
			reverse_bitmap(priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
1167 1168 1169
	}

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
J
Jan Ceuleers 已提交
1170 1171
	 * also assign queues to groups
	 */
1172 1173
	for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
		priv->gfargrp[grp_idx].num_rx_queues = 0x0;
1174

1175
		for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
1176
				 priv->num_rx_queues) {
1177 1178 1179 1180 1181 1182
			priv->gfargrp[grp_idx].num_rx_queues++;
			priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
			rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
			rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		}
		priv->gfargrp[grp_idx].num_tx_queues = 0x0;
1183

1184
		for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
1185
				 priv->num_tx_queues) {
1186 1187 1188 1189 1190 1191 1192 1193
			priv->gfargrp[grp_idx].num_tx_queues++;
			priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
			tstat = tstat | (TSTAT_CLEAR_THALT >> i);
			tqueue = tqueue | (TQUEUE_EN0 >> i);
		}
		priv->gfargrp[grp_idx].rstat = rstat;
		priv->gfargrp[grp_idx].tstat = tstat;
		rstat = tstat =0;
1194 1195 1196 1197 1198
	}

	gfar_write(&regs->rqueue, rqueue);
	gfar_write(&regs->tqueue, tqueue);

L
Linus Torvalds 已提交
1199 1200
	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;

1201
	/* Initializing some of the rx/tx queue level parameters */
1202 1203 1204 1205 1206 1207
	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
		priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
		priv->tx_queue[i]->txic = DEFAULT_TXIC;
	}
1208

1209 1210 1211 1212 1213
	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
		priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
	}
L
Linus Torvalds 已提交
1214

J
Jan Ceuleers 已提交
1215
	/* always enable rx filer */
S
Sebastian Poehn 已提交
1216
	priv->rx_filer_enable = 1;
1217 1218
	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
1219 1220 1221
	/* use pritority h/w tx queue scheduling for single queue devices */
	if (priv->num_tx_queues == 1)
		priv->prio_sched_en = 1;
1222

1223 1224 1225
	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

L
Linus Torvalds 已提交
1226 1227 1228
	err = register_netdev(dev);

	if (err) {
1229
		pr_err("%s: Cannot register net device, aborting\n", dev->name);
L
Linus Torvalds 已提交
1230 1231 1232
		goto register_fail;
	}

1233
	device_init_wakeup(&dev->dev,
1234 1235
			   priv->device_flags &
			   FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1236

1237
	/* fill out IRQ number and name fields */
1238
	for (i = 0; i < priv->num_grps; i++) {
1239
		struct gfar_priv_grp *grp = &priv->gfargrp[i];
1240
		if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1241
			sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
1242
				dev->name, "_g", '0' + i, "_tx");
1243
			sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
1244
				dev->name, "_g", '0' + i, "_rx");
1245
			sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
1246
				dev->name, "_g", '0' + i, "_er");
1247
		} else
1248
			strcpy(gfar_irq(grp, TX)->name, dev->name);
1249
	}
1250

1251 1252 1253
	/* Initialize the filer table */
	gfar_init_filer_table(priv);

1254 1255 1256
	/* Create all the sysfs files */
	gfar_init_sysfs(dev);

L
Linus Torvalds 已提交
1257
	/* Print out the device info */
1258
	netdev_info(dev, "mac: %pM\n", dev->dev_addr);
L
Linus Torvalds 已提交
1259

J
Jan Ceuleers 已提交
1260 1261 1262
	/* Even more device info helps when determining which kernel
	 * provided which set of benchmarks.
	 */
1263
	netdev_info(dev, "Running with NAPI enabled\n");
1264
	for (i = 0; i < priv->num_rx_queues; i++)
1265 1266
		netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
			    i, priv->rx_queue[i]->rx_ring_size);
1267
	for (i = 0; i < priv->num_tx_queues; i++)
1268 1269
		netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
			    i, priv->tx_queue[i]->tx_ring_size);
L
Linus Torvalds 已提交
1270 1271 1272 1273

	return 0;

register_fail:
1274
	unmap_group_regs(priv);
1275 1276
	free_tx_pointers(priv);
	free_rx_pointers(priv);
1277 1278 1279 1280
	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);
1281
	free_gfar_dev(priv);
1282
	return err;
L
Linus Torvalds 已提交
1283 1284
}

1285
static int gfar_remove(struct platform_device *ofdev)
L
Linus Torvalds 已提交
1286
{
1287
	struct gfar_private *priv = platform_get_drvdata(ofdev);
L
Linus Torvalds 已提交
1288

1289 1290 1291 1292 1293
	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);

D
David S. Miller 已提交
1294
	unregister_netdev(priv->ndev);
1295
	unmap_group_regs(priv);
1296
	free_gfar_dev(priv);
L
Linus Torvalds 已提交
1297 1298 1299 1300

	return 0;
}

1301
#ifdef CONFIG_PM

/* System suspend: halt DMA, disable Tx (and Rx unless wake-on-LAN
 * via magic packet is enabled), and either arm the magic-packet
 * interrupt or stop the PHY.
 */
static int gfar_suspend(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	u32 tempval;

	int magic_packet = priv->wol_en &&
			   (priv->device_flags &
			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	netif_device_detach(ndev);

	if (netif_running(ndev)) {

		local_irq_save(flags);
		lock_tx_qs(priv);
		lock_rx_qs(priv);

		gfar_halt_nodisable(ndev);

		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
		tempval = gfar_read(&regs->maccfg1);

		tempval &= ~MACCFG1_TX_EN;

		if (!magic_packet)
			tempval &= ~MACCFG1_RX_EN;

		gfar_write(&regs->maccfg1, tempval);

		unlock_rx_qs(priv);
		unlock_tx_qs(priv);
		local_irq_restore(flags);

		disable_napi(priv);

		if (magic_packet) {
			/* Enable interrupt on Magic Packet */
			gfar_write(&regs->imask, IMASK_MAG);

			/* Enable Magic Packet mode */
			tempval = gfar_read(&regs->maccfg2);
			tempval |= MACCFG2_MPEN;
			gfar_write(&regs->maccfg2, tempval);
		} else {
			phy_stop(priv->phydev);
		}
	}

	return 0;
}

1357
static int gfar_resume(struct device *dev)
1358
{
1359 1360
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
1361
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1362 1363 1364
	unsigned long flags;
	u32 tempval;
	int magic_packet = priv->wol_en &&
1365 1366
			   (priv->device_flags &
			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1367

1368 1369
	if (!netif_running(ndev)) {
		netif_device_attach(ndev);
1370 1371 1372 1373 1374 1375 1376 1377 1378
		return 0;
	}

	if (!magic_packet && priv->phydev)
		phy_start(priv->phydev);

	/* Disable Magic Packet mode, in case something
	 * else woke us up.
	 */
1379 1380 1381
	local_irq_save(flags);
	lock_tx_qs(priv);
	lock_rx_qs(priv);
1382

1383
	tempval = gfar_read(&regs->maccfg2);
1384
	tempval &= ~MACCFG2_MPEN;
1385
	gfar_write(&regs->maccfg2, tempval);
1386

1387
	gfar_start(ndev);
1388

1389 1390 1391
	unlock_rx_qs(priv);
	unlock_tx_qs(priv);
	local_irq_restore(flags);
1392

1393 1394
	netif_device_attach(ndev);

1395
	enable_napi(priv);
1396 1397 1398 1399 1400 1401 1402 1403 1404

	return 0;
}

static int gfar_restore(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

1405 1406 1407
	if (!netif_running(ndev)) {
		netif_device_attach(ndev);

1408
		return 0;
1409
	}
1410

1411 1412 1413 1414 1415
	if (gfar_init_bds(ndev)) {
		free_skb_resources(priv);
		return -ENOMEM;
	}

1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426
	init_registers(ndev);
	gfar_set_mac_address(ndev);
	gfar_init_mac(ndev);
	gfar_start(ndev);

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	if (priv->phydev)
		phy_start(priv->phydev);
1427

1428
	netif_device_attach(ndev);
1429
	enable_napi(priv);
1430 1431 1432

	return 0;
}
1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443

/* Power-management callbacks; suspend/resume double as freeze/thaw,
 * while restore (post-hibernation) must fully reinitialize hardware.
 */
static struct dev_pm_ops gfar_pm_ops = {
	.suspend = gfar_suspend,
	.resume = gfar_resume,
	.freeze = gfar_suspend,
	.thaw = gfar_resume,
	.restore = gfar_restore,
};

#define GFAR_PM_OPS (&gfar_pm_ops)

#else

#define GFAR_PM_OPS NULL

#endif
L
Linus Torvalds 已提交
1449

1450 1451 1452 1453 1454 1455
/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
1456
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1457 1458 1459
	u32 ecntrl;

	ecntrl = gfar_read(&regs->ecntrl);
1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
1472
		if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
1473
			return PHY_INTERFACE_MODE_RMII;
1474
		}
A
Andy Fleming 已提交
1475
		else {
1476
			phy_interface_t interface = priv->interface;
A
Andy Fleming 已提交
1477

J
Jan Ceuleers 已提交
1478
			/* This isn't autodetected right now, so it must
A
Andy Fleming 已提交
1479 1480 1481 1482 1483
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

1484
			return PHY_INTERFACE_MODE_RGMII;
A
Andy Fleming 已提交
1485
		}
1486 1487
	}

1488
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
1489 1490 1491 1492 1493 1494
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}


1495 1496
/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
L
Linus Torvalds 已提交
1497 1498 1499 1500
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
1501
	uint gigabit_support =
1502
		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
1503
		GFAR_SUPPORTED_GBIT : 0;
1504
	phy_interface_t interface;
L
Linus Torvalds 已提交
1505 1506 1507 1508 1509

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

1510 1511
	interface = gfar_get_interface(dev);

1512 1513 1514 1515 1516 1517 1518 1519
	priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
				      interface);
	if (!priv->phydev)
		priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
							 interface);
	if (!priv->phydev) {
		dev_err(&dev->dev, "could not attach to PHY\n");
		return -ENODEV;
1520
	}
L
Linus Torvalds 已提交
1521

K
Kapil Juneja 已提交
1522 1523 1524
	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

1525
	/* Remove any features not supported by the controller */
1526 1527
	priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
	priv->phydev->advertising = priv->phydev->supported;
L
Linus Torvalds 已提交
1528 1529 1530 1531

	return 0;
}

J
Jan Ceuleers 已提交
1532
/* Initialize TBI PHY interface for communicating with the
1533 1534 1535 1536 1537 1538 1539
 * SERDES lynx PHY on the chip.  We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register.  We assume
 * that the TBIPA register is valid.  Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
K
Kapil Juneja 已提交
1540 1541 1542
static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
1543 1544 1545 1546 1547 1548 1549
	struct phy_device *tbiphy;

	if (!priv->tbi_node) {
		dev_warn(&dev->dev, "error: SGMII mode requires that the "
				    "device tree specify a tbi-handle\n");
		return;
	}
1550

1551 1552 1553
	tbiphy = of_phy_find_device(priv->tbi_node);
	if (!tbiphy) {
		dev_err(&dev->dev, "error: Could not get TBI device\n");
1554 1555
		return;
	}
K
Kapil Juneja 已提交
1556

J
Jan Ceuleers 已提交
1557
	/* If the link is already up, we must already be ok, and don't need to
1558 1559 1560 1561
	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
	 * everything for us?  Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
1562
	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
1563
		return;
K
Kapil Juneja 已提交
1564

1565
	/* Single clk mode, mii mode off(for serdes communication) */
1566
	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
K
Kapil Juneja 已提交
1567

1568
	phy_write(tbiphy, MII_ADVERTISE,
1569 1570
		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
		  ADVERTISE_1000XPSE_ASYM);
K
Kapil Juneja 已提交
1571

1572 1573 1574
	phy_write(tbiphy, MII_BMCR,
		  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
		  BMCR_SPEED1000);
K
Kapil Juneja 已提交
1575 1576
}

L
Linus Torvalds 已提交
1577 1578 1579
static void init_registers(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
1580
	struct gfar __iomem *regs = NULL;
1581
	int i;
L
Linus Torvalds 已提交
1582

1583 1584 1585 1586
	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
L
Linus Torvalds 已提交
1587

1588 1589 1590
		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}
L
Linus Torvalds 已提交
1591

1592
	regs = priv->gfargrp[0].regs;
L
Linus Torvalds 已提交
1593
	/* Init hash registers to zero */
1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610
	gfar_write(&regs->igaddr0, 0);
	gfar_write(&regs->igaddr1, 0);
	gfar_write(&regs->igaddr2, 0);
	gfar_write(&regs->igaddr3, 0);
	gfar_write(&regs->igaddr4, 0);
	gfar_write(&regs->igaddr5, 0);
	gfar_write(&regs->igaddr6, 0);
	gfar_write(&regs->igaddr7, 0);

	gfar_write(&regs->gaddr0, 0);
	gfar_write(&regs->gaddr1, 0);
	gfar_write(&regs->gaddr2, 0);
	gfar_write(&regs->gaddr3, 0);
	gfar_write(&regs->gaddr4, 0);
	gfar_write(&regs->gaddr5, 0);
	gfar_write(&regs->gaddr6, 0);
	gfar_write(&regs->gaddr7, 0);
L
Linus Torvalds 已提交
1611 1612

	/* Zero out the rmon mib registers if it has them */
1613
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
1614
		memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib));
L
Linus Torvalds 已提交
1615 1616

		/* Mask off the CAM interrupts */
1617 1618
		gfar_write(&regs->rmon.cam1, 0xffffffff);
		gfar_write(&regs->rmon.cam2, 0xffffffff);
L
Linus Torvalds 已提交
1619 1620 1621
	}

	/* Initialize the max receive buffer length */
1622
	gfar_write(&regs->mrblr, priv->rx_buffer_size);
L
Linus Torvalds 已提交
1623 1624

	/* Initialize the Minimum Frame Length Register */
1625
	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
L
Linus Torvalds 已提交
1626 1627
}

1628 1629 1630 1631
static int __gfar_is_rx_idle(struct gfar_private *priv)
{
	u32 res;

J
Jan Ceuleers 已提交
1632
	/* Normaly TSEC should not hang on GRS commands, so we should
1633 1634
	 * actually wait for IEVENT_GRSC flag.
	 */
1635
	if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
1636 1637
		return 0;

J
Jan Ceuleers 已提交
1638
	/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
1639 1640 1641 1642 1643 1644 1645 1646 1647 1648
	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
	 * and the Rx can be safely reset.
	 */
	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
	res &= 0x7f807f80;
	if ((res & 0xffff) == (res >> 16))
		return 1;

	return 0;
}
1649 1650

/* Halt the receive and transmit queues */
1651
static void gfar_halt_nodisable(struct net_device *dev)
L
Linus Torvalds 已提交
1652 1653
{
	struct gfar_private *priv = netdev_priv(dev);
1654
	struct gfar __iomem *regs = NULL;
L
Linus Torvalds 已提交
1655
	u32 tempval;
1656
	int i;
L
Linus Torvalds 已提交
1657

1658 1659 1660 1661
	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Mask all interrupts */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
L
Linus Torvalds 已提交
1662

1663 1664 1665
		/* Clear all interrupts */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
	}
L
Linus Torvalds 已提交
1666

1667
	regs = priv->gfargrp[0].regs;
L
Linus Torvalds 已提交
1668
	/* Stop the DMA, and wait for it to stop */
1669
	tempval = gfar_read(&regs->dmactrl);
1670 1671
	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
	    (DMACTRL_GRS | DMACTRL_GTS)) {
1672 1673
		int ret;

L
Linus Torvalds 已提交
1674
		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
1675
		gfar_write(&regs->dmactrl, tempval);
L
Linus Torvalds 已提交
1676

1677 1678 1679 1680 1681 1682 1683
		do {
			ret = spin_event_timeout(((gfar_read(&regs->ievent) &
				 (IEVENT_GRSC | IEVENT_GTSC)) ==
				 (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
			if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
				ret = __gfar_is_rx_idle(priv);
		} while (!ret);
L
Linus Torvalds 已提交
1684
	}
1685 1686 1687 1688 1689 1690
}

/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
1691
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1692
	u32 tempval;
L
Linus Torvalds 已提交
1693

1694 1695
	gfar_halt_nodisable(dev);

L
Linus Torvalds 已提交
1696 1697 1698 1699
	/* Disable Rx and Tx */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
1700 1701
}

1702 1703
static void free_grp_irqs(struct gfar_priv_grp *grp)
{
1704 1705 1706
	free_irq(gfar_irq(grp, TX)->irq, grp);
	free_irq(gfar_irq(grp, RX)->irq, grp);
	free_irq(gfar_irq(grp, ER)->irq, grp);
1707 1708
}

1709 1710 1711 1712
void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long flags;
1713
	int i;
1714

1715 1716
	phy_stop(priv->phydev);

1717

1718
	/* Lock it down */
1719 1720 1721
	local_irq_save(flags);
	lock_tx_qs(priv);
	lock_rx_qs(priv);
1722 1723

	gfar_halt(dev);
L
Linus Torvalds 已提交
1724

1725 1726 1727
	unlock_rx_qs(priv);
	unlock_tx_qs(priv);
	local_irq_restore(flags);
L
Linus Torvalds 已提交
1728 1729

	/* Free the IRQs */
1730
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1731 1732
		for (i = 0; i < priv->num_grps; i++)
			free_grp_irqs(&priv->gfargrp[i]);
L
Linus Torvalds 已提交
1733
	} else {
1734
		for (i = 0; i < priv->num_grps; i++)
1735
			free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
1736
				 &priv->gfargrp[i]);
L
Linus Torvalds 已提交
1737 1738 1739 1740 1741
	}

	free_skb_resources(priv);
}

1742
static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
L
Linus Torvalds 已提交
1743 1744
{
	struct txbd8 *txbdp;
1745
	struct gfar_private *priv = netdev_priv(tx_queue->dev);
D
Dai Haruki 已提交
1746
	int i, j;
L
Linus Torvalds 已提交
1747

1748
	txbdp = tx_queue->tx_bd_base;
L
Linus Torvalds 已提交
1749

1750 1751
	for (i = 0; i < tx_queue->tx_ring_size; i++) {
		if (!tx_queue->tx_skbuff[i])
D
Dai Haruki 已提交
1752
			continue;
L
Linus Torvalds 已提交
1753

1754
		dma_unmap_single(priv->dev, txbdp->bufPtr,
1755
				 txbdp->length, DMA_TO_DEVICE);
D
Dai Haruki 已提交
1756
		txbdp->lstatus = 0;
1757
		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1758
		     j++) {
D
Dai Haruki 已提交
1759
			txbdp++;
1760
			dma_unmap_page(priv->dev, txbdp->bufPtr,
1761
				       txbdp->length, DMA_TO_DEVICE);
L
Linus Torvalds 已提交
1762
		}
1763
		txbdp++;
1764 1765
		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
		tx_queue->tx_skbuff[i] = NULL;
L
Linus Torvalds 已提交
1766
	}
1767
	kfree(tx_queue->tx_skbuff);
1768
	tx_queue->tx_skbuff = NULL;
1769
}
L
Linus Torvalds 已提交
1770

1771 1772 1773 1774 1775
static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
{
	struct rxbd8 *rxbdp;
	struct gfar_private *priv = netdev_priv(rx_queue->dev);
	int i;
L
Linus Torvalds 已提交
1776

1777
	rxbdp = rx_queue->rx_bd_base;
L
Linus Torvalds 已提交
1778

1779 1780
	for (i = 0; i < rx_queue->rx_ring_size; i++) {
		if (rx_queue->rx_skbuff[i]) {
1781 1782
			dma_unmap_single(priv->dev, rxbdp->bufPtr,
					 priv->rx_buffer_size,
1783
					 DMA_FROM_DEVICE);
1784 1785
			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
			rx_queue->rx_skbuff[i] = NULL;
L
Linus Torvalds 已提交
1786
		}
1787 1788 1789
		rxbdp->lstatus = 0;
		rxbdp->bufPtr = 0;
		rxbdp++;
L
Linus Torvalds 已提交
1790
	}
1791
	kfree(rx_queue->rx_skbuff);
1792
	rx_queue->rx_skbuff = NULL;
1793
}
1794

1795
/* If there are any tx skbs or rx skbs still around, free them.
J
Jan Ceuleers 已提交
1796 1797
 * Then free tx_skbuff and rx_skbuff
 */
1798 1799 1800 1801 1802 1803 1804 1805
static void free_skb_resources(struct gfar_private *priv)
{
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	for (i = 0; i < priv->num_tx_queues; i++) {
1806
		struct netdev_queue *txq;
1807

1808
		tx_queue = priv->tx_queue[i];
1809
		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
1810
		if (tx_queue->tx_skbuff)
1811
			free_skb_tx_queue(tx_queue);
1812
		netdev_tx_reset_queue(txq);
1813 1814 1815 1816
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
1817
		if (rx_queue->rx_skbuff)
1818 1819 1820
			free_skb_rx_queue(rx_queue);
	}

1821
	dma_free_coherent(priv->dev,
1822 1823 1824 1825
			  sizeof(struct txbd8) * priv->total_tx_ring_size +
			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
			  priv->tx_queue[0]->tx_bd_base,
			  priv->tx_queue[0]->tx_bd_dma_base);
L
Linus Torvalds 已提交
1826 1827
}

1828 1829 1830
void gfar_start(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
1831
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1832
	u32 tempval;
1833
	int i = 0;
1834 1835 1836 1837 1838 1839 1840

	/* Enable Rx and Tx in MACCFG1 */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize DMACTRL to have WWR and WOP */
1841
	tempval = gfar_read(&regs->dmactrl);
1842
	tempval |= DMACTRL_INIT_SETTINGS;
1843
	gfar_write(&regs->dmactrl, tempval);
1844 1845

	/* Make sure we aren't stopped */
1846
	tempval = gfar_read(&regs->dmactrl);
1847
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
1848
	gfar_write(&regs->dmactrl, tempval);
1849

1850 1851 1852 1853 1854 1855 1856 1857
	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear THLT/RHLT, so that the DMA starts polling now */
		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}
1858

E
Eric Dumazet 已提交
1859
	dev->trans_start = jiffies; /* prevent tx timeout */
1860 1861
}

1862
static void gfar_configure_coalescing(struct gfar_private *priv,
1863
			       unsigned long tx_mask, unsigned long rx_mask)
L
Linus Torvalds 已提交
1864
{
1865
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
1866
	u32 __iomem *baddr;
1867

1868
	if (priv->mode == MQ_MG_MODE) {
1869
		int i = 0;
1870

1871
		baddr = &regs->txic0;
1872
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
1873 1874
			gfar_write(baddr + i, 0);
			if (likely(priv->tx_queue[i]->txcoalescing))
1875 1876 1877 1878
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
		}

		baddr = &regs->rxic0;
1879
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
1880 1881
			gfar_write(baddr + i, 0);
			if (likely(priv->rx_queue[i]->rxcoalescing))
1882 1883
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
		}
1884
	} else {
1885
		/* Backward compatible case -- even if we enable
1886 1887 1888 1889 1890 1891 1892 1893 1894
		 * multiple queues, there's only single reg to program
		 */
		gfar_write(&regs->txic, 0);
		if (likely(priv->tx_queue[0]->txcoalescing))
			gfar_write(&regs->txic, priv->tx_queue[0]->txic);

		gfar_write(&regs->rxic, 0);
		if (unlikely(priv->rx_queue[0]->rxcoalescing))
			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
1895 1896 1897
	}
}

1898 1899 1900 1901 1902
/* Apply coalescing settings to every Tx and Rx queue. */
void gfar_configure_coalescing_all(struct gfar_private *priv)
{
	gfar_configure_coalescing(priv, 0xFF, 0xFF);
}

1903 1904 1905 1906 1907
static int register_grp_irqs(struct gfar_priv_grp *grp)
{
	struct gfar_private *priv = grp->priv;
	struct net_device *dev = priv->ndev;
	int err;
L
Linus Torvalds 已提交
1908 1909

	/* If the device has multiple interrupts, register for
J
Jan Ceuleers 已提交
1910 1911
	 * them.  Otherwise, only register for the one
	 */
1912
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1913
		/* Install our interrupt handlers for Error,
J
Jan Ceuleers 已提交
1914 1915
		 * Transmit, and Receive
		 */
1916 1917 1918
		err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
				  gfar_irq(grp, ER)->name, grp);
		if (err < 0) {
1919
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1920
				  gfar_irq(grp, ER)->irq);
1921

1922
			goto err_irq_fail;
L
Linus Torvalds 已提交
1923
		}
1924 1925 1926
		err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
				  gfar_irq(grp, TX)->name, grp);
		if (err < 0) {
1927
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1928
				  gfar_irq(grp, TX)->irq);
L
Linus Torvalds 已提交
1929 1930
			goto tx_irq_fail;
		}
1931 1932 1933
		err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
				  gfar_irq(grp, RX)->name, grp);
		if (err < 0) {
1934
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1935
				  gfar_irq(grp, RX)->irq);
L
Linus Torvalds 已提交
1936 1937 1938
			goto rx_irq_fail;
		}
	} else {
1939 1940 1941
		err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
				  gfar_irq(grp, TX)->name, grp);
		if (err < 0) {
1942
			netif_err(priv, intr, dev, "Can't get IRQ %d\n",
1943
				  gfar_irq(grp, TX)->irq);
L
Linus Torvalds 已提交
1944 1945 1946 1947
			goto err_irq_fail;
		}
	}

1948 1949 1950
	return 0;

rx_irq_fail:
1951
	free_irq(gfar_irq(grp, TX)->irq, grp);
1952
tx_irq_fail:
1953
	free_irq(gfar_irq(grp, ER)->irq, grp);
1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982
err_irq_fail:
	return err;

}

/* Bring the controller up and running */
int startup_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = NULL;
	int err, i, j;

	/* Mask all interrupts while we set things up */
	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	err = gfar_alloc_skb_resources(ndev);
	if (err)
		return err;

	gfar_init_mac(ndev);

	for (i = 0; i < priv->num_grps; i++) {
		err = register_grp_irqs(&priv->gfargrp[i]);
		if (err) {
			/* Unwind the groups already registered */
			for (j = 0; j < i; j++)
				free_grp_irqs(&priv->gfargrp[j]);
			goto irq_fail;
		}
	}

	/* Start the controller */
	gfar_start(ndev);

	phy_start(priv->phydev);

	gfar_configure_coalescing_all(priv);

	return 0;

irq_fail:
	free_skb_resources(priv);
	return err;
}

J
Jan Ceuleers 已提交
2001 2002 2003
/* Called when something needs to use the ethernet device
 * Returns 0 for success.
 */
L
Linus Torvalds 已提交
2004 2005
static int gfar_enet_open(struct net_device *dev)
{
2006
	struct gfar_private *priv = netdev_priv(dev);
L
Linus Torvalds 已提交
2007 2008
	int err;

2009
	enable_napi(priv);
2010

L
Linus Torvalds 已提交
2011 2012 2013 2014 2015 2016 2017
	/* Initialize a bunch of registers */
	init_registers(dev);

	gfar_set_mac_address(dev);

	err = init_phy(dev);

2018
	if (err) {
2019
		disable_napi(priv);
L
Linus Torvalds 已提交
2020
		return err;
2021
	}
L
Linus Torvalds 已提交
2022 2023

	err = startup_gfar(dev);
2024
	if (err) {
2025
		disable_napi(priv);
2026 2027
		return err;
	}
L
Linus Torvalds 已提交
2028

2029
	netif_tx_start_all_queues(dev);
L
Linus Torvalds 已提交
2030

2031 2032
	device_set_wakeup_enable(&dev->dev, priv->wol_en);

L
Linus Torvalds 已提交
2033 2034 2035
	return err;
}

2036
static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
2037
{
2038
	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
2039 2040

	memset(fcb, 0, GMAC_FCB_LEN);
2041 2042 2043 2044

	return fcb;
}

2045
static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
2046
				    int fcb_length)
2047 2048 2049 2050 2051
{
	/* If we're here, it's a IP packet with a TCP or UDP
	 * payload.  We set it to checksum, using a pseudo-header
	 * we provide
	 */
2052
	u8 flags = TXFCB_DEFAULT;
2053

J
Jan Ceuleers 已提交
2054 2055 2056
	/* Tell the controller what the protocol is
	 * And provide the already calculated phcs
	 */
2057
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
2058
		flags |= TXFCB_UDP;
2059
		fcb->phcs = udp_hdr(skb)->check;
2060
	} else
2061
		fcb->phcs = tcp_hdr(skb)->check;
2062 2063 2064 2065

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
J
Jan Ceuleers 已提交
2066 2067
	 * l3 hdr and the l4 hdr
	 */
2068
	fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
2069
	fcb->l4os = skb_network_header_len(skb);
2070

2071
	fcb->flags = flags;
2072 2073
}

2074
void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
2075
{
2076
	fcb->flags |= TXFCB_VLN;
2077 2078 2079
	fcb->vlctl = vlan_tx_tag_get(skb);
}

D
Dai Haruki 已提交
2080
static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
2081
				      struct txbd8 *base, int ring_size)
D
Dai Haruki 已提交
2082 2083 2084 2085 2086 2087 2088
{
	struct txbd8 *new_bd = bdp + stride;

	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
}

/* Return the descriptor following @bdp, wrapping at the end of the ring */
static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
				      int ring_size)
{
	return skip_txbd(bdp, 1, base, ring_size);
}

2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110 2111
/* eTSEC12: csum generation not supported for some fcb offsets */
static inline bool gfar_csum_errata_12(struct gfar_private *priv,
				       unsigned long fcb_addr)
{
	return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
	       (fcb_addr % 0x20) > 0x18);
}

/* eTSEC76: csum generation for frames larger than 2500 may
 * cause excess delays before start of transmission
 */
static inline bool gfar_csum_errata_76(struct gfar_private *priv,
				       unsigned int len)
{
	return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
	       (len > 2500));
}

J
Jan Ceuleers 已提交
2112 2113 2114
/* This is called by the kernel when a frame is ready for transmission.
 * It is pointed to by the dev->hard_start_xmit function pointer
 */
L
Linus Torvalds 已提交
2115 2116 2117
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
2118
	struct gfar_priv_tx_q *tx_queue = NULL;
2119
	struct netdev_queue *txq;
2120
	struct gfar __iomem *regs = NULL;
2121
	struct txfcb *fcb = NULL;
2122
	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
2123
	u32 lstatus;
2124 2125
	int i, rq = 0;
	int do_tstamp, do_csum, do_vlan;
D
Dai Haruki 已提交
2126
	u32 bufaddr;
A
Andy Fleming 已提交
2127
	unsigned long flags;
2128
	unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
2129 2130 2131 2132

	rq = skb->queue_mapping;
	tx_queue = priv->tx_queue[rq];
	txq = netdev_get_tx_queue(dev, rq);
2133
	base = tx_queue->tx_bd_base;
2134
	regs = tx_queue->grp->regs;
2135

2136 2137 2138 2139 2140 2141 2142 2143
	do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
	do_vlan = vlan_tx_tag_present(skb);
	do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		    priv->hwts_tx_en;

	if (do_csum || do_vlan)
		fcb_len = GMAC_FCB_LEN;

2144
	/* check if time stamp should be generated */
2145 2146
	if (unlikely(do_tstamp))
		fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
D
Dai Haruki 已提交
2147

2148
	/* make space for additional header when fcb is needed */
2149
	if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
2150 2151
		struct sk_buff *skb_new;

2152
		skb_new = skb_realloc_headroom(skb, fcb_len);
2153 2154
		if (!skb_new) {
			dev->stats.tx_errors++;
D
David S. Miller 已提交
2155
			kfree_skb(skb);
2156 2157
			return NETDEV_TX_OK;
		}
2158

2159 2160 2161
		if (skb->sk)
			skb_set_owner_w(skb_new, skb->sk);
		consume_skb(skb);
2162 2163 2164
		skb = skb_new;
	}

D
Dai Haruki 已提交
2165 2166 2167
	/* total number of fragments in the SKB */
	nr_frags = skb_shinfo(skb)->nr_frags;

2168 2169 2170 2171 2172 2173
	/* calculate the required number of TxBDs for this skb */
	if (unlikely(do_tstamp))
		nr_txbds = nr_frags + 2;
	else
		nr_txbds = nr_frags + 1;

D
Dai Haruki 已提交
2174
	/* check if there is space to queue this packet */
2175
	if (nr_txbds > tx_queue->num_txbdfree) {
D
Dai Haruki 已提交
2176
		/* no space, stop the queue */
2177
		netif_tx_stop_queue(txq);
D
Dai Haruki 已提交
2178 2179 2180
		dev->stats.tx_fifo_errors++;
		return NETDEV_TX_BUSY;
	}
L
Linus Torvalds 已提交
2181 2182

	/* Update transmit stats */
2183 2184 2185 2186
	bytes_sent = skb->len;
	tx_queue->stats.tx_bytes += bytes_sent;
	/* keep Tx bytes on wire for BQL accounting */
	GFAR_CB(skb)->bytes_sent = bytes_sent;
E
Eric Dumazet 已提交
2187
	tx_queue->stats.tx_packets++;
L
Linus Torvalds 已提交
2188

2189
	txbdp = txbdp_start = tx_queue->cur_tx;
2190 2191 2192 2193 2194
	lstatus = txbdp->lstatus;

	/* Time stamp insertion requires one additional TxBD */
	if (unlikely(do_tstamp))
		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
2195
						 tx_queue->tx_ring_size);
L
Linus Torvalds 已提交
2196

D
Dai Haruki 已提交
2197
	if (nr_frags == 0) {
2198 2199
		if (unlikely(do_tstamp))
			txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
2200
							  TXBD_INTERRUPT);
2201 2202
		else
			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
D
Dai Haruki 已提交
2203 2204 2205
	} else {
		/* Place the fragment addresses and lengths into the TxBDs */
		for (i = 0; i < nr_frags; i++) {
2206
			unsigned int frag_len;
D
Dai Haruki 已提交
2207
			/* Point at the next BD, wrapping as needed */
2208
			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
D
Dai Haruki 已提交
2209

2210
			frag_len = skb_shinfo(skb)->frags[i].size;
D
Dai Haruki 已提交
2211

2212
			lstatus = txbdp->lstatus | frag_len |
2213
				  BD_LFLAG(TXBD_READY);
D
Dai Haruki 已提交
2214 2215 2216 2217

			/* Handle the last BD specially */
			if (i == nr_frags - 1)
				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
L
Linus Torvalds 已提交
2218

2219
			bufaddr = skb_frag_dma_map(priv->dev,
2220 2221
						   &skb_shinfo(skb)->frags[i],
						   0,
2222
						   frag_len,
2223
						   DMA_TO_DEVICE);
D
Dai Haruki 已提交
2224 2225 2226 2227 2228 2229 2230 2231

			/* set the TxBD length and buffer pointer */
			txbdp->bufPtr = bufaddr;
			txbdp->lstatus = lstatus;
		}

		lstatus = txbdp_start->lstatus;
	}
L
Linus Torvalds 已提交
2232

2233 2234 2235 2236 2237 2238
	/* Add TxPAL between FCB and frame if required */
	if (unlikely(do_tstamp)) {
		skb_push(skb, GMAC_TXPAL_LEN);
		memset(skb->data, 0, GMAC_TXPAL_LEN);
	}

2239 2240
	/* Add TxFCB if required */
	if (fcb_len) {
2241
		fcb = gfar_add_fcb(skb);
2242
		lstatus |= BD_LFLAG(TXBD_TOE);
2243 2244 2245 2246 2247
	}

	/* Set up checksumming */
	if (do_csum) {
		gfar_tx_checksum(skb, fcb, fcb_len);
2248 2249 2250

		if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
		    unlikely(gfar_csum_errata_76(priv, skb->len))) {
2251 2252
			__skb_pull(skb, GMAC_FCB_LEN);
			skb_checksum_help(skb);
2253 2254 2255 2256 2257 2258 2259 2260
			if (do_vlan || do_tstamp) {
				/* put back a new fcb for vlan/tstamp TOE */
				fcb = gfar_add_fcb(skb);
			} else {
				/* Tx TOE not used */
				lstatus &= ~(BD_LFLAG(TXBD_TOE));
				fcb = NULL;
			}
2261
		}
2262 2263
	}

2264
	if (do_vlan)
2265
		gfar_tx_vlan(skb, fcb);
2266

2267 2268
	/* Setup tx hardware time stamping if requested */
	if (unlikely(do_tstamp)) {
2269
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2270 2271 2272
		fcb->ptp = 1;
	}

2273
	txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data,
2274
					     skb_headlen(skb), DMA_TO_DEVICE);
L
Linus Torvalds 已提交
2275

J
Jan Ceuleers 已提交
2276
	/* If time stamping is requested one additional TxBD must be set up. The
2277 2278 2279 2280 2281
	 * first TxBD points to the FCB and must have a data length of
	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
	 * the full frame length.
	 */
	if (unlikely(do_tstamp)) {
2282
		txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_len;
2283
		txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
2284
					 (skb_headlen(skb) - fcb_len);
2285 2286 2287 2288
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
	} else {
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
	}
L
Linus Torvalds 已提交
2289

2290
	netdev_tx_sent_queue(txq, bytes_sent);
2291

J
Jan Ceuleers 已提交
2292
	/* We can work in parallel with gfar_clean_tx_ring(), except
A
Anton Vorontsov 已提交
2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304
	 * when modifying num_txbdfree. Note that we didn't grab the lock
	 * when we were reading the num_txbdfree and checking for available
	 * space, that's because outside of this function it can only grow,
	 * and once we've got needed space, it cannot suddenly disappear.
	 *
	 * The lock also protects us from gfar_error(), which can modify
	 * regs->tstat and thus retrigger the transfers, which is why we
	 * also must grab the lock before setting ready bit for the first
	 * to be transmitted BD.
	 */
	spin_lock_irqsave(&tx_queue->txlock, flags);

J
Jan Ceuleers 已提交
2305
	/* The powerpc-specific eieio() is used, as wmb() has too strong
2306 2307 2308 2309 2310 2311 2312
	 * semantics (it requires synchronization between cacheable and
	 * uncacheable mappings, which eieio doesn't provide and which we
	 * don't need), thus requiring a more expensive sync instruction.  At
	 * some point, the set of architecture-independent barrier functions
	 * should be expanded to include weaker barriers.
	 */
	eieio();
2313

D
Dai Haruki 已提交
2314 2315
	txbdp_start->lstatus = lstatus;

2316 2317 2318 2319
	eieio(); /* force lstatus write before tx_skbuff */

	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;

D
Dai Haruki 已提交
2320
	/* Update the current skb pointer to the next entry we will use
J
Jan Ceuleers 已提交
2321 2322
	 * (wrapping if necessary)
	 */
2323
	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
2324
			      TX_RING_MOD_MASK(tx_queue->tx_ring_size);
D
Dai Haruki 已提交
2325

2326
	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
D
Dai Haruki 已提交
2327 2328

	/* reduce TxBD free count */
2329
	tx_queue->num_txbdfree -= (nr_txbds);
L
Linus Torvalds 已提交
2330 2331

	/* If the next BD still needs to be cleaned up, then the bds
J
Jan Ceuleers 已提交
2332 2333
	 * are full.  We need to tell the kernel to stop sending us stuff.
	 */
2334
	if (!tx_queue->num_txbdfree) {
2335
		netif_tx_stop_queue(txq);
L
Linus Torvalds 已提交
2336

2337
		dev->stats.tx_fifo_errors++;
L
Linus Torvalds 已提交
2338 2339 2340
	}

	/* Tell the DMA to go go go */
2341
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
L
Linus Torvalds 已提交
2342 2343

	/* Unlock priv */
2344
	spin_unlock_irqrestore(&tx_queue->txlock, flags);
L
Linus Torvalds 已提交
2345

2346
	return NETDEV_TX_OK;
L
Linus Torvalds 已提交
2347 2348 2349 2350 2351 2352
}

/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	disable_napi(priv);

	/* make sure a pending reset_task cannot race with teardown */
	cancel_work_sync(&priv->reset_task);
	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	netif_tx_stop_all_queues(dev);

	return 0;
}

/* Changes the mac address if the controller is not running. */
2369
static int gfar_set_mac_address(struct net_device *dev)
L
Linus Torvalds 已提交
2370
{
2371
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
L
Linus Torvalds 已提交
2372 2373 2374 2375

	return 0;
}

S
Sebastian Pöhn 已提交
2376 2377 2378 2379 2380 2381 2382 2383 2384 2385
/* Check if rx parser should be activated */
void gfar_check_rx_parser_mode(struct gfar_private *priv)
{
	struct gfar __iomem *regs;
	u32 tempval;

	regs = priv->gfargrp[0].regs;

	tempval = gfar_read(&regs->rctrl);
	/* If parse is no longer required, then disable parser */
2386
	if (tempval & RCTRL_REQ_PARSER) {
S
Sebastian Pöhn 已提交
2387
		tempval |= RCTRL_PRSDEP_INIT;
2388 2389
		priv->uses_rxfcb = 1;
	} else {
S
Sebastian Pöhn 已提交
2390
		tempval &= ~RCTRL_PRSDEP_INIT;
2391 2392
		priv->uses_rxfcb = 0;
	}
S
Sebastian Pöhn 已提交
2393 2394 2395
	gfar_write(&regs->rctrl, tempval);
}

2396
/* Enables and disables VLAN insertion/extraction */
2397
void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
2398 2399
{
	struct gfar_private *priv = netdev_priv(dev);
2400
	struct gfar __iomem *regs = NULL;
2401 2402 2403
	unsigned long flags;
	u32 tempval;

2404
	regs = priv->gfargrp[0].regs;
2405 2406
	local_irq_save(flags);
	lock_rx_qs(priv);
2407

2408
	if (features & NETIF_F_HW_VLAN_CTAG_TX) {
2409
		/* Enable VLAN tag insertion */
2410
		tempval = gfar_read(&regs->tctrl);
2411
		tempval |= TCTRL_VLINS;
2412
		gfar_write(&regs->tctrl, tempval);
2413 2414
	} else {
		/* Disable VLAN tag insertion */
2415
		tempval = gfar_read(&regs->tctrl);
2416
		tempval &= ~TCTRL_VLINS;
2417
		gfar_write(&regs->tctrl, tempval);
J
Jiri Pirko 已提交
2418
	}
2419

2420
	if (features & NETIF_F_HW_VLAN_CTAG_RX) {
J
Jiri Pirko 已提交
2421 2422 2423 2424
		/* Enable VLAN tag extraction */
		tempval = gfar_read(&regs->rctrl);
		tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
		gfar_write(&regs->rctrl, tempval);
2425
		priv->uses_rxfcb = 1;
J
Jiri Pirko 已提交
2426
	} else {
2427
		/* Disable VLAN tag extraction */
2428
		tempval = gfar_read(&regs->rctrl);
2429
		tempval &= ~RCTRL_VLEX;
2430
		gfar_write(&regs->rctrl, tempval);
S
Sebastian Pöhn 已提交
2431 2432

		gfar_check_rx_parser_mode(priv);
2433 2434
	}

2435 2436
	gfar_change_mtu(dev, dev->mtu);

2437 2438
	unlock_rx_qs(priv);
	local_irq_restore(flags);
2439 2440
}

L
Linus Torvalds 已提交
2441 2442 2443 2444
static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	int tempsize, tempval;
	struct gfar_private *priv = netdev_priv(dev);
2445
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
L
Linus Torvalds 已提交
2446
	int oldsize = priv->rx_buffer_size;
2447 2448
	int frame_size = new_mtu + ETH_HLEN;

L
Linus Torvalds 已提交
2449
	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
2450
		netif_err(priv, drv, dev, "Invalid MTU setting\n");
L
Linus Torvalds 已提交
2451 2452 2453
		return -EINVAL;
	}

2454
	if (priv->uses_rxfcb)
2455 2456 2457 2458
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

2459 2460
	tempsize = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
		   INCREMENTAL_BUFFER_SIZE;
L
Linus Torvalds 已提交
2461 2462

	/* Only stop and start the controller if it isn't already
J
Jan Ceuleers 已提交
2463 2464
	 * stopped, and we changed something
	 */
L
Linus Torvalds 已提交
2465 2466 2467 2468 2469 2470 2471
	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		stop_gfar(dev);

	priv->rx_buffer_size = tempsize;

	dev->mtu = new_mtu;

2472 2473
	gfar_write(&regs->mrblr, priv->rx_buffer_size);
	gfar_write(&regs->maxfrm, priv->rx_buffer_size);
L
Linus Torvalds 已提交
2474 2475 2476

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
J
Jan Ceuleers 已提交
2477 2478
	 * to allow huge frames, and to check the length
	 */
2479
	tempval = gfar_read(&regs->maccfg2);
L
Linus Torvalds 已提交
2480

2481
	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
2482
	    gfar_has_errata(priv, GFAR_ERRATA_74))
L
Linus Torvalds 已提交
2483 2484 2485 2486
		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
	else
		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

2487
	gfar_write(&regs->maccfg2, tempval);
L
Linus Torvalds 已提交
2488 2489 2490 2491 2492 2493 2494

	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		startup_gfar(dev);

	return 0;
}

2495
/* gfar_reset_task gets scheduled when a packet has not been
L
Linus Torvalds 已提交
2496 2497
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
2498 2499 2500
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
L
Linus Torvalds 已提交
2501
{
2502
	struct gfar_private *priv = container_of(work, struct gfar_private,
2503
						 reset_task);
2504
	struct net_device *dev = priv->ndev;
L
Linus Torvalds 已提交
2505 2506

	if (dev->flags & IFF_UP) {
2507
		netif_tx_stop_all_queues(dev);
L
Linus Torvalds 已提交
2508 2509
		stop_gfar(dev);
		startup_gfar(dev);
2510
		netif_tx_start_all_queues(dev);
L
Linus Torvalds 已提交
2511 2512
	}

2513
	netif_tx_schedule_all(dev);
L
Linus Torvalds 已提交
2514 2515
}

2516 2517 2518 2519 2520 2521 2522 2523
/* Tx watchdog callback: count the error and defer the actual device
 * reset to process context via the reset_task work item.
 */
static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);
}

E
Eran Liberty 已提交
2524 2525 2526 2527 2528 2529
static void gfar_align_skb(struct sk_buff *skb)
{
	/* We need the data buffer to be aligned properly.  We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb, RXBUF_ALIGNMENT -
2530
		    (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
E
Eran Liberty 已提交
2531 2532
}

L
Linus Torvalds 已提交
2533
/* Interrupt Handler for Transmit complete */
C
Claudiu Manoil 已提交
2534
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
L
Linus Torvalds 已提交
2535
{
2536
	struct net_device *dev = tx_queue->dev;
2537
	struct netdev_queue *txq;
D
Dai Haruki 已提交
2538
	struct gfar_private *priv = netdev_priv(dev);
2539
	struct txbd8 *bdp, *next = NULL;
D
Dai Haruki 已提交
2540
	struct txbd8 *lbdp = NULL;
2541
	struct txbd8 *base = tx_queue->tx_bd_base;
D
Dai Haruki 已提交
2542 2543
	struct sk_buff *skb;
	int skb_dirtytx;
2544
	int tx_ring_size = tx_queue->tx_ring_size;
2545
	int frags = 0, nr_txbds = 0;
D
Dai Haruki 已提交
2546
	int i;
D
Dai Haruki 已提交
2547
	int howmany = 0;
2548 2549
	int tqi = tx_queue->qindex;
	unsigned int bytes_sent = 0;
D
Dai Haruki 已提交
2550
	u32 lstatus;
2551
	size_t buflen;
L
Linus Torvalds 已提交
2552

2553
	txq = netdev_get_tx_queue(dev, tqi);
2554 2555
	bdp = tx_queue->dirty_tx;
	skb_dirtytx = tx_queue->skb_dirtytx;
L
Linus Torvalds 已提交
2556

2557
	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
A
Anton Vorontsov 已提交
2558 2559
		unsigned long flags;

D
Dai Haruki 已提交
2560
		frags = skb_shinfo(skb)->nr_frags;
2561

J
Jan Ceuleers 已提交
2562
		/* When time stamping, one additional TxBD must be freed.
2563 2564
		 * Also, we need to dma_unmap_single() the TxPAL.
		 */
2565
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
2566 2567 2568 2569 2570
			nr_txbds = frags + 2;
		else
			nr_txbds = frags + 1;

		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
L
Linus Torvalds 已提交
2571

D
Dai Haruki 已提交
2572
		lstatus = lbdp->lstatus;
L
Linus Torvalds 已提交
2573

D
Dai Haruki 已提交
2574 2575
		/* Only clean completed frames */
		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
2576
		    (lstatus & BD_LENGTH_MASK))
D
Dai Haruki 已提交
2577 2578
			break;

2579
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2580
			next = next_txbd(bdp, base, tx_ring_size);
2581
			buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2582 2583 2584
		} else
			buflen = bdp->length;

2585
		dma_unmap_single(priv->dev, bdp->bufPtr,
2586
				 buflen, DMA_TO_DEVICE);
2587

2588
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2589 2590
			struct skb_shared_hwtstamps shhwtstamps;
			u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
2591

2592 2593
			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
2594
			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
2595 2596 2597 2598
			skb_tstamp_tx(skb, &shhwtstamps);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next;
		}
A
Andy Fleming 已提交
2599

D
Dai Haruki 已提交
2600 2601
		bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
		bdp = next_txbd(bdp, base, tx_ring_size);
D
Dai Haruki 已提交
2602

D
Dai Haruki 已提交
2603
		for (i = 0; i < frags; i++) {
2604
			dma_unmap_page(priv->dev, bdp->bufPtr,
2605
				       bdp->length, DMA_TO_DEVICE);
D
Dai Haruki 已提交
2606 2607 2608
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next_txbd(bdp, base, tx_ring_size);
		}
L
Linus Torvalds 已提交
2609

2610
		bytes_sent += GFAR_CB(skb)->bytes_sent;
2611

E
Eric Dumazet 已提交
2612
		dev_kfree_skb_any(skb);
2613

2614
		tx_queue->tx_skbuff[skb_dirtytx] = NULL;
D
Dai Haruki 已提交
2615

D
Dai Haruki 已提交
2616
		skb_dirtytx = (skb_dirtytx + 1) &
2617
			      TX_RING_MOD_MASK(tx_ring_size);
D
Dai Haruki 已提交
2618 2619

		howmany++;
A
Anton Vorontsov 已提交
2620
		spin_lock_irqsave(&tx_queue->txlock, flags);
2621
		tx_queue->num_txbdfree += nr_txbds;
A
Anton Vorontsov 已提交
2622
		spin_unlock_irqrestore(&tx_queue->txlock, flags);
D
Dai Haruki 已提交
2623
	}
L
Linus Torvalds 已提交
2624

D
Dai Haruki 已提交
2625
	/* If we freed a buffer, we can restart transmission, if necessary */
2626
	if (netif_tx_queue_stopped(txq) && tx_queue->num_txbdfree)
2627
		netif_wake_subqueue(dev, tqi);
L
Linus Torvalds 已提交
2628

D
Dai Haruki 已提交
2629
	/* Update dirty indicators */
2630 2631
	tx_queue->skb_dirtytx = skb_dirtytx;
	tx_queue->dirty_tx = bdp;
L
Linus Torvalds 已提交
2632

2633
	netdev_tx_completed_queue(txq, howmany, bytes_sent);
D
Dai Haruki 已提交
2634 2635
}

2636
static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
D
Dai Haruki 已提交
2637
{
2638 2639
	unsigned long flags;

2640 2641
	spin_lock_irqsave(&gfargrp->grplock, flags);
	if (napi_schedule_prep(&gfargrp->napi)) {
2642
		gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
2643
		__napi_schedule(&gfargrp->napi);
2644
	} else {
J
Jan Ceuleers 已提交
2645
		/* Clear IEVENT, so interrupts aren't called again
2646 2647
		 * because of the packets that have already arrived.
		 */
2648
		gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
2649
	}
2650
	spin_unlock_irqrestore(&gfargrp->grplock, flags);
2651

2652
}
L
Linus Torvalds 已提交
2653

2654
/* Interrupt Handler for Transmit complete */
2655
static irqreturn_t gfar_transmit(int irq, void *grp_id)
2656
{
2657
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
L
Linus Torvalds 已提交
2658 2659 2660
	return IRQ_HANDLED;
}

2661
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
2662
			   struct sk_buff *skb)
2663
{
2664
	struct net_device *dev = rx_queue->dev;
2665
	struct gfar_private *priv = netdev_priv(dev);
2666
	dma_addr_t buf;
2667

2668
	buf = dma_map_single(priv->dev, skb->data,
2669
			     priv->rx_buffer_size, DMA_FROM_DEVICE);
2670
	gfar_init_rxbdp(rx_queue, bdp, buf);
2671 2672
}

2673
static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
L
Linus Torvalds 已提交
2674 2675
{
	struct gfar_private *priv = netdev_priv(dev);
E
Eric Dumazet 已提交
2676
	struct sk_buff *skb;
L
Linus Torvalds 已提交
2677

E
Eran Liberty 已提交
2678
	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
2679
	if (!skb)
L
Linus Torvalds 已提交
2680 2681
		return NULL;

E
Eran Liberty 已提交
2682
	gfar_align_skb(skb);
2683

E
Eran Liberty 已提交
2684 2685 2686
	return skb;
}

/* Public wrapper for allocating an aligned rx skb */
struct sk_buff *gfar_new_skb(struct net_device *dev)
{
	return gfar_alloc_skb(dev);
}

2692
static inline void count_errors(unsigned short status, struct net_device *dev)
L
Linus Torvalds 已提交
2693
{
2694
	struct gfar_private *priv = netdev_priv(dev);
2695
	struct net_device_stats *stats = &dev->stats;
L
Linus Torvalds 已提交
2696 2697
	struct gfar_extra_stats *estats = &priv->extra_stats;

J
Jan Ceuleers 已提交
2698
	/* If the packet was truncated, none of the other errors matter */
L
Linus Torvalds 已提交
2699 2700 2701
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;

2702
		atomic64_inc(&estats->rx_trunc);
L
Linus Torvalds 已提交
2703 2704 2705 2706 2707 2708 2709 2710

		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
2711
			atomic64_inc(&estats->rx_large);
L
Linus Torvalds 已提交
2712
		else
2713
			atomic64_inc(&estats->rx_short);
L
Linus Torvalds 已提交
2714 2715 2716
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
2717
		atomic64_inc(&estats->rx_nonoctet);
L
Linus Torvalds 已提交
2718 2719
	}
	if (status & RXBD_CRCERR) {
2720
		atomic64_inc(&estats->rx_crcerr);
L
Linus Torvalds 已提交
2721 2722 2723
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
2724
		atomic64_inc(&estats->rx_overrun);
L
Linus Torvalds 已提交
2725 2726 2727 2728
		stats->rx_crc_errors++;
	}
}

2729
irqreturn_t gfar_receive(int irq, void *grp_id)
L
Linus Torvalds 已提交
2730
{
2731
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
L
Linus Torvalds 已提交
2732 2733 2734
	return IRQ_HANDLED;
}

2735 2736 2737 2738
static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
J
Jan Ceuleers 已提交
2739 2740
	 * checksumming is necessary.  Otherwise, it is [FIXME]
	 */
2741
	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
2742 2743
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
2744
		skb_checksum_none_assert(skb);
2745 2746 2747
}


J
Jan Ceuleers 已提交
2748
/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
2749 2750
static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			       int amount_pull, struct napi_struct *napi)
L
Linus Torvalds 已提交
2751 2752
{
	struct gfar_private *priv = netdev_priv(dev);
2753
	struct rxfcb *fcb = NULL;
L
Linus Torvalds 已提交
2754

2755 2756
	/* fcb is at the beginning if exists */
	fcb = (struct rxfcb *)skb->data;
2757

J
Jan Ceuleers 已提交
2758 2759 2760
	/* Remove the FCB from the skb
	 * Remove the padded bytes, if there are any
	 */
2761 2762
	if (amount_pull) {
		skb_record_rx_queue(skb, fcb->rq);
2763
		skb_pull(skb, amount_pull);
2764
	}
2765

2766 2767 2768 2769
	/* Get receive timestamp from the skb */
	if (priv->hwts_rx_en) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		u64 *ns = (u64 *) skb->data;
2770

2771 2772 2773 2774 2775 2776 2777
		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
		shhwtstamps->hwtstamp = ns_to_ktime(*ns);
	}

	if (priv->padding)
		skb_pull(skb, priv->padding);

2778
	if (dev->features & NETIF_F_RXCSUM)
2779
		gfar_rx_checksum(skb, fcb);
2780

2781 2782
	/* Tell the skb what kind of packet this is */
	skb->protocol = eth_type_trans(skb, dev);
L
Linus Torvalds 已提交
2783

2784
	/* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here.
2785 2786 2787
	 * Even if vlan rx accel is disabled, on some chips
	 * RXFCB_VLN is pseudo randomly set.
	 */
2788
	if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
2789
	    fcb->flags & RXFCB_VLN)
2790
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), fcb->vlctl);
J
Jiri Pirko 已提交
2791

2792
	/* Send the packet up the stack */
2793
	napi_gro_receive(napi, skb);
2794

L
Linus Torvalds 已提交
2795 2796 2797
}

/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
2798 2799
 * until the budget/quota has been reached. Returns the number
 * of frames handled
L
Linus Torvalds 已提交
2800
 */
2801
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
L
Linus Torvalds 已提交
2802
{
2803
	struct net_device *dev = rx_queue->dev;
2804
	struct rxbd8 *bdp, *base;
L
Linus Torvalds 已提交
2805
	struct sk_buff *skb;
2806 2807
	int pkt_len;
	int amount_pull;
L
Linus Torvalds 已提交
2808 2809 2810 2811
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
2812 2813
	bdp = rx_queue->cur_rx;
	base = rx_queue->rx_bd_base;
L
Linus Torvalds 已提交
2814

2815
	amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;
2816

L
Linus Torvalds 已提交
2817
	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
2818
		struct sk_buff *newskb;
2819

2820
		rmb();
2821 2822 2823 2824

		/* Add another skb for the future */
		newskb = gfar_new_skb(dev);

2825
		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
L
Linus Torvalds 已提交
2826

2827
		dma_unmap_single(priv->dev, bdp->bufPtr,
2828
				 priv->rx_buffer_size, DMA_FROM_DEVICE);
A
Andy Fleming 已提交
2829

2830
		if (unlikely(!(bdp->status & RXBD_ERR) &&
2831
			     bdp->length > priv->rx_buffer_size))
2832 2833
			bdp->status = RXBD_LARGE;

2834 2835
		/* We drop the frame if we failed to allocate a new buffer */
		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
2836
			     bdp->status & RXBD_ERR)) {
2837 2838 2839 2840
			count_errors(bdp->status, dev);

			if (unlikely(!newskb))
				newskb = skb;
E
Eran Liberty 已提交
2841
			else if (skb)
E
Eric Dumazet 已提交
2842
				dev_kfree_skb(skb);
2843
		} else {
L
Linus Torvalds 已提交
2844
			/* Increment the number of packets */
S
Sandeep Gopalpet 已提交
2845
			rx_queue->stats.rx_packets++;
L
Linus Torvalds 已提交
2846 2847
			howmany++;

2848 2849 2850 2851
			if (likely(skb)) {
				pkt_len = bdp->length - ETH_FCS_LEN;
				/* Remove the FCS from the packet length */
				skb_put(skb, pkt_len);
S
Sandeep Gopalpet 已提交
2852
				rx_queue->stats.rx_bytes += pkt_len;
2853
				skb_record_rx_queue(skb, rx_queue->qindex);
W
Wu Jiajun-B06378 已提交
2854
				gfar_process_frame(dev, skb, amount_pull,
2855
						   &rx_queue->grp->napi);
2856 2857

			} else {
2858
				netif_warn(priv, rx_err, dev, "Missing skb!\n");
S
Sandeep Gopalpet 已提交
2859
				rx_queue->stats.rx_dropped++;
2860
				atomic64_inc(&priv->extra_stats.rx_skbmissing);
2861
			}
L
Linus Torvalds 已提交
2862 2863 2864

		}

2865
		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
L
Linus Torvalds 已提交
2866

2867
		/* Setup the new bdp */
2868
		gfar_new_rxbdp(rx_queue, bdp, newskb);
L
Linus Torvalds 已提交
2869 2870

		/* Update to the next pointer */
2871
		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
L
Linus Torvalds 已提交
2872 2873

		/* update to point at the next skb */
2874 2875
		rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
				      RX_RING_MOD_MASK(rx_queue->rx_ring_size);
L
Linus Torvalds 已提交
2876 2877 2878
	}

	/* Update the current rxbd pointer to be the next one */
2879
	rx_queue->cur_rx = bdp;
L
Linus Torvalds 已提交
2880 2881 2882 2883

	return howmany;
}

2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925
/* NAPI poll handler for the single-queue case: services RX queue 0 and
 * TX queue 0 of the group.  Cleans the TX ring to completion, receives
 * up to @budget frames, and — if it finished under budget — completes
 * NAPI, restarts the receiver and re-enables/reprograms interrupts.
 * Returns the number of RX frames processed.
 */
static int gfar_poll_sq(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi);
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = gfargrp->priv->tx_queue[0];
	struct gfar_priv_rx_q *rx_queue = gfargrp->priv->rx_queue[0];
	int work_done = 0;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_RTX_MASK);

	/* run Tx cleanup to completion */
	if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
		gfar_clean_tx_ring(tx_queue);

	work_done = gfar_clean_rx_ring(rx_queue, budget);

	if (work_done < budget) {
		/* Under budget: no more RX work pending, exit polled mode */
		napi_complete(napi);
		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		gfar_write(&regs->imask, IMASK_DEFAULT);

		/* If we are coalescing interrupts, update the timer
		 * Otherwise, clear it
		 */
		gfar_write(&regs->txic, 0);
		if (likely(tx_queue->txcoalescing))
			gfar_write(&regs->txic, tx_queue->txic);

		gfar_write(&regs->rxic, 0);
		/* NOTE(review): tx path uses likely(); the unlikely() here
		 * looks asymmetric but matches the existing behavior —
		 * branch hints do not change semantics.
		 */
		if (unlikely(rx_queue->rxcoalescing))
			gfar_write(&regs->rxic, rx_queue->rxic);
	}

	return work_done;
}

2926
static int gfar_poll(struct napi_struct *napi, int budget)
L
Linus Torvalds 已提交
2927
{
2928 2929
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi);
2930
	struct gfar_private *priv = gfargrp->priv;
2931
	struct gfar __iomem *regs = gfargrp->regs;
2932
	struct gfar_priv_tx_q *tx_queue = NULL;
2933
	struct gfar_priv_rx_q *rx_queue = NULL;
C
Claudiu Manoil 已提交
2934
	int work_done = 0, work_done_per_q = 0;
2935
	int i, budget_per_q = 0;
2936
	int has_tx_work = 0;
2937 2938
	unsigned long rstat_rxf;
	int num_act_queues;
2939

2940
	/* Clear IEVENT, so interrupts aren't called again
J
Jan Ceuleers 已提交
2941 2942
	 * because of the packets that have already arrived
	 */
2943
	gfar_write(&regs->ievent, IEVENT_RTX_MASK);
2944

2945 2946 2947 2948 2949 2950
	rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;

	num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
	if (num_act_queues)
		budget_per_q = budget/num_act_queues;

2951 2952 2953 2954 2955 2956
	for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
		tx_queue = priv->tx_queue[i];
		/* run Tx cleanup to completion */
		if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
			gfar_clean_tx_ring(tx_queue);
			has_tx_work = 1;
C
Claudiu Manoil 已提交
2957
		}
2958
	}
2959

2960 2961 2962 2963
	for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
		/* skip queue if not active */
		if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
			continue;
L
Linus Torvalds 已提交
2964

2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 2976 2977 2978 2979 2980
		rx_queue = priv->rx_queue[i];
		work_done_per_q =
			gfar_clean_rx_ring(rx_queue, budget_per_q);
		work_done += work_done_per_q;

		/* finished processing this queue */
		if (work_done_per_q < budget_per_q) {
			/* clear active queue hw indication */
			gfar_write(&regs->rstat,
				   RSTAT_CLEAR_RXF0 >> i);
			num_act_queues--;

			if (!num_act_queues)
				break;
		}
	}
2981

2982
	if (!num_act_queues && !has_tx_work) {
L
Linus Torvalds 已提交
2983

2984
		napi_complete(napi);
L
Linus Torvalds 已提交
2985

2986 2987
		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);
L
Linus Torvalds 已提交
2988

2989
		gfar_write(&regs->imask, IMASK_DEFAULT);
C
Claudiu Manoil 已提交
2990

2991 2992 2993 2994 2995
		/* If we are coalescing interrupts, update the timer
		 * Otherwise, clear it
		 */
		gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
					  gfargrp->tx_bit_map);
L
Linus Torvalds 已提交
2996 2997
	}

C
Claudiu Manoil 已提交
2998
	return work_done;
L
Linus Torvalds 已提交
2999 3000
}

3001
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 *
 * For each interrupt group the relevant IRQ lines are masked, the
 * interrupt handler is invoked directly, and the lines are unmasked
 * again in reverse order.
 */
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i;

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++) {
			struct gfar_priv_grp *grp = &priv->gfargrp[i];

			disable_irq(gfar_irq(grp, TX)->irq);
			disable_irq(gfar_irq(grp, RX)->irq);
			disable_irq(gfar_irq(grp, ER)->irq);
			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
			enable_irq(gfar_irq(grp, ER)->irq);
			enable_irq(gfar_irq(grp, RX)->irq);
			enable_irq(gfar_irq(grp, TX)->irq);
		}
	} else {
		/* Single combined interrupt line per group */
		for (i = 0; i < priv->num_grps; i++) {
			struct gfar_priv_grp *grp = &priv->gfargrp[i];

			disable_irq(gfar_irq(grp, TX)->irq);
			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
			enable_irq(gfar_irq(grp, TX)->irq);
		}
	}
}
#endif

L
Linus Torvalds 已提交
3036
/* The interrupt handler for devices with one interrupt */
3037
static irqreturn_t gfar_interrupt(int irq, void *grp_id)
L
Linus Torvalds 已提交
3038
{
3039
	struct gfar_priv_grp *gfargrp = grp_id;
L
Linus Torvalds 已提交
3040 3041

	/* Save ievent for future reference */
3042
	u32 events = gfar_read(&gfargrp->regs->ievent);
L
Linus Torvalds 已提交
3043 3044

	/* Check for reception */
3045
	if (events & IEVENT_RX_MASK)
3046
		gfar_receive(irq, grp_id);
L
Linus Torvalds 已提交
3047 3048

	/* Check for transmit completion */
3049
	if (events & IEVENT_TX_MASK)
3050
		gfar_transmit(irq, grp_id);
L
Linus Torvalds 已提交
3051

3052 3053
	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
3054
		gfar_error(irq, grp_id);
L
Linus Torvalds 已提交
3055 3056 3057 3058

	return IRQ_HANDLED;
}

3059 3060 3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093
/* Compute the MACCFG1 flow-control bits (TX_FLOW/RX_FLOW) matching the
 * current pause configuration.  Flow control is only used in full
 * duplex.  With pause autonegotiation enabled the result is resolved
 * from our advertisement and the link partner's capabilities;
 * otherwise the user-forced tx/rx pause settings apply directly.
 */
static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
{
	struct phy_device *phydev = priv->phydev;
	u32 cfg = 0;

	/* No flow control in half duplex */
	if (!phydev->duplex)
		return 0;

	if (priv->pause_aneg_en) {
		u16 lcl_adv;
		u16 rmt_adv = 0;
		u8 resolved;

		/* Gather the link partner's pause capabilities */
		if (phydev->pause)
			rmt_adv = LPA_PAUSE_CAP;
		if (phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		lcl_adv = mii_advertise_flowctrl(phydev->advertising);

		resolved = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
		if (resolved & FLOW_CTRL_TX)
			cfg |= MACCFG1_TX_FLOW;
		if (resolved & FLOW_CTRL_RX)
			cfg |= MACCFG1_RX_FLOW;
	} else {
		/* Forced (non-negotiated) pause settings */
		if (priv->tx_pause_en)
			cfg |= MACCFG1_TX_FLOW;
		if (priv->rx_pause_en)
			cfg |= MACCFG1_RX_FLOW;
	}

	return cfg;
}

L
Linus Torvalds 已提交
3094 3095
/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
3096
 * information through variables in the phydev structure, and this
L
Linus Torvalds 已提交
3097 3098 3099 3100 3101 3102
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
3103
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
3104 3105 3106 3107
	unsigned long flags;
	struct phy_device *phydev = priv->phydev;
	int new_state = 0;

3108 3109 3110
	local_irq_save(flags);
	lock_tx_qs(priv);

3111
	if (phydev->link) {
3112
		u32 tempval1 = gfar_read(&regs->maccfg1);
3113
		u32 tempval = gfar_read(&regs->maccfg2);
3114
		u32 ecntrl = gfar_read(&regs->ecntrl);
L
Linus Torvalds 已提交
3115 3116

		/* Now we make sure that we can be in full duplex mode.
J
Jan Ceuleers 已提交
3117 3118
		 * If not, we operate in half-duplex mode.
		 */
3119 3120 3121
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
L
Linus Torvalds 已提交
3122
				tempval &= ~(MACCFG2_FULL_DUPLEX);
3123
			else
L
Linus Torvalds 已提交
3124 3125
				tempval |= MACCFG2_FULL_DUPLEX;

3126
			priv->oldduplex = phydev->duplex;
L
Linus Torvalds 已提交
3127 3128
		}

3129 3130 3131
		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
L
Linus Torvalds 已提交
3132 3133 3134
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
3135 3136

				ecntrl &= ~(ECNTRL_R100);
L
Linus Torvalds 已提交
3137 3138 3139 3140 3141
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
3142 3143

				/* Reduced mode distinguishes
J
Jan Ceuleers 已提交
3144 3145
				 * between 10 and 100
				 */
3146 3147 3148 3149
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
L
Linus Torvalds 已提交
3150 3151
				break;
			default:
3152 3153 3154
				netif_warn(priv, link, dev,
					   "Ack!  Speed (%d) is not 10/100/1000!\n",
					   phydev->speed);
L
Linus Torvalds 已提交
3155 3156 3157
				break;
			}

3158
			priv->oldspeed = phydev->speed;
L
Linus Torvalds 已提交
3159 3160
		}

3161 3162 3163 3164
		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
		tempval1 |= gfar_get_flowctrl_cfg(priv);

		gfar_write(&regs->maccfg1, tempval1);
3165
		gfar_write(&regs->maccfg2, tempval);
3166
		gfar_write(&regs->ecntrl, ecntrl);
3167

L
Linus Torvalds 已提交
3168
		if (!priv->oldlink) {
3169
			new_state = 1;
L
Linus Torvalds 已提交
3170 3171
			priv->oldlink = 1;
		}
3172 3173 3174 3175 3176
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
L
Linus Torvalds 已提交
3177 3178
	}

3179 3180
	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);
3181 3182
	unlock_tx_qs(priv);
	local_irq_restore(flags);
3183
}
L
Linus Torvalds 已提交
3184 3185 3186 3187

/* Update the hash table based on the current list of multicast
 * addresses we subscribe to.  Also, change the promiscuity of
 * the device based on the flags (this function is called
J
Jan Ceuleers 已提交
3188 3189
 * whenever dev->flags is changed
 */
L
Linus Torvalds 已提交
3190 3191
static void gfar_set_multi(struct net_device *dev)
{
3192
	struct netdev_hw_addr *ha;
L
Linus Torvalds 已提交
3193
	struct gfar_private *priv = netdev_priv(dev);
3194
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
L
Linus Torvalds 已提交
3195 3196
	u32 tempval;

3197
	if (dev->flags & IFF_PROMISC) {
L
Linus Torvalds 已提交
3198 3199 3200 3201 3202 3203 3204 3205 3206 3207
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}
3208

3209
	if (dev->flags & IFF_ALLMULTI) {
L
Linus Torvalds 已提交
3210
		/* Set the hash to rx all multicast frames */
3211 3212 3213 3214 3215 3216 3217 3218
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
L
Linus Torvalds 已提交
3219 3220 3221 3222 3223 3224 3225 3226 3227
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
3228 3229 3230
		int em_num;
		int idx;

L
Linus Torvalds 已提交
3231
		/* zero out the hash */
3232 3233 3234 3235 3236 3237 3238 3239
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
L
Linus Torvalds 已提交
3240 3241 3242 3243 3244 3245 3246 3247 3248
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

3249 3250
		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
J
Jan Ceuleers 已提交
3251 3252
		 * setting them
		 */
3253 3254 3255 3256 3257 3258 3259 3260 3261
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

3262
		if (netdev_mc_empty(dev))
L
Linus Torvalds 已提交
3263 3264 3265
			return;

		/* Parse the list, and set the appropriate bits */
3266
		netdev_for_each_mc_addr(ha, dev) {
3267
			if (idx < em_num) {
3268
				gfar_set_mac_for_addr(dev, idx, ha->addr);
3269 3270
				idx++;
			} else
3271
				gfar_set_hash_for_addr(dev, ha->addr);
L
Linus Torvalds 已提交
3272 3273 3274 3275
		}
	}
}

3276 3277

/* Clears each of the exact match registers to zero, so they
J
Jan Ceuleers 已提交
3278 3279
 * don't interfere with normal reception
 */
3280 3281 3282
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
3283
	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
3284

3285
	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
J
Joe Perches 已提交
3286
		gfar_set_mac_for_addr(dev, idx, zero_arr);
3287 3288
}

L
Linus Torvalds 已提交
3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299 3300
/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table.  The table is controlled through 8 32-bit registers:
 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 * gaddr7.  This means that the 3 most significant bits in the
 * hash index which gaddr register to use, and the 5 other bits
 * indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
J
Jan Ceuleers 已提交
3301 3302
 * the entry.
 */
L
Linus Torvalds 已提交
3303 3304 3305 3306
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
3307
	u32 result = ether_crc(ETH_ALEN, addr);
3308 3309 3310
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
L
Linus Torvalds 已提交
3311 3312
	u32 value = (1 << (31-whichbit));

3313
	tempval = gfar_read(priv->hash_regs[whichreg]);
L
Linus Torvalds 已提交
3314
	tempval |= value;
3315
	gfar_write(priv->hash_regs[whichreg], tempval);
L
Linus Torvalds 已提交
3316 3317
}

3318 3319 3320 3321

/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
J
Joe Perches 已提交
3322 3323
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr)
3324 3325
{
	struct gfar_private *priv = netdev_priv(dev);
3326
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
3327
	int idx;
3328
	char tmpbuf[ETH_ALEN];
3329
	u32 tempval;
3330
	u32 __iomem *macptr = &regs->macstnaddr1;
3331 3332 3333

	macptr += num*2;

J
Jan Ceuleers 已提交
3334 3335 3336
	/* Now copy it into the mac registers backwards, cuz
	 * little endian is silly
	 */
3337 3338
	for (idx = 0; idx < ETH_ALEN; idx++)
		tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];
3339 3340 3341 3342 3343 3344 3345 3346

	gfar_write(macptr, *((u32 *) (tmpbuf)));

	tempval = *((u32 *) (tmpbuf + 4));

	gfar_write(macptr+1, tempval);
}

L
Linus Torvalds 已提交
3347
/* GFAR error interrupt handler */
3348
static irqreturn_t gfar_error(int irq, void *grp_id)
L
Linus Torvalds 已提交
3349
{
3350 3351 3352 3353
	struct gfar_priv_grp *gfargrp = grp_id;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_private *priv= gfargrp->priv;
	struct net_device *dev = priv->ndev;
L
Linus Torvalds 已提交
3354 3355

	/* Save ievent for future reference */
3356
	u32 events = gfar_read(&regs->ievent);
L
Linus Torvalds 已提交
3357 3358

	/* Clear IEVENT */
3359
	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
3360 3361

	/* Magic Packet is not an error. */
3362
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
3363 3364
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;
L
Linus Torvalds 已提交
3365 3366

	/* Hmm... */
3367
	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
3368 3369
		netdev_dbg(dev,
			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
3370
			   events, gfar_read(&regs->imask));
L
Linus Torvalds 已提交
3371 3372 3373

	/* Update the error counters */
	if (events & IEVENT_TXE) {
3374
		dev->stats.tx_errors++;
L
Linus Torvalds 已提交
3375 3376

		if (events & IEVENT_LC)
3377
			dev->stats.tx_window_errors++;
L
Linus Torvalds 已提交
3378
		if (events & IEVENT_CRL)
3379
			dev->stats.tx_aborted_errors++;
L
Linus Torvalds 已提交
3380
		if (events & IEVENT_XFUN) {
3381 3382
			unsigned long flags;

3383 3384
			netif_dbg(priv, tx_err, dev,
				  "TX FIFO underrun, packet dropped\n");
3385
			dev->stats.tx_dropped++;
3386
			atomic64_inc(&priv->extra_stats.tx_underrun);
L
Linus Torvalds 已提交
3387

3388 3389 3390
			local_irq_save(flags);
			lock_tx_qs(priv);

L
Linus Torvalds 已提交
3391
			/* Reactivate the Tx Queues */
3392
			gfar_write(&regs->tstat, gfargrp->tstat);
3393 3394 3395

			unlock_tx_qs(priv);
			local_irq_restore(flags);
L
Linus Torvalds 已提交
3396
		}
3397
		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
L
Linus Torvalds 已提交
3398 3399
	}
	if (events & IEVENT_BSY) {
3400
		dev->stats.rx_errors++;
3401
		atomic64_inc(&priv->extra_stats.rx_bsy);
L
Linus Torvalds 已提交
3402

3403
		gfar_receive(irq, grp_id);
L
Linus Torvalds 已提交
3404

3405 3406
		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
			  gfar_read(&regs->rstat));
L
Linus Torvalds 已提交
3407 3408
	}
	if (events & IEVENT_BABR) {
3409
		dev->stats.rx_errors++;
3410
		atomic64_inc(&priv->extra_stats.rx_babr);
L
Linus Torvalds 已提交
3411

3412
		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
L
Linus Torvalds 已提交
3413 3414
	}
	if (events & IEVENT_EBERR) {
3415
		atomic64_inc(&priv->extra_stats.eberr);
3416
		netif_dbg(priv, rx_err, dev, "bus error\n");
L
Linus Torvalds 已提交
3417
	}
3418 3419
	if (events & IEVENT_RXC)
		netif_dbg(priv, rx_status, dev, "control frame\n");
L
Linus Torvalds 已提交
3420 3421

	if (events & IEVENT_BABT) {
3422
		atomic64_inc(&priv->extra_stats.tx_babt);
3423
		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
L
Linus Torvalds 已提交
3424 3425 3426 3427
	}
	return IRQ_HANDLED;
}

3428 3429 3430 3431 3432 3433
/* Device-tree match table: classic gianfar nodes and eTSEC 2.0.
 * Made const — the of_match_table field expects a const table and
 * the data is never modified.
 */
static const struct of_device_id gfar_match[] =
{
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{
		.compatible = "fsl,etsec2",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gfar_match);
3440

L
Linus Torvalds 已提交
3441
/* Structure for a device driver */
3442
static struct platform_driver gfar_driver = {
3443 3444 3445 3446 3447 3448
	.driver = {
		.name = "fsl-gianfar",
		.owner = THIS_MODULE,
		.pm = GFAR_PM_OPS,
		.of_match_table = gfar_match,
	},
L
Linus Torvalds 已提交
3449 3450 3451 3452
	.probe = gfar_probe,
	.remove = gfar_remove,
};

3453
module_platform_driver(gfar_driver);