gianfar.c 66.6 KB
Newer Older
1
/*
L
Linus Torvalds 已提交
2 3 4
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
5 6
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
L
Linus Torvalds 已提交
7 8 9
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
10
 * Maintainer: Kumar Gala
11
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
L
Linus Torvalds 已提交
12
 *
13 14
 * Copyright 2002-2009 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
L
Linus Torvalds 已提交
15 16 17 18 19 20 21 22 23 24 25 26 27
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 *  Gianfar:  AKA Lambda Draconis, "Dragon"
 *  RA 11 31 24.2
 *  Dec +69 19 52
 *  V 3.84
 *  B-V +1.62
 *
 *  Theory of operation
28
 *
29 30
 *  The driver is initialized through of_device. Configuration information
 *  is therefore conveyed through an OF-style device tree.
L
Linus Torvalds 已提交
31 32 33
 *
 *  The Gianfar Ethernet Controller uses a ring of buffer
 *  descriptors.  The beginning is indicated by a register
34 35
 *  pointing to the physical address of the start of the ring.
 *  The end is determined by a "wrap" bit being set in the
L
Linus Torvalds 已提交
36 37 38
 *  last descriptor of the ring.
 *
 *  When a packet is received, the RXF bit in the
39
 *  IEVENT register is set, triggering an interrupt when the
L
Linus Torvalds 已提交
40 41 42
 *  corresponding bit in the IMASK register is also set (if
 *  interrupt coalescing is active, then the interrupt may not
 *  happen immediately, but will wait until either a set number
43
 *  of frames or amount of time have passed).  In NAPI, the
L
Linus Torvalds 已提交
44
 *  interrupt handler will signal there is work to be done, and
45
 *  exit. This method will start at the last known empty
46
 *  descriptor, and process every subsequent descriptor until there
L
Linus Torvalds 已提交
47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67
 *  are none left with data (NAPI will stop after a set number of
 *  packets to give time to other tasks, but will eventually
 *  process all the packets).  The data arrives inside a
 *  pre-allocated skb, and so after the skb is passed up to the
 *  stack, a new skb must be allocated, and the address field in
 *  the buffer descriptor must be updated to indicate this new
 *  skb.
 *
 *  When the kernel requests that a packet be transmitted, the
 *  driver starts where it left off last time, and points the
 *  descriptor at the buffer which was passed in.  The driver
 *  then informs the DMA engine that there are packets ready to
 *  be transmitted.  Once the controller is finished transmitting
 *  the packet, an interrupt may be triggered (under the same
 *  conditions as for reception, but depending on the TXF bit).
 *  The driver then cleans up the buffer.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
68
#include <linux/unistd.h>
L
Linus Torvalds 已提交
69 70 71 72 73 74 75
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
76
#include <linux/if_vlan.h>
L
Linus Torvalds 已提交
77 78
#include <linux/spinlock.h>
#include <linux/mm.h>
79
#include <linux/of_mdio.h>
80
#include <linux/of_platform.h>
81 82 83
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
K
Kumar Gala 已提交
84
#include <linux/in.h>
L
Linus Torvalds 已提交
85 86 87 88 89 90 91

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
92 93
#include <linux/mii.h>
#include <linux/phy.h>
94 95
#include <linux/phy_fixed.h>
#include <linux/of.h>
L
Linus Torvalds 已提交
96 97

#include "gianfar.h"
98
#include "fsl_pq_mdio.h"
L
Linus Torvalds 已提交
99 100 101 102 103 104

#define TX_TIMEOUT      (1*HZ)
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

const char gfar_driver_name[] = "Gianfar Ethernet";
105
const char gfar_driver_version[] = "1.3";
L
Linus Torvalds 已提交
106 107 108

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
109
static void gfar_reset_task(struct work_struct *work);
L
Linus Torvalds 已提交
110 111
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
112
struct sk_buff *gfar_new_skb(struct net_device *dev);
113
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
114
		struct sk_buff *skb);
L
Linus Torvalds 已提交
115 116
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
117 118 119
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
L
Linus Torvalds 已提交
120 121 122
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
123 124 125
static int gfar_probe(struct of_device *ofdev,
		const struct of_device_id *match);
static int gfar_remove(struct of_device *ofdev);
126
static void free_skb_resources(struct gfar_private *priv);
L
Linus Torvalds 已提交
127 128
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
K
Kapil Juneja 已提交
129
static void gfar_configure_serdes(struct net_device *dev);
130
static int gfar_poll(struct napi_struct *napi, int budget);
131 132 133
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
134 135
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
136 137
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			      int amount_pull);
138 139
static void gfar_vlan_rx_register(struct net_device *netdev,
		                struct vlan_group *grp);
140
void gfar_halt(struct net_device *dev);
141
static void gfar_halt_nodisable(struct net_device *dev);
142 143 144
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
145
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
L
Linus Torvalds 已提交
146 147 148 149 150

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

151
/* Initialize (or recycle) one RX buffer descriptor.
 *
 * @rx_queue: RX queue owning @bdp (used to detect the last ring entry)
 * @bdp:      buffer descriptor to set up
 * @buf:      DMA (bus) address of the receive buffer to attach
 */
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = buf;

	/* Hand the BD to hardware (EMPTY) and request an RX interrupt */
	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	/* Last descriptor of the ring must carry the WRAP bit */
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	/* Ensure bufPtr is visible to the controller before lstatus
	 * marks the descriptor as hardware-owned */
	eieio();

	bdp->lstatus = lstatus;
}

167
/* (Re)initialize the TX and RX buffer descriptor rings.
 *
 * TX descriptors are cleared; RX descriptors are re-armed, reusing any
 * skbs already attached (e.g. across a suspend/resume) and allocating
 * fresh ones for empty slots.
 *
 * Returns 0 on success, -ENOMEM if an RX skb cannot be allocated.
 */
static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	int i;

	tx_queue = priv->tx_queue;
	rx_queue = priv->rx_queue;

	/* Initialize some variables in our dev structure */
	tx_queue->num_txbdfree = tx_queue->tx_ring_size;
	tx_queue->dirty_tx = tx_queue->cur_tx = tx_queue->tx_bd_base;
	rx_queue->cur_rx = rx_queue->rx_bd_base;
	tx_queue->skb_curtx = tx_queue->skb_dirtytx = 0;
	rx_queue->skb_currx = 0;

	/* Initialize Transmit Descriptor Ring */
	txbdp = tx_queue->tx_bd_base;
	for (i = 0; i < tx_queue->tx_ring_size; i++) {
		txbdp->lstatus = 0;
		txbdp->bufPtr = 0;
		txbdp++;
	}

	/* Set the last descriptor in the ring to indicate wrap */
	txbdp--;
	txbdp->status |= TXBD_WRAP;

	rxbdp = rx_queue->rx_bd_base;
	for (i = 0; i < rx_queue->rx_ring_size; i++) {
		struct sk_buff *skb = rx_queue->rx_skbuff[i];

		if (skb) {
			/* Slot already has an skb: just re-arm the BD with
			 * the buffer address it already points to */
			gfar_init_rxbdp(rx_queue, rxbdp, rxbdp->bufPtr);
		} else {
			skb = gfar_new_skb(ndev);
			if (!skb) {
				pr_err("%s: Can't allocate RX buffers\n",
				       ndev->name);
				return -ENOMEM;
			}
			rx_queue->rx_skbuff[i] = skb;

			/* Map the new skb and attach it to this BD */
			gfar_new_rxbdp(rx_queue, rxbdp, skb);
		}

		rxbdp++;
	}

	return 0;
}

static int gfar_alloc_skb_resources(struct net_device *ndev)
{
224 225 226 227
	void *vaddr;
	int i;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = &priv->ofdev->dev;
228 229 230 231 232
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	tx_queue = priv->tx_queue;
	rx_queue = priv->rx_queue;
233 234

	/* Allocate memory for the buffer descriptors */
235
	vaddr = dma_alloc_coherent(dev,
236 237 238
			sizeof(*tx_queue->tx_bd_base) * tx_queue->tx_ring_size +
			sizeof(*rx_queue->rx_bd_base) * rx_queue->rx_ring_size,
			&tx_queue->tx_bd_dma_base, GFP_KERNEL);
239 240 241 242 243 244 245
	if (!vaddr) {
		if (netif_msg_ifup(priv))
			pr_err("%s: Could not allocate buffer descriptors!\n",
			       ndev->name);
		return -ENOMEM;
	}

246 247
	tx_queue->tx_bd_base = vaddr;
	tx_queue->dev = ndev;
248 249

	/* Start the rx descriptor ring where the tx ring leaves off */
250 251 252
	vaddr = vaddr + sizeof(*tx_queue->tx_bd_base) * tx_queue->tx_ring_size;
	rx_queue->rx_bd_base = vaddr;
	rx_queue->dev = ndev;
253 254

	/* Setup the skbuff rings */
255 256 257
	tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
				  tx_queue->tx_ring_size, GFP_KERNEL);
	if (!tx_queue->tx_skbuff) {
258 259 260 261 262 263
		if (netif_msg_ifup(priv))
			pr_err("%s: Could not allocate tx_skbuff\n",
			       ndev->name);
		goto cleanup;
	}

264 265
	for (i = 0; i < tx_queue->tx_ring_size; i++)
		tx_queue->tx_skbuff[i] = NULL;
266

267 268 269
	rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
				  rx_queue->rx_ring_size, GFP_KERNEL);
	if (!rx_queue->rx_skbuff) {
270 271 272 273 274 275
		if (netif_msg_ifup(priv))
			pr_err("%s: Could not allocate rx_skbuff\n",
			       ndev->name);
		goto cleanup;
	}

276 277
	for (i = 0; i < rx_queue->rx_ring_size; i++)
		rx_queue->rx_skbuff[i] = NULL;
278

279 280
	if (gfar_init_bds(ndev))
		goto cleanup;
281 282 283 284 285 286 287 288 289 290 291

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}

/* Program the MAC with the current software configuration: BD ring base
 * addresses, interrupt coalescing, RCTRL/TCTRL (checksum, hashing,
 * padding, VLAN), stashing attributes and TX FIFO thresholds.
 */
static void gfar_init_mac(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct gfar __iomem *regs = priv->gfargrp.regs;
	u32 rctrl = 0;
	u32 tctrl = 0;
	u32 attrs = 0;

	tx_queue = priv->tx_queue;
	rx_queue = priv->rx_queue;

	/* enet DMA only understands physical addresses; the RX ring was
	 * laid out directly after the TX ring in one coherent block */
	gfar_write(&regs->tbase0, tx_queue->tx_bd_dma_base);
	gfar_write(&regs->rbase0, tx_queue->tx_bd_dma_base +
				  sizeof(*tx_queue->tx_bd_base) *
				  tx_queue->tx_ring_size);

	/* Configure the coalescing support */
	gfar_write(&regs->txic, 0);
	if (tx_queue->txcoalescing)
		gfar_write(&regs->txic, tx_queue->txic);

	gfar_write(&regs->rxic, 0);
	if (rx_queue->rxcoalescing)
		gfar_write(&regs->rxic, rx_queue->rxic);

	if (priv->rx_csum_enable)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(ndev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* keep vlan related bits if it's enabled */
	if (priv->vlgrp) {
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
		tctrl |= TCTRL_VLINS;
	}

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);

	if (ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	gfar_write(&regs->tctrl, tctrl);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	/* TX FIFO watermarks */
	gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
}

369 370 371 372 373 374 375 376 377
static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_multicast_list = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_vlan_rx_register = gfar_vlan_rx_register,
378 379
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
380 381 382 383 384
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};

385 386
/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
387
{
388
	return priv->vlgrp || priv->rx_csum_enable;
389
}
390

391 392 393 394 395 396 397 398 399
/* Parse this device's OF node: map the register region, fetch IRQs,
 * the MAC address, stashing parameters, and per-model feature flags.
 *
 * Returns 0 on success, -ENODEV if the node is absent/disabled,
 * -ENOMEM if ioremap fails, -EINVAL on bad interrupt specs.
 */
static int gfar_of_init(struct net_device *dev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	u64 addr, size;
	int err = 0;
	struct gfar_private *priv = netdev_priv(dev);
	struct device_node *np = priv->node;
	const u32 *stash;
	const u32 *stash_len;
	const u32 *stash_idx;

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	/* get a pointer to the register memory */
	addr = of_translate_address(np, of_get_address(np, 0, &size, NULL));
	priv->gfargrp.regs = ioremap(addr, size);

	if (priv->gfargrp.regs == NULL)
		return -ENOMEM;

	priv->gfargrp.priv = priv; /* back pointer from group to priv */
	priv->gfargrp.interruptTransmit = irq_of_parse_and_map(np, 0);

	model = of_get_property(np, "model", NULL);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		priv->gfargrp.interruptReceive = irq_of_parse_and_map(np, 1);

		priv->gfargrp.interruptError = irq_of_parse_and_map(np, 2);

		/* NOTE(review): irq_of_parse_and_map() returns 0 (NO_IRQ)
		 * on failure; these `< 0` tests only catch anything if the
		 * interrupt fields are signed — verify against gianfar.h */
		if (priv->gfargrp.interruptTransmit < 0 ||
				priv->gfargrp.interruptReceive < 0 ||
				priv->gfargrp.interruptError < 0) {
			err = -EINVAL;
			goto err_out;
		}
	}

	stash = of_get_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	stash_len = of_get_property(np, "rx-stash-len", NULL);

	if (stash_len)
		priv->rx_stash_size = *stash_len;

	stash_idx = of_get_property(np, "rx-stash-idx", NULL);

	if (stash_idx)
		priv->rx_stash_index = *stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);

	/* NOTE(review): these plain assignments overwrite the stashing
	 * bits OR-ed into device_flags above — confirm this is intended */
	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR;
	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags =
			FSL_GIANFAR_DEV_HAS_GIGABIT |
			FSL_GIANFAR_DEV_HAS_COALESCE |
			FSL_GIANFAR_DEV_HAS_RMON |
			FSL_GIANFAR_DEV_HAS_MULTI_INTR |
			FSL_GIANFAR_DEV_HAS_PADDING |
			FSL_GIANFAR_DEV_HAS_CSUM |
			FSL_GIANFAR_DEV_HAS_VLAN |
			FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
			FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;

	ctype = of_get_property(np, "phy-connection-type", NULL);

	/* We only care about rgmii-id.  The rest are autodetected */
	if (ctype && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* Find the TBI PHY.  If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

err_out:
	iounmap(priv->gfargrp.regs);
	return err;
}

498 499 500 501 502 503 504 505 506 507 508 509 510 511
/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
}

512 513
/* Set up the ethernet device structure, private data,
 * and anything else we need before we start */
514 515
static int gfar_probe(struct of_device *ofdev,
		const struct of_device_id *match)
L
Linus Torvalds 已提交
516 517 518 519
{
	u32 tempval;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
520
	struct gfar __iomem *regs = NULL;
521 522
	int err = 0;
	int len_devname;
L
Linus Torvalds 已提交
523 524 525 526

	/* Create an ethernet device instance */
	dev = alloc_etherdev(sizeof (*priv));

527
	if (NULL == dev)
L
Linus Torvalds 已提交
528 529 530
		return -ENOMEM;

	priv = netdev_priv(dev);
531 532
	priv->ndev = dev;
	priv->ofdev = ofdev;
533
	priv->node = ofdev->node;
534
	SET_NETDEV_DEV(dev, &ofdev->dev);
L
Linus Torvalds 已提交
535

536
	err = gfar_of_init(dev);
L
Linus Torvalds 已提交
537

538
	if (err)
L
Linus Torvalds 已提交
539 540
		goto regs_fail;

541 542 543 544 545 546 547 548 549 550 551 552
	priv->tx_queue = (struct gfar_priv_tx_q *)kmalloc(
				sizeof (struct gfar_priv_tx_q), GFP_KERNEL);
	if (!priv->tx_queue)
		goto regs_fail;

	priv->rx_queue = (struct gfar_priv_rx_q *)kmalloc(
				sizeof (struct gfar_priv_rx_q), GFP_KERNEL);
	if (!priv->rx_queue)
		goto rx_queue_fail;

	spin_lock_init(&priv->tx_queue->txlock);
	spin_lock_init(&priv->rx_queue->rxlock);
553
	spin_lock_init(&priv->gfargrp.grplock);
554
	spin_lock_init(&priv->bflock);
555
	INIT_WORK(&priv->reset_task, gfar_reset_task);
L
Linus Torvalds 已提交
556

557
	dev_set_drvdata(&ofdev->dev, priv);
558
	regs = priv->gfargrp.regs;
L
Linus Torvalds 已提交
559 560 561

	/* Stop the DMA engine now, in case it was running before */
	/* (The firmware could have used it, and left it running). */
562
	gfar_halt(dev);
L
Linus Torvalds 已提交
563 564

	/* Reset MAC layer */
565
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
L
Linus Torvalds 已提交
566

567 568 569
	/* We need to delay at least 3 TX clocks */
	udelay(2);

L
Linus Torvalds 已提交
570
	tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
571
	gfar_write(&regs->maccfg1, tempval);
L
Linus Torvalds 已提交
572 573

	/* Initialize MACCFG2. */
574
	gfar_write(&regs->maccfg2, MACCFG2_INIT_SETTINGS);
L
Linus Torvalds 已提交
575 576

	/* Initialize ECNTRL */
577
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
L
Linus Torvalds 已提交
578 579

	/* Set the dev->base_addr to the gfar reg region */
580
	dev->base_addr = (unsigned long) regs;
L
Linus Torvalds 已提交
581

582
	SET_NETDEV_DEV(dev, &ofdev->dev);
L
Linus Torvalds 已提交
583 584 585 586

	/* Fill in the dev structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->mtu = 1500;
587
	dev->netdev_ops = &gfar_netdev_ops;
588 589
	dev->ethtool_ops = &gfar_ethtool_ops;

590 591 592
	/* Register for napi ...NAPI is for each rx_queue */
	netif_napi_add(dev, &priv->rx_queue->napi, gfar_poll, GFAR_DEV_WEIGHT);

593
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
594
		priv->rx_csum_enable = 1;
D
Dai Haruki 已提交
595
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
596 597 598 599
	} else
		priv->rx_csum_enable = 0;

	priv->vlgrp = NULL;
L
Linus Torvalds 已提交
600

601
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN)
602 603
		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

604
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
605 606 607
		priv->extended_hash = 1;
		priv->hash_width = 9;

608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623
		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;
624 625 626 627 628

	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

629 630 631 632 633 634 635 636
		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
637 638
	}

639
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
640 641 642 643 644 645
		priv->padding = DEFAULT_PADDING;
	else
		priv->padding = 0;

	if (dev->features & NETIF_F_IP_CSUM)
		dev->hard_header_len += GMAC_FCB_LEN;
L
Linus Torvalds 已提交
646 647 648

	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;

649 650 651 652 653 654 655 656 657
	/* Initializing some of the rx/tx queue level parameters */
	priv->tx_queue->tx_ring_size = DEFAULT_TX_RING_SIZE;
	priv->tx_queue->num_txbdfree = DEFAULT_TX_RING_SIZE;
	priv->tx_queue->txcoalescing = DEFAULT_TX_COALESCE;
	priv->tx_queue->txic = DEFAULT_TXIC;

	priv->rx_queue->rx_ring_size = DEFAULT_RX_RING_SIZE;
	priv->rx_queue->rxcoalescing = DEFAULT_RX_COALESCE;
	priv->rx_queue->rxic = DEFAULT_RXIC;
L
Linus Torvalds 已提交
658

659 660 661
	/* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;

662 663 664
	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(dev);

L
Linus Torvalds 已提交
665 666 667 668 669 670 671 672
	err = register_netdev(dev);

	if (err) {
		printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
				dev->name);
		goto register_fail;
	}

673 674 675
	device_init_wakeup(&dev->dev,
		priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

676 677
	/* fill out IRQ number and name fields */
	len_devname = strlen(dev->name);
678
	strncpy(&priv->gfargrp.int_name_tx[0], dev->name, len_devname);
679
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
680
		strncpy(&priv->gfargrp.int_name_tx[len_devname],
681 682
			"_tx", sizeof("_tx") + 1);

683 684
		strncpy(&priv->gfargrp.int_name_rx[0], dev->name, len_devname);
		strncpy(&priv->gfargrp.int_name_rx[len_devname],
685 686
			"_rx", sizeof("_rx") + 1);

687 688
		strncpy(&priv->gfargrp.int_name_er[0], dev->name, len_devname);
		strncpy(&priv->gfargrp.int_name_er[len_devname],
689 690
			"_er", sizeof("_er") + 1);
	} else
691
		priv->gfargrp.int_name_tx[len_devname] = '\0';
692

693 694 695
	/* Create all the sysfs files */
	gfar_init_sysfs(dev);

L
Linus Torvalds 已提交
696
	/* Print out the device info */
J
Johannes Berg 已提交
697
	printk(KERN_INFO DEVICE_NAME "%pM\n", dev->name, dev->dev_addr);
L
Linus Torvalds 已提交
698 699

	/* Even more device info helps when determining which kernel */
700
	/* provided which set of benchmarks. */
L
Linus Torvalds 已提交
701 702
	printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
	printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
703
	       dev->name, priv->rx_queue->rx_ring_size, priv->tx_queue->tx_ring_size);
L
Linus Torvalds 已提交
704 705 706 707

	return 0;

register_fail:
708
	iounmap(priv->gfargrp.regs);
709 710 711
	kfree(priv->rx_queue);
rx_queue_fail:
	kfree(priv->tx_queue);
L
Linus Torvalds 已提交
712
regs_fail:
713 714 715 716
	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);
L
Linus Torvalds 已提交
717
	free_netdev(dev);
718
	return err;
L
Linus Torvalds 已提交
719 720
}

721
static int gfar_remove(struct of_device *ofdev)
L
Linus Torvalds 已提交
722
{
723
	struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);
L
Linus Torvalds 已提交
724

725 726 727 728 729
	if (priv->phy_node)
		of_node_put(priv->phy_node);
	if (priv->tbi_node)
		of_node_put(priv->tbi_node);

730
	dev_set_drvdata(&ofdev->dev, NULL);
L
Linus Torvalds 已提交
731

D
David S. Miller 已提交
732
	unregister_netdev(priv->ndev);
733
	iounmap(priv->gfargrp.regs);
734
	free_netdev(priv->ndev);
L
Linus Torvalds 已提交
735 736 737 738

	return 0;
}

739
#ifdef CONFIG_PM
740 741

/* PM suspend callback: quiesce the controller.
 *
 * With wake-on-LAN (magic packet) enabled, RX stays on and only the
 * magic-packet interrupt remains unmasked; otherwise RX/TX are fully
 * disabled and the PHY is stopped.
 */
static int gfar_suspend(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct gfar __iomem *regs = NULL;
	unsigned long flags;
	u32 tempval;

	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	netif_device_detach(ndev);
	tx_queue = priv->tx_queue;
	rx_queue = priv->rx_queue;
	regs = priv->gfargrp.regs;

	if (netif_running(ndev)) {
		/* Lock order: txlock (irqs off) then rxlock — must match
		 * gfar_resume() */
		spin_lock_irqsave(&tx_queue->txlock, flags);
		spin_lock(&rx_queue->rxlock);

		gfar_halt_nodisable(ndev);

		/* Disable Tx, and Rx if wake-on-LAN is disabled. */
		tempval = gfar_read(&regs->maccfg1);

		tempval &= ~MACCFG1_TX_EN;

		if (!magic_packet)
			tempval &= ~MACCFG1_RX_EN;

		gfar_write(&regs->maccfg1, tempval);

		spin_unlock(&rx_queue->rxlock);
		spin_unlock_irqrestore(&tx_queue->txlock, flags);

		napi_disable(&rx_queue->napi);

		if (magic_packet) {
			/* Enable interrupt on Magic Packet */
			gfar_write(&regs->imask, IMASK_MAG);

			/* Enable Magic Packet mode */
			tempval = gfar_read(&regs->maccfg2);
			tempval |= MACCFG2_MPEN;
			gfar_write(&regs->maccfg2, tempval);
		} else {
			phy_stop(priv->phydev);
		}
	}

	return 0;
}

796
/* PM resume callback: undo gfar_suspend() — restart the PHY if it was
 * stopped, clear Magic Packet mode and restart the controller.
 */
static int gfar_resume(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct gfar __iomem *regs = NULL;
	unsigned long flags;
	u32 tempval;
	int magic_packet = priv->wol_en &&
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);
		return 0;
	}

	/* PHY was only stopped in suspend when WoL was off */
	if (!magic_packet && priv->phydev)
		phy_start(priv->phydev);

	/* Disable Magic Packet mode, in case something
	 * else woke us up.
	 */
	rx_queue = priv->rx_queue;
	tx_queue = priv->tx_queue;
	regs = priv->gfargrp.regs;

	/* Lock order must match gfar_suspend(): txlock then rxlock */
	spin_lock_irqsave(&tx_queue->txlock, flags);
	spin_lock(&rx_queue->rxlock);

	tempval = gfar_read(&regs->maccfg2);
	tempval &= ~MACCFG2_MPEN;
	gfar_write(&regs->maccfg2, tempval);

	gfar_start(ndev);

	spin_unlock(&rx_queue->rxlock);
	spin_unlock_irqrestore(&tx_queue->txlock, flags);

	netif_device_attach(ndev);

	napi_enable(&rx_queue->napi);

	return 0;
}

/* PM restore callback (after hibernation): reprogram the hardware from
 * scratch — BD rings, registers, MAC — and restart the link.
 *
 * Fix: napi_enable() previously used &priv->napi, inconsistent with the
 * rest of the file (netif_napi_add() in gfar_probe and the suspend and
 * resume paths all use the NAPI context embedded in the RX queue).
 */
static int gfar_restore(struct device *dev)
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;

	if (!netif_running(ndev))
		return 0;

	/* NOTE(review): gfar_init_bds() can fail with -ENOMEM; its return
	 * value is not checked here — confirm intended */
	gfar_init_bds(ndev);
	init_registers(ndev);
	gfar_set_mac_address(ndev);
	gfar_init_mac(ndev);
	gfar_start(ndev);

	/* Force adjust_link() to re-evaluate the link state */
	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	if (priv->phydev)
		phy_start(priv->phydev);

	netif_device_attach(ndev);
	napi_enable(&priv->rx_queue->napi);

	return 0;
}
868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888

static struct dev_pm_ops gfar_pm_ops = {
	.suspend = gfar_suspend,
	.resume = gfar_resume,
	.freeze = gfar_suspend,
	.thaw = gfar_resume,
	.restore = gfar_restore,
};

#define GFAR_PM_OPS (&gfar_pm_ops)

static int gfar_legacy_suspend(struct of_device *ofdev, pm_message_t state)
{
	return gfar_suspend(&ofdev->dev);
}

static int gfar_legacy_resume(struct of_device *ofdev)
{
	return gfar_resume(&ofdev->dev);
}

889
#else
890 891 892 893 894

#define GFAR_PM_OPS NULL
#define gfar_legacy_suspend NULL
#define gfar_legacy_resume NULL

895
#endif
L
Linus Torvalds 已提交
896

897 898 899 900 901 902
/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
903 904 905 906 907
	struct gfar __iomem *regs = NULL;
	u32 ecntrl;

	regs = priv->gfargrp.regs;
	ecntrl = gfar_read(&regs->ecntrl);
908 909 910 911 912 913 914 915 916 917 918 919 920 921

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE)
			return PHY_INTERFACE_MODE_RMII;
A
Andy Fleming 已提交
922
		else {
923
			phy_interface_t interface = priv->interface;
A
Andy Fleming 已提交
924 925 926 927 928 929 930 931

			/*
			 * This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

932
			return PHY_INTERFACE_MODE_RGMII;
A
Andy Fleming 已提交
933
		}
934 935
	}

936
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
937 938 939 940 941 942
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}


943 944
/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
L
Linus Torvalds 已提交
945 946 947 948
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
949
	uint gigabit_support =
950
		priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
951
		SUPPORTED_1000baseT_Full : 0;
952
	phy_interface_t interface;
L
Linus Torvalds 已提交
953 954 955 956 957

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

958 959
	interface = gfar_get_interface(dev);

960 961 962 963 964 965 966 967
	priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
				      interface);
	if (!priv->phydev)
		priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
							 interface);
	if (!priv->phydev) {
		dev_err(&dev->dev, "could not attach to PHY\n");
		return -ENODEV;
968
	}
L
Linus Torvalds 已提交
969

K
Kapil Juneja 已提交
970 971 972
	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

973
	/* Remove any features not supported by the controller */
974 975
	priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
	priv->phydev->advertising = priv->phydev->supported;
L
Linus Torvalds 已提交
976 977 978 979

	return 0;
}

980 981 982 983 984 985 986 987 988
/*
 * Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip.  We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register.  We assume
 * that the TBIPA register is valid.  Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *tbiphy;

	/* SGMII requires a tbi-handle in the device tree */
	if (!priv->tbi_node) {
		dev_warn(&dev->dev, "error: SGMII mode requires that the "
				    "device tree specify a tbi-handle\n");
		return;
	}

	tbiphy = of_phy_find_device(priv->tbi_node);
	if (!tbiphy) {
		dev_err(&dev->dev, "error: Could not get TBI device\n");
		return;
	}

	/*
	 * If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
	 * everything for us?  Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
		return;

	/* Single clk mode, mii mode off(for serdes communication) */
	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);

	/* Advertise 1000BASE-X full duplex with pause capability */
	phy_write(tbiphy, MII_ADVERTISE,
			ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
			ADVERTISE_1000XPSE_ASYM);

	/* Restart autonegotiation at 1000 Mb/s full duplex */
	phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE |
			BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
}

L
Linus Torvalds 已提交
1026 1027 1028
static void init_registers(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
1029
	struct gfar __iomem *regs = NULL;
L
Linus Torvalds 已提交
1030

1031
	regs = priv->gfargrp.regs;
L
Linus Torvalds 已提交
1032
	/* Clear IEVENT */
1033
	gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
L
Linus Torvalds 已提交
1034 1035

	/* Initialize IMASK */
1036
	gfar_write(&regs->imask, IMASK_INIT_CLEAR);
L
Linus Torvalds 已提交
1037 1038

	/* Init hash registers to zero */
1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055
	gfar_write(&regs->igaddr0, 0);
	gfar_write(&regs->igaddr1, 0);
	gfar_write(&regs->igaddr2, 0);
	gfar_write(&regs->igaddr3, 0);
	gfar_write(&regs->igaddr4, 0);
	gfar_write(&regs->igaddr5, 0);
	gfar_write(&regs->igaddr6, 0);
	gfar_write(&regs->igaddr7, 0);

	gfar_write(&regs->gaddr0, 0);
	gfar_write(&regs->gaddr1, 0);
	gfar_write(&regs->gaddr2, 0);
	gfar_write(&regs->gaddr3, 0);
	gfar_write(&regs->gaddr4, 0);
	gfar_write(&regs->gaddr5, 0);
	gfar_write(&regs->gaddr6, 0);
	gfar_write(&regs->gaddr7, 0);
L
Linus Torvalds 已提交
1056 1057

	/* Zero out the rmon mib registers if it has them */
1058
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
1059
		memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib));
L
Linus Torvalds 已提交
1060 1061

		/* Mask off the CAM interrupts */
1062 1063
		gfar_write(&regs->rmon.cam1, 0xffffffff);
		gfar_write(&regs->rmon.cam2, 0xffffffff);
L
Linus Torvalds 已提交
1064 1065 1066
	}

	/* Initialize the max receive buffer length */
1067
	gfar_write(&regs->mrblr, priv->rx_buffer_size);
L
Linus Torvalds 已提交
1068 1069

	/* Initialize the Minimum Frame Length Register */
1070
	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
L
Linus Torvalds 已提交
1071 1072
}

1073 1074

/* Halt the receive and transmit queues */
1075
static void gfar_halt_nodisable(struct net_device *dev)
L
Linus Torvalds 已提交
1076 1077
{
	struct gfar_private *priv = netdev_priv(dev);
1078
	struct gfar __iomem *regs = priv->gfargrp.regs;
L
Linus Torvalds 已提交
1079 1080 1081 1082 1083 1084 1085 1086 1087
	u32 tempval;

	/* Mask all interrupts */
	gfar_write(&regs->imask, IMASK_INIT_CLEAR);

	/* Clear all interrupts */
	gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

	/* Stop the DMA, and wait for it to stop */
1088
	tempval = gfar_read(&regs->dmactrl);
L
Linus Torvalds 已提交
1089 1090 1091
	if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
	    != (DMACTRL_GRS | DMACTRL_GTS)) {
		tempval |= (DMACTRL_GRS | DMACTRL_GTS);
1092
		gfar_write(&regs->dmactrl, tempval);
L
Linus Torvalds 已提交
1093

1094
		while (!(gfar_read(&regs->ievent) &
L
Linus Torvalds 已提交
1095 1096 1097
			 (IEVENT_GRSC | IEVENT_GTSC)))
			cpu_relax();
	}
1098 1099 1100 1101 1102 1103
}

/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
1104
	struct gfar __iomem *regs = priv->gfargrp.regs;
1105
	u32 tempval;
L
Linus Torvalds 已提交
1106

1107 1108
	gfar_halt_nodisable(dev);

L
Linus Torvalds 已提交
1109 1110 1111 1112
	/* Disable Rx and Tx */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
1113 1114 1115 1116 1117
}

void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
1118 1119
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
1120 1121
	unsigned long flags;

1122 1123
	phy_stop(priv->phydev);

1124 1125 1126
	tx_queue = priv->tx_queue;
	rx_queue = priv->rx_queue;

1127
	/* Lock it down */
1128 1129
	spin_lock_irqsave(&tx_queue->txlock, flags);
	spin_lock(&rx_queue->rxlock);
1130 1131

	gfar_halt(dev);
L
Linus Torvalds 已提交
1132

1133 1134
	spin_unlock(&rx_queue->rxlock);
	spin_unlock_irqrestore(&tx_queue->txlock, flags);
L
Linus Torvalds 已提交
1135 1136

	/* Free the IRQs */
1137
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1138 1139 1140
		free_irq(priv->gfargrp.interruptError, &priv->gfargrp);
		free_irq(priv->gfargrp.interruptTransmit, &priv->gfargrp);
		free_irq(priv->gfargrp.interruptReceive, &priv->gfargrp);
L
Linus Torvalds 已提交
1141
	} else {
1142
		free_irq(priv->gfargrp.interruptTransmit, &priv->gfargrp);
L
Linus Torvalds 已提交
1143 1144 1145 1146 1147 1148 1149
	}

	free_skb_resources(priv);
}

/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff */
1150
static void free_skb_resources(struct gfar_private *priv)
L
Linus Torvalds 已提交
1151
{
1152
	struct device *dev = &priv->ofdev->dev;
L
Linus Torvalds 已提交
1153 1154
	struct rxbd8 *rxbdp;
	struct txbd8 *txbdp;
1155 1156
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
D
Dai Haruki 已提交
1157
	int i, j;
L
Linus Torvalds 已提交
1158 1159

	/* Go through all the buffer descriptors and free their data buffers */
1160 1161
	tx_queue = priv->tx_queue;
	txbdp = tx_queue->tx_bd_base;
L
Linus Torvalds 已提交
1162

1163
	if (!tx_queue->tx_skbuff)
1164 1165
		goto skip_tx_skbuff;

1166 1167
	for (i = 0; i < tx_queue->tx_ring_size; i++) {
		if (!tx_queue->tx_skbuff[i])
D
Dai Haruki 已提交
1168
			continue;
L
Linus Torvalds 已提交
1169

1170
		dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
D
Dai Haruki 已提交
1171 1172
				txbdp->length, DMA_TO_DEVICE);
		txbdp->lstatus = 0;
1173
		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; j++) {
D
Dai Haruki 已提交
1174
			txbdp++;
1175
			dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
D
Dai Haruki 已提交
1176
					txbdp->length, DMA_TO_DEVICE);
L
Linus Torvalds 已提交
1177
		}
1178
		txbdp++;
1179 1180
		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
		tx_queue->tx_skbuff[i] = NULL;
L
Linus Torvalds 已提交
1181 1182
	}

1183
	kfree(tx_queue->tx_skbuff);
1184
skip_tx_skbuff:
L
Linus Torvalds 已提交
1185

1186 1187
	rx_queue = priv->rx_queue;
	rxbdp = rx_queue->rx_bd_base;
L
Linus Torvalds 已提交
1188

1189
	if (!rx_queue->rx_skbuff)
1190
		goto skip_rx_skbuff;
L
Linus Torvalds 已提交
1191

1192 1193
	for (i = 0; i < rx_queue->rx_ring_size; i++) {
		if (rx_queue->rx_skbuff[i]) {
1194 1195 1196
			dma_unmap_single(&priv->ofdev->dev, rxbdp->bufPtr,
					 priv->rx_buffer_size,
					DMA_FROM_DEVICE);
1197 1198
			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
			rx_queue->rx_skbuff[i] = NULL;
L
Linus Torvalds 已提交
1199 1200
		}

1201 1202 1203
		rxbdp->lstatus = 0;
		rxbdp->bufPtr = 0;
		rxbdp++;
L
Linus Torvalds 已提交
1204
	}
1205

1206
	kfree(rx_queue->rx_skbuff);
1207 1208
skip_rx_skbuff:

1209 1210 1211
	dma_free_coherent(dev, sizeof(*txbdp) * tx_queue->tx_ring_size +
			       sizeof(*rxbdp) * rx_queue->rx_ring_size,
			  tx_queue->tx_bd_base, tx_queue->tx_bd_dma_base);
L
Linus Torvalds 已提交
1212 1213
}

1214 1215 1216
void gfar_start(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
1217
	struct gfar __iomem *regs = priv->gfargrp.regs;
1218 1219 1220 1221 1222 1223 1224 1225
	u32 tempval;

	/* Enable Rx and Tx in MACCFG1 */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize DMACTRL to have WWR and WOP */
1226
	tempval = gfar_read(&regs->dmactrl);
1227
	tempval |= DMACTRL_INIT_SETTINGS;
1228
	gfar_write(&regs->dmactrl, tempval);
1229 1230

	/* Make sure we aren't stopped */
1231
	tempval = gfar_read(&regs->dmactrl);
1232
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
1233
	gfar_write(&regs->dmactrl, tempval);
1234

A
Andy Fleming 已提交
1235 1236 1237 1238
	/* Clear THLT/RHLT, so that the DMA starts polling now */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
	gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT);

1239 1240
	/* Unmask the interrupts we look for */
	gfar_write(&regs->imask, IMASK_DEFAULT);
1241 1242

	dev->trans_start = jiffies;
1243 1244
}

L
Linus Torvalds 已提交
1245
/* Bring the controller up and running */
1246
int startup_gfar(struct net_device *ndev)
L
Linus Torvalds 已提交
1247
{
1248
	struct gfar_private *priv = netdev_priv(ndev);
1249
	struct gfar __iomem *regs = priv->gfargrp.regs;
1250
	int err;
L
Linus Torvalds 已提交
1251 1252 1253

	gfar_write(&regs->imask, IMASK_INIT_CLEAR);

1254 1255 1256
	err = gfar_alloc_skb_resources(ndev);
	if (err)
		return err;
1257

1258
	gfar_init_mac(ndev);
L
Linus Torvalds 已提交
1259 1260 1261

	/* If the device has multiple interrupts, register for
	 * them.  Otherwise, only register for the one */
1262
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1263
		/* Install our interrupt handlers for Error,
L
Linus Torvalds 已提交
1264
		 * Transmit, and Receive */
1265 1266
		err = request_irq(priv->gfargrp.interruptError, gfar_error, 0,
				  priv->gfargrp.int_name_er, &priv->gfargrp);
1267
		if (err) {
1268
			if (netif_msg_intr(priv))
1269
				pr_err("%s: Can't get IRQ %d\n", ndev->name,
1270
				       priv->gfargrp.interruptError);
L
Linus Torvalds 已提交
1271 1272 1273
			goto err_irq_fail;
		}

1274 1275 1276 1277
		err = request_irq(priv->gfargrp.interruptTransmit,
					gfar_transmit, 0,
					priv->gfargrp.int_name_tx,
					&priv->gfargrp);
1278
		if (err) {
1279
			if (netif_msg_intr(priv))
1280
				pr_err("%s: Can't get IRQ %d\n", ndev->name,
1281
				       priv->gfargrp.interruptTransmit);
L
Linus Torvalds 已提交
1282 1283 1284
			goto tx_irq_fail;
		}

1285 1286 1287 1288
		err = request_irq(priv->gfargrp.interruptReceive,
					gfar_receive, 0,
					priv->gfargrp.int_name_rx,
					&priv->gfargrp);
1289
		if (err) {
1290
			if (netif_msg_intr(priv))
1291
				pr_err("%s: Can't get IRQ %d (receive0)\n",
1292 1293
					ndev->name,
					priv->gfargrp.interruptReceive);
L
Linus Torvalds 已提交
1294 1295 1296
			goto rx_irq_fail;
		}
	} else {
1297 1298 1299 1300
		err = request_irq(priv->gfargrp.interruptTransmit,
					gfar_interrupt, 0,
					priv->gfargrp.int_name_tx,
					&priv->gfargrp);
1301
		if (err) {
1302
			if (netif_msg_intr(priv))
1303
				pr_err("%s: Can't get IRQ %d\n", ndev->name,
1304
				       priv->gfargrp.interruptTransmit);
L
Linus Torvalds 已提交
1305 1306 1307 1308
			goto err_irq_fail;
		}
	}

1309
	/* Start the controller */
1310
	gfar_start(ndev);
L
Linus Torvalds 已提交
1311

1312 1313
	phy_start(priv->phydev);

L
Linus Torvalds 已提交
1314 1315 1316
	return 0;

rx_irq_fail:
1317
	free_irq(priv->gfargrp.interruptTransmit, &priv->gfargrp);
L
Linus Torvalds 已提交
1318
tx_irq_fail:
1319
	free_irq(priv->gfargrp.interruptError, &priv->gfargrp);
L
Linus Torvalds 已提交
1320
err_irq_fail:
1321
	free_skb_resources(priv);
L
Linus Torvalds 已提交
1322 1323 1324 1325 1326 1327 1328
	return err;
}

/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
{
1329
	struct gfar_private *priv = netdev_priv(dev);
L
Linus Torvalds 已提交
1330 1331
	int err;

1332
	napi_enable(&priv->rx_queue->napi);
1333

1334 1335
	skb_queue_head_init(&priv->rx_recycle);

L
Linus Torvalds 已提交
1336 1337 1338 1339 1340 1341 1342
	/* Initialize a bunch of registers */
	init_registers(dev);

	gfar_set_mac_address(dev);

	err = init_phy(dev);

1343 1344
	if (err) {
		napi_disable(&priv->rx_queue->napi);
L
Linus Torvalds 已提交
1345
		return err;
1346
	}
L
Linus Torvalds 已提交
1347 1348

	err = startup_gfar(dev);
1349
	if (err) {
1350
		napi_disable(&priv->rx_queue->napi);
1351 1352
		return err;
	}
L
Linus Torvalds 已提交
1353 1354 1355

	netif_start_queue(dev);

1356 1357
	device_set_wakeup_enable(&dev->dev, priv->wol_en);

L
Linus Torvalds 已提交
1358 1359 1360
	return err;
}

1361
static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
1362
{
1363
	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
1364 1365

	memset(fcb, 0, GMAC_FCB_LEN);
1366 1367 1368 1369 1370 1371

	return fcb;
}

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
{
1372
	u8 flags = 0;
1373 1374 1375 1376 1377

	/* If we're here, it's a IP packet with a TCP or UDP
	 * payload.  We set it to checksum, using a pseudo-header
	 * we provide
	 */
1378
	flags = TXFCB_DEFAULT;
1379

1380 1381
	/* Tell the controller what the protocol is */
	/* And provide the already calculated phcs */
1382
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
1383
		flags |= TXFCB_UDP;
1384
		fcb->phcs = udp_hdr(skb)->check;
1385
	} else
1386
		fcb->phcs = tcp_hdr(skb)->check;
1387 1388 1389 1390 1391

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr */
1392
	fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
1393
	fcb->l4os = skb_network_header_len(skb);
1394

1395
	fcb->flags = flags;
1396 1397
}

1398
void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
1399
{
1400
	fcb->flags |= TXFCB_VLN;
1401 1402 1403
	fcb->vlctl = vlan_tx_tag_get(skb);
}

D
Dai Haruki 已提交
1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417
/* Advance bdp by stride descriptors, wrapping around the ring end */
static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
			       struct txbd8 *base, int ring_size)
{
	struct txbd8 *next = bdp + stride;

	if (next >= base + ring_size)
		next -= ring_size;

	return next;
}

/* Step to the immediately following tx descriptor, with wrap-around */
static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
		int ring_size)
{
	return skip_txbd(bdp, 1, base, ring_size);
}

L
Linus Torvalds 已提交
1418 1419 1420 1421 1422
/* This is called by the kernel when a frame is ready for transmission. */
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
1423
	struct gfar_priv_tx_q *tx_queue = NULL;
1424
	struct gfar __iomem *regs = NULL;
1425
	struct txfcb *fcb = NULL;
D
Dai Haruki 已提交
1426
	struct txbd8 *txbdp, *txbdp_start, *base;
1427
	u32 lstatus;
D
Dai Haruki 已提交
1428 1429
	int i;
	u32 bufaddr;
A
Andy Fleming 已提交
1430
	unsigned long flags;
D
Dai Haruki 已提交
1431 1432
	unsigned int nr_frags, length;

1433 1434
	tx_queue = priv->tx_queue;
	base = tx_queue->tx_bd_base;
1435
	regs = priv->gfargrp.regs;
D
Dai Haruki 已提交
1436

1437 1438 1439 1440
	/* make space for additional header when fcb is needed */
	if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
			(priv->vlgrp && vlan_tx_tag_present(skb))) &&
			(skb_headroom(skb) < GMAC_FCB_LEN)) {
1441 1442 1443 1444 1445
		struct sk_buff *skb_new;

		skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN);
		if (!skb_new) {
			dev->stats.tx_errors++;
D
David S. Miller 已提交
1446
			kfree_skb(skb);
1447 1448 1449 1450 1451 1452
			return NETDEV_TX_OK;
		}
		kfree_skb(skb);
		skb = skb_new;
	}

D
Dai Haruki 已提交
1453 1454 1455
	/* total number of fragments in the SKB */
	nr_frags = skb_shinfo(skb)->nr_frags;

1456
	spin_lock_irqsave(&tx_queue->txlock, flags);
D
Dai Haruki 已提交
1457 1458

	/* check if there is space to queue this packet */
1459
	if ((nr_frags+1) > tx_queue->num_txbdfree) {
D
Dai Haruki 已提交
1460 1461 1462
		/* no space, stop the queue */
		netif_stop_queue(dev);
		dev->stats.tx_fifo_errors++;
1463
		spin_unlock_irqrestore(&tx_queue->txlock, flags);
D
Dai Haruki 已提交
1464 1465
		return NETDEV_TX_BUSY;
	}
L
Linus Torvalds 已提交
1466 1467

	/* Update transmit stats */
1468
	dev->stats.tx_bytes += skb->len;
L
Linus Torvalds 已提交
1469

1470
	txbdp = txbdp_start = tx_queue->cur_tx;
L
Linus Torvalds 已提交
1471

D
Dai Haruki 已提交
1472 1473 1474 1475 1476 1477
	if (nr_frags == 0) {
		lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
	} else {
		/* Place the fragment addresses and lengths into the TxBDs */
		for (i = 0; i < nr_frags; i++) {
			/* Point at the next BD, wrapping as needed */
1478
			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
D
Dai Haruki 已提交
1479 1480 1481 1482 1483 1484 1485 1486 1487

			length = skb_shinfo(skb)->frags[i].size;

			lstatus = txbdp->lstatus | length |
				BD_LFLAG(TXBD_READY);

			/* Handle the last BD specially */
			if (i == nr_frags - 1)
				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
L
Linus Torvalds 已提交
1488

1489
			bufaddr = dma_map_page(&priv->ofdev->dev,
D
Dai Haruki 已提交
1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501
					skb_shinfo(skb)->frags[i].page,
					skb_shinfo(skb)->frags[i].page_offset,
					length,
					DMA_TO_DEVICE);

			/* set the TxBD length and buffer pointer */
			txbdp->bufPtr = bufaddr;
			txbdp->lstatus = lstatus;
		}

		lstatus = txbdp_start->lstatus;
	}
L
Linus Torvalds 已提交
1502

1503
	/* Set up checksumming */
1504
	if (CHECKSUM_PARTIAL == skb->ip_summed) {
1505 1506 1507
		fcb = gfar_add_fcb(skb);
		lstatus |= BD_LFLAG(TXBD_TOE);
		gfar_tx_checksum(skb, fcb);
1508 1509
	}

1510
	if (priv->vlgrp && vlan_tx_tag_present(skb)) {
1511 1512
		if (unlikely(NULL == fcb)) {
			fcb = gfar_add_fcb(skb);
1513
			lstatus |= BD_LFLAG(TXBD_TOE);
1514
		}
1515 1516

		gfar_tx_vlan(skb, fcb);
1517 1518
	}

D
Dai Haruki 已提交
1519
	/* setup the TxBD length and buffer pointer for the first BD */
1520
	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
1521
	txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
D
Dai Haruki 已提交
1522
			skb_headlen(skb), DMA_TO_DEVICE);
L
Linus Torvalds 已提交
1523

D
Dai Haruki 已提交
1524
	lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
L
Linus Torvalds 已提交
1525

D
Dai Haruki 已提交
1526 1527
	/*
	 * The powerpc-specific eieio() is used, as wmb() has too strong
1528 1529 1530 1531 1532 1533 1534
	 * semantics (it requires synchronization between cacheable and
	 * uncacheable mappings, which eieio doesn't provide and which we
	 * don't need), thus requiring a more expensive sync instruction.  At
	 * some point, the set of architecture-independent barrier functions
	 * should be expanded to include weaker barriers.
	 */
	eieio();
1535

D
Dai Haruki 已提交
1536 1537 1538 1539
	txbdp_start->lstatus = lstatus;

	/* Update the current skb pointer to the next entry we will use
	 * (wrapping if necessary) */
1540 1541
	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
		TX_RING_MOD_MASK(tx_queue->tx_ring_size);
D
Dai Haruki 已提交
1542

1543
	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
D
Dai Haruki 已提交
1544 1545

	/* reduce TxBD free count */
1546
	tx_queue->num_txbdfree -= (nr_frags + 1);
D
Dai Haruki 已提交
1547 1548

	dev->trans_start = jiffies;
L
Linus Torvalds 已提交
1549 1550 1551

	/* If the next BD still needs to be cleaned up, then the bds
	   are full.  We need to tell the kernel to stop sending us stuff. */
1552
	if (!tx_queue->num_txbdfree) {
L
Linus Torvalds 已提交
1553 1554
		netif_stop_queue(dev);

1555
		dev->stats.tx_fifo_errors++;
L
Linus Torvalds 已提交
1556 1557 1558
	}

	/* Tell the DMA to go go go */
1559
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
L
Linus Torvalds 已提交
1560 1561

	/* Unlock priv */
1562
	spin_unlock_irqrestore(&tx_queue->txlock, flags);
L
Linus Torvalds 已提交
1563

1564
	return NETDEV_TX_OK;
L
Linus Torvalds 已提交
1565 1566 1567 1568 1569 1570
}

/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	napi_disable(&priv->rx_queue->napi);

	/* Drop the recycle pool and make sure no reset is in flight */
	skb_queue_purge(&priv->rx_recycle);
	cancel_work_sync(&priv->reset_task);

	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	netif_stop_queue(dev);

	return 0;
}

/* Changes the mac address if the controller is not running. */
1588
static int gfar_set_mac_address(struct net_device *dev)
L
Linus Torvalds 已提交
1589
{
1590
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
L
Linus Torvalds 已提交
1591 1592 1593 1594 1595

	return 0;
}


1596 1597 1598 1599 1600
/* Enables and disables VLAN insertion/extraction */
static void gfar_vlan_rx_register(struct net_device *dev,
		struct vlan_group *grp)
{
	struct gfar_private *priv = netdev_priv(dev);
1601
	struct gfar_priv_rx_q *rx_queue = NULL;
1602
	struct gfar __iomem *regs = NULL;
1603 1604 1605
	unsigned long flags;
	u32 tempval;

1606
	rx_queue = priv->rx_queue;
1607
	regs = priv->gfargrp.regs;
1608
	spin_lock_irqsave(&rx_queue->rxlock, flags);
1609

A
Anton Vorontsov 已提交
1610
	priv->vlgrp = grp;
1611 1612 1613

	if (grp) {
		/* Enable VLAN tag insertion */
1614
		tempval = gfar_read(&regs->tctrl);
1615 1616
		tempval |= TCTRL_VLINS;

1617
		gfar_write(&regs->tctrl, tempval);
1618

1619
		/* Enable VLAN tag extraction */
1620
		tempval = gfar_read(&regs->rctrl);
1621
		tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
1622
		gfar_write(&regs->rctrl, tempval);
1623 1624
	} else {
		/* Disable VLAN tag insertion */
1625
		tempval = gfar_read(&regs->tctrl);
1626
		tempval &= ~TCTRL_VLINS;
1627
		gfar_write(&regs->tctrl, tempval);
1628 1629

		/* Disable VLAN tag extraction */
1630
		tempval = gfar_read(&regs->rctrl);
1631
		tempval &= ~RCTRL_VLEX;
1632 1633 1634 1635 1636
		/* If parse is no longer required, then disable parser */
		if (tempval & RCTRL_REQ_PARSER)
			tempval |= RCTRL_PRSDEP_INIT;
		else
			tempval &= ~RCTRL_PRSDEP_INIT;
1637
		gfar_write(&regs->rctrl, tempval);
1638 1639
	}

1640 1641
	gfar_change_mtu(dev, dev->mtu);

1642
	spin_unlock_irqrestore(&rx_queue->rxlock, flags);
1643 1644
}

L
Linus Torvalds 已提交
1645 1646 1647 1648
static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	int tempsize, tempval;
	struct gfar_private *priv = netdev_priv(dev);
1649
	struct gfar __iomem *regs = priv->gfargrp.regs;
L
Linus Torvalds 已提交
1650
	int oldsize = priv->rx_buffer_size;
1651 1652
	int frame_size = new_mtu + ETH_HLEN;

1653
	if (priv->vlgrp)
1654
		frame_size += VLAN_HLEN;
1655

L
Linus Torvalds 已提交
1656
	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
1657 1658 1659
		if (netif_msg_drv(priv))
			printk(KERN_ERR "%s: Invalid MTU setting\n",
					dev->name);
L
Linus Torvalds 已提交
1660 1661 1662
		return -EINVAL;
	}

1663 1664 1665 1666 1667
	if (gfar_uses_fcb(priv))
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

L
Linus Torvalds 已提交
1668 1669 1670 1671 1672
	tempsize =
	    (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
	    INCREMENTAL_BUFFER_SIZE;

	/* Only stop and start the controller if it isn't already
1673
	 * stopped, and we changed something */
L
Linus Torvalds 已提交
1674 1675 1676 1677 1678 1679 1680
	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		stop_gfar(dev);

	priv->rx_buffer_size = tempsize;

	dev->mtu = new_mtu;

1681 1682
	gfar_write(&regs->mrblr, priv->rx_buffer_size);
	gfar_write(&regs->maxfrm, priv->rx_buffer_size);
L
Linus Torvalds 已提交
1683 1684 1685 1686

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length */
1687
	tempval = gfar_read(&regs->maccfg2);
L
Linus Torvalds 已提交
1688 1689 1690 1691 1692 1693

	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
	else
		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

1694
	gfar_write(&regs->maccfg2, tempval);
L
Linus Torvalds 已提交
1695 1696 1697 1698 1699 1700 1701

	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		startup_gfar(dev);

	return 0;
}

1702
/* gfar_reset_task gets scheduled when a packet has not been
L
Linus Torvalds 已提交
1703 1704
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
1705 1706 1707
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
L
Linus Torvalds 已提交
1708
{
1709 1710
	struct gfar_private *priv = container_of(work, struct gfar_private,
			reset_task);
1711
	struct net_device *dev = priv->ndev;
L
Linus Torvalds 已提交
1712 1713

	if (dev->flags & IFF_UP) {
1714
		netif_stop_queue(dev);
L
Linus Torvalds 已提交
1715 1716
		stop_gfar(dev);
		startup_gfar(dev);
1717
		netif_start_queue(dev);
L
Linus Torvalds 已提交
1718 1719
	}

1720
	netif_tx_schedule_all(dev);
L
Linus Torvalds 已提交
1721 1722
}

1723 1724 1725 1726 1727 1728 1729 1730
/* Tx watchdog expired: record the error and defer the controller
 * reset to process context via the reset_task workqueue item. */
static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);
}

L
Linus Torvalds 已提交
1731
/* Interrupt Handler for Transmit complete */
1732
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
L
Linus Torvalds 已提交
1733
{
1734
	struct net_device *dev = tx_queue->dev;
D
Dai Haruki 已提交
1735
	struct gfar_private *priv = netdev_priv(dev);
1736
	struct gfar_priv_rx_q *rx_queue = NULL;
D
Dai Haruki 已提交
1737 1738
	struct txbd8 *bdp;
	struct txbd8 *lbdp = NULL;
1739
	struct txbd8 *base = tx_queue->tx_bd_base;
D
Dai Haruki 已提交
1740 1741
	struct sk_buff *skb;
	int skb_dirtytx;
1742
	int tx_ring_size = tx_queue->tx_ring_size;
D
Dai Haruki 已提交
1743 1744
	int frags = 0;
	int i;
D
Dai Haruki 已提交
1745
	int howmany = 0;
D
Dai Haruki 已提交
1746
	u32 lstatus;
L
Linus Torvalds 已提交
1747

1748 1749 1750
	rx_queue = priv->rx_queue;
	bdp = tx_queue->dirty_tx;
	skb_dirtytx = tx_queue->skb_dirtytx;
L
Linus Torvalds 已提交
1751

1752
	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
D
Dai Haruki 已提交
1753 1754
		frags = skb_shinfo(skb)->nr_frags;
		lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
L
Linus Torvalds 已提交
1755

D
Dai Haruki 已提交
1756
		lstatus = lbdp->lstatus;
L
Linus Torvalds 已提交
1757

D
Dai Haruki 已提交
1758 1759 1760 1761 1762
		/* Only clean completed frames */
		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
				(lstatus & BD_LENGTH_MASK))
			break;

1763
		dma_unmap_single(&priv->ofdev->dev,
D
Dai Haruki 已提交
1764 1765 1766
				bdp->bufPtr,
				bdp->length,
				DMA_TO_DEVICE);
A
Andy Fleming 已提交
1767

D
Dai Haruki 已提交
1768 1769
		bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
		bdp = next_txbd(bdp, base, tx_ring_size);
D
Dai Haruki 已提交
1770

D
Dai Haruki 已提交
1771
		for (i = 0; i < frags; i++) {
1772
			dma_unmap_page(&priv->ofdev->dev,
D
Dai Haruki 已提交
1773 1774 1775 1776 1777 1778
					bdp->bufPtr,
					bdp->length,
					DMA_TO_DEVICE);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next_txbd(bdp, base, tx_ring_size);
		}
L
Linus Torvalds 已提交
1779

1780 1781 1782 1783
		/*
		 * If there's room in the queue (limit it to rx_buffer_size)
		 * we add this skb back into the pool, if it's the right size
		 */
1784
		if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
1785 1786 1787 1788 1789 1790
				skb_recycle_check(skb, priv->rx_buffer_size +
					RXBUF_ALIGNMENT))
			__skb_queue_head(&priv->rx_recycle, skb);
		else
			dev_kfree_skb_any(skb);

1791
		tx_queue->tx_skbuff[skb_dirtytx] = NULL;
D
Dai Haruki 已提交
1792

D
Dai Haruki 已提交
1793 1794 1795 1796
		skb_dirtytx = (skb_dirtytx + 1) &
			TX_RING_MOD_MASK(tx_ring_size);

		howmany++;
1797
		tx_queue->num_txbdfree += frags + 1;
D
Dai Haruki 已提交
1798
	}
L
Linus Torvalds 已提交
1799

D
Dai Haruki 已提交
1800
	/* If we freed a buffer, we can restart transmission, if necessary */
1801
	if (netif_queue_stopped(dev) && tx_queue->num_txbdfree)
D
Dai Haruki 已提交
1802
		netif_wake_queue(dev);
L
Linus Torvalds 已提交
1803

D
Dai Haruki 已提交
1804
	/* Update dirty indicators */
1805 1806
	tx_queue->skb_dirtytx = skb_dirtytx;
	tx_queue->dirty_tx = bdp;
L
Linus Torvalds 已提交
1807

D
Dai Haruki 已提交
1808 1809 1810 1811 1812
	dev->stats.tx_packets += howmany;

	return howmany;
}

1813
static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
D
Dai Haruki 已提交
1814
{
1815
	struct gfar_private *priv = gfargrp->priv;
1816 1817
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
1818 1819
	unsigned long flags;

1820 1821 1822 1823
	rx_queue = priv->rx_queue;
	tx_queue = priv->tx_queue;
	spin_lock_irqsave(&tx_queue->txlock, flags);
	spin_lock(&rx_queue->rxlock);
1824

1825
	if (napi_schedule_prep(&rx_queue->napi)) {
1826
		gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
1827
		__napi_schedule(&rx_queue->napi);
1828 1829 1830 1831 1832
	} else {
		/*
		 * Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
1833
		gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
1834
	}
1835

1836 1837
	spin_unlock(&rx_queue->rxlock);
	spin_unlock_irqrestore(&tx_queue->txlock, flags);
1838
}
L
Linus Torvalds 已提交
1839

1840
/* Interrupt Handler for Transmit complete */
1841
static irqreturn_t gfar_transmit(int irq, void *grp_id)
1842
{
1843
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
L
Linus Torvalds 已提交
1844 1845 1846
	return IRQ_HANDLED;
}

1847
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
1848 1849
		struct sk_buff *skb)
{
1850
	struct net_device *dev = rx_queue->dev;
1851
	struct gfar_private *priv = netdev_priv(dev);
1852
	dma_addr_t buf;
1853

1854 1855
	buf = dma_map_single(&priv->ofdev->dev, skb->data,
			     priv->rx_buffer_size, DMA_FROM_DEVICE);
1856
	gfar_init_rxbdp(rx_queue, bdp, buf);
1857 1858 1859 1860
}


struct sk_buff * gfar_new_skb(struct net_device *dev)
L
Linus Torvalds 已提交
1861
{
1862
	unsigned int alignamount;
L
Linus Torvalds 已提交
1863 1864 1865
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;

1866 1867 1868 1869
	skb = __skb_dequeue(&priv->rx_recycle);
	if (!skb)
		skb = netdev_alloc_skb(dev,
				priv->rx_buffer_size + RXBUF_ALIGNMENT);
L
Linus Torvalds 已提交
1870

1871
	if (!skb)
L
Linus Torvalds 已提交
1872 1873
		return NULL;

1874
	alignamount = RXBUF_ALIGNMENT -
1875
		(((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1));
1876

L
Linus Torvalds 已提交
1877 1878 1879
	/* We need the data buffer to be aligned properly.  We will reserve
	 * as many bytes as needed to align the data properly
	 */
1880
	skb_reserve(skb, alignamount);
L
Linus Torvalds 已提交
1881 1882 1883 1884

	return skb;
}

1885
static inline void count_errors(unsigned short status, struct net_device *dev)
L
Linus Torvalds 已提交
1886
{
1887
	struct gfar_private *priv = netdev_priv(dev);
1888
	struct net_device_stats *stats = &dev->stats;
L
Linus Torvalds 已提交
1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors
	 * matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;

		estats->rx_trunc++;

		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			estats->rx_large++;
		else
			estats->rx_short++;
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		estats->rx_nonoctet++;
	}
	if (status & RXBD_CRCERR) {
		estats->rx_crcerr++;
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		estats->rx_overrun++;
		stats->rx_crc_errors++;
	}
}

1923
irqreturn_t gfar_receive(int irq, void *grp_id)
L
Linus Torvalds 已提交
1924
{
1925
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
L
Linus Torvalds 已提交
1926 1927 1928
	return IRQ_HANDLED;
}

1929 1930 1931 1932 1933
static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary.  Otherwise, it is */
1934
	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
1935 1936 1937 1938 1939 1940
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;
}


/* gfar_process_frame() -- handle one incoming packet if skb
 * isn't NULL.
 *
 * @dev:         receiving net device
 * @skb:         the received frame; data begins with the FCB (if the
 *               hardware prepends one) and/or padding bytes
 * @amount_pull: bytes of FCB + padding to strip from the head
 *
 * Returns 0 unconditionally; a stack-level drop is only recorded in
 * extra_stats.kernel_dropped.
 */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			      int amount_pull)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;

	int ret;

	/* fcb is at the beginning if exists */
	fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb */
	/* Remove the padded bytes, if there are any */
	/* NOTE: skb_pull only advances skb->data, so fcb keeps pointing
	 * at the (still allocated) FCB bytes and may be used below. */
	if (amount_pull)
		skb_pull(skb, amount_pull);

	if (priv->rx_csum_enable)
		gfar_rx_checksum(skb, fcb);

	/* Tell the skb what kind of packet this is */
	skb->protocol = eth_type_trans(skb, dev);

	/* Send the packet up the stack, via the VLAN acceleration path
	 * when a VLAN group is configured and the FCB flags a VLAN tag */
	if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
		ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp, fcb->vlctl);
	else
		ret = netif_receive_skb(skb);

	if (NET_RX_DROP == ret)
		priv->extra_stats.kernel_dropped++;

	return 0;
}

/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 *   until the budget/quota has been reached. Returns the number
 *   of frames handled
 */
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
	struct net_device *dev = rx_queue->dev;
	struct rxbd8 *bdp, *base;
	struct sk_buff *skb;
	int pkt_len;
	int amount_pull;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = rx_queue->cur_rx;
	base = rx_queue->rx_bd_base;

	/* Bytes of FCB + padding to strip from each frame head */
	amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) +
		priv->padding;

	/* Walk descriptors until we hit an empty one or exhaust budget */
	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		struct sk_buff *newskb;
		/* Read the descriptor status before its other fields */
		rmb();

		/* Add another skb for the future */
		newskb = gfar_new_skb(dev);

		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];

		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
				priv->rx_buffer_size, DMA_FROM_DEVICE);

		/* We drop the frame if we failed to allocate a new buffer */
		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
				 bdp->status & RXBD_ERR)) {
			count_errors(bdp->status, dev);

			/* No replacement buffer: reuse the old skb for
			 * this descriptor (the frame is dropped) */
			if (unlikely(!newskb))
				newskb = skb;
			else if (skb) {
				/*
				 * We need to reset ->data to what it
				 * was before gfar_new_skb() re-aligned
				 * it to an RXBUF_ALIGNMENT boundary
				 * before we put the skb back on the
				 * recycle list.
				 */
				skb->data = skb->head + NET_SKB_PAD;
				__skb_queue_head(&priv->rx_recycle, skb);
			}
		} else {
			/* Increment the number of packets */
			dev->stats.rx_packets++;
			howmany++;

			if (likely(skb)) {
				pkt_len = bdp->length - ETH_FCS_LEN;
				/* Remove the FCS from the packet length */
				skb_put(skb, pkt_len);
				dev->stats.rx_bytes += pkt_len;

				/* NOTE(review): leftover debug check —
				 * missing a KERN_* level on the printk */
				if (in_irq() || irqs_disabled())
					printk("Interrupt problem!\n");
				gfar_process_frame(dev, skb, amount_pull);

			} else {
				if (netif_msg_rx_err(priv))
					printk(KERN_WARNING
					       "%s: Missing skb!\n", dev->name);
				dev->stats.rx_dropped++;
				priv->extra_stats.rx_skbmissing++;
			}

		}

		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;

		/* Setup the new bdp */
		gfar_new_rxbdp(rx_queue, bdp, newskb);

		/* Update to the next pointer */
		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);

		/* update to point at the next skb */
		rx_queue->skb_currx =
		    (rx_queue->skb_currx + 1) &
		    RX_RING_MOD_MASK(rx_queue->rx_ring_size);
	}

	/* Update the current rxbd pointer to be the next one */
	rx_queue->cur_rx = bdp;

	return howmany;
}

/* NAPI poll callback: clean completed TX work (best effort, lock
 * permitting), receive up to @budget frames, and re-enable interrupts
 * once the ring is drained below budget.  Returns the number of RX
 * frames processed, or the full budget when TX work remains. */
static int gfar_poll(struct napi_struct *napi, int budget)
{
	struct gfar_priv_rx_q *rx_queue = container_of(napi,
			struct gfar_priv_rx_q, napi);
	struct net_device *dev = rx_queue->dev;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp.regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	int tx_cleaned = 0;
	int rx_cleaned = 0;
	unsigned long flags;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived */
	gfar_write(&regs->ievent, IEVENT_RTX_MASK);
	tx_queue = priv->tx_queue;

	/* If we fail to get the lock, don't bother with the TX BDs */
	if (spin_trylock_irqsave(&tx_queue->txlock, flags)) {
		tx_cleaned = gfar_clean_tx_ring(tx_queue);
		spin_unlock_irqrestore(&tx_queue->txlock, flags);
	}

	rx_cleaned = gfar_clean_rx_ring(rx_queue, budget);

	/* TX work was done, so stay on the poll list for another pass */
	if (tx_cleaned)
		return budget;

	if (rx_cleaned < budget) {
		napi_complete(napi);

		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT);

		/* Re-enable the full interrupt mask now that polling is done */
		gfar_write(&regs->imask, IMASK_DEFAULT);

		/* If we are coalescing interrupts, update the timer */
		/* Otherwise, clear it */
		if (likely(rx_queue->rxcoalescing)) {
			gfar_write(&regs->rxic, 0);
			gfar_write(&regs->rxic, rx_queue->rxic);
		}
		if (likely(tx_queue->txcoalescing)) {
			gfar_write(&regs->txic, 0);
			gfar_write(&regs->txic, tx_queue->txic);
		}
	}

	return rx_cleaned;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Mask all three IRQ lines, run the combined handler
		 * once, then unmask in reverse order */
		disable_irq(priv->gfargrp.interruptTransmit);
		disable_irq(priv->gfargrp.interruptReceive);
		disable_irq(priv->gfargrp.interruptError);
		gfar_interrupt(priv->gfargrp.interruptTransmit, &priv->gfargrp);
		enable_irq(priv->gfargrp.interruptError);
		enable_irq(priv->gfargrp.interruptReceive);
		enable_irq(priv->gfargrp.interruptTransmit);
	} else {
		/* Single shared interrupt line */
		disable_irq(priv->gfargrp.interruptTransmit);
		gfar_interrupt(priv->gfargrp.interruptTransmit, &priv->gfargrp);
		enable_irq(priv->gfargrp.interruptTransmit);
	}
}
#endif

/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;

	/* Save ievent for future reference */
	u32 events = gfar_read(&gfargrp->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, grp_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, grp_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, grp_id);

	return IRQ_HANDLED;
}

/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar __iomem *regs = priv->gfargrp.regs;
	unsigned long flags;
	struct phy_device *phydev = priv->phydev;
	int new_state = 0;	/* set when anything worth logging changed */

	/* MAC reconfiguration is serialized against the TX path */
	tx_queue = priv->tx_queue;
	spin_lock_irqsave(&tx_queue->txlock, flags);
	if (phydev->link) {
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				/* Gigabit uses the GMII interface mode */
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);

				ecntrl &= ~(ECNTRL_R100);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				if (netif_msg_link(priv))
					printk(KERN_WARNING
						"%s: Ack!  Speed (%d) is not 10/100/1000!\n",
						dev->name, phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		/* Commit the accumulated changes to the hardware */
		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		/* Link went down: reset cached state so the next link-up
		 * reprograms everything */
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	spin_unlock_irqrestore(&tx_queue->txlock, flags);
}
L
Linus Torvalds 已提交
/* Update the hash table based on the current list of multicast
 * addresses we subscribe to.  Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed */
static void gfar_set_multi(struct net_device *dev)
{
	struct dev_mc_list *mc_ptr;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp.regs;
	u32 tempval;

	if (dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting them */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;	/* entry 0 is the station address */
		} else {
			idx = 0;
			em_num = 0;
		}

		if (dev->mc_count == 0)
			return;

		/* Parse the list, and set the appropriate bits */
		/* Exact-match slots are used first, then the hash table */
		for(mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx,
						mc_ptr->dmi_addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
		}
	}

	return;
}

2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362

/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	u8 zero_arr[MAC_ADDR_LEN] = {0,0,0,0,0,0};

	for(idx = 1;idx < GFAR_EM_NUM + 1;idx++)
		gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
}

L
Linus Torvalds 已提交
2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380
/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table.  The table is controlled through 8 32-bit registers:
 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 * gaddr7.  This means that the 3 most significant bits in the
 * hash index which gaddr register to use, and the 5 other bits
 * indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the entry. */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(MAC_ADDR_LEN, addr);
2381 2382 2383
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
L
Linus Torvalds 已提交
2384 2385
	u32 value = (1 << (31-whichbit));

2386
	tempval = gfar_read(priv->hash_regs[whichreg]);
L
Linus Torvalds 已提交
2387
	tempval |= value;
2388
	gfar_write(priv->hash_regs[whichreg], tempval);
L
Linus Torvalds 已提交
2389 2390 2391 2392

	return;
}

/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp.regs;
	int idx;
	char tmpbuf[MAC_ADDR_LEN];
	u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;

	/* Each entry is a pair of 32-bit registers */
	macptr += num*2;

	/* Now copy it into the mac registers backwards, cuz */
	/* little endian is silly */
	for (idx = 0; idx < MAC_ADDR_LEN; idx++)
		tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];

	/* First word: last four bytes of the (reversed) address */
	gfar_write(macptr, *((u32 *) (tmpbuf)));

	tempval = *((u32 *) (tmpbuf + 4));

	gfar_write(macptr+1, tempval);
}
/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_private *priv= gfargrp->priv;
	struct net_device *dev = priv->ndev;

	/* Save ievent for future reference */
	u32 events = gfar_read(&regs->ievent);

	/* Clear IEVENT */
	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	/* Hmm... */
	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
		       dev->name, events, gfar_read(&regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		/* Late collision */
		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		/* Collision retry limit exceeded */
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			if (netif_msg_tx_err(priv))
				printk(KERN_DEBUG "%s: TX FIFO underrun, "
				       "packet dropped.\n", dev->name);
			dev->stats.tx_dropped++;
			priv->extra_stats.tx_underrun++;

			/* Reactivate the Tx Queues */
			gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
		}
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
	}
	if (events & IEVENT_BSY) {
		/* Receiver busy: ran out of descriptors; try to drain */
		dev->stats.rx_errors++;
		priv->extra_stats.rx_bsy++;

		gfar_receive(irq, grp_id);

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
			       dev->name, gfar_read(&regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_babr++;

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: babbling RX error\n", dev->name);
	}
	if (events & IEVENT_EBERR) {
		priv->extra_stats.eberr++;
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: bus error\n", dev->name);
	}
	if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
		printk(KERN_DEBUG "%s: control frame\n", dev->name);

	if (events & IEVENT_BABT) {
		priv->extra_stats.tx_babt++;
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: babbling TX error\n", dev->name);
	}
	return IRQ_HANDLED;
}

/* Device-tree match table: binds this driver to any node of type
 * "network" that is compatible with "gianfar" */
static struct of_device_id gfar_match[] =
{
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gfar_match);

/* Structure for a device driver */
static struct of_platform_driver gfar_driver = {
	.name = "fsl-gianfar",
	.match_table = gfar_match,

	.probe = gfar_probe,
	.remove = gfar_remove,
	/* Legacy OF suspend/resume callbacks plus dev_pm_ops (the pm ops
	 * take precedence when CONFIG_PM selects them via GFAR_PM_OPS) */
	.suspend = gfar_legacy_suspend,
	.resume = gfar_legacy_resume,
	.driver.pm = GFAR_PM_OPS,
};

/* Module entry point: register the OF platform driver */
static int __init gfar_init(void)
{
	return of_register_platform_driver(&gfar_driver);
}

/* Module exit point: unregister the OF platform driver */
static void __exit gfar_exit(void)
{
	of_unregister_platform_driver(&gfar_driver);
}

module_init(gfar_init);
module_exit(gfar_exit);