/*
 *  Driver for the IDT RC32434 (Korina) on-chip ethernet controller.
 *
 *  Copyright 2004 IDT Inc. (rischelp@idt.com)
 *  Copyright 2006 Felix Fietkau <nbd@openwrt.org>
 *  Copyright 2008 Florian Fainelli <florian@openwrt.org>
 *
 *  This program is free software; you can redistribute  it and/or modify it
 *  under  the terms of  the GNU General  Public License as published by the
 *  Free Software Foundation;  either version 2 of the  License, or (at your
 *  option) any later version.
 *
 *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
 *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
 *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
 *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *  You should have received a copy of the  GNU General Public License along
 *  with this program; if not, write  to the Free Software Foundation, Inc.,
 *  675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  Writing to a DMA status register:
 *
 *  When writing to the status register, you should mask the bit you have
 *  been testing the status register with. Both Tx and Rx DMA registers
 *  should stick to this procedure.
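 *
 *  korina_rx() below, for instance, masks out only the DMA_STAT_DONE
 *  bit it has been testing when it writes the Rx DMA status back:
 *
 *	writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);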
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>

#include <asm/bootinfo.h>
#include <asm/bitops.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <asm/mach-rc32434/rb.h>
#include <asm/mach-rc32434/rc32434.h>
#include <asm/mach-rc32434/eth.h>
#include <asm/mach-rc32434/dma_v.h>

#define DRV_NAME        "korina"
#define DRV_VERSION     "0.10"
#define DRV_RELDATE     "04Mar2008"

#define STATION_ADDRESS_HIGH(dev) (((dev)->dev_addr[0] << 8) | \
				   ((dev)->dev_addr[1]))
#define STATION_ADDRESS_LOW(dev)  (((dev)->dev_addr[2] << 24) | \
				   ((dev)->dev_addr[3] << 16) | \
				   ((dev)->dev_addr[4] << 8)  | \
				   ((dev)->dev_addr[5]))

#define MII_CLOCK 1250000 	/* no more than 2.5MHz */

/* the following must be powers of two */
#define KORINA_NUM_RDS	64  /* number of receive descriptors */
#define KORINA_NUM_TDS	64  /* number of transmit descriptors */

/* KORINA_RBSIZE is the hardware's default maximum receive
 * frame size in bytes. Having this hardcoded means that there
 * is no support for MTU sizes greater than 1500. */
#define KORINA_RBSIZE	1536 /* size of one resource buffer = Ether MTU */
#define KORINA_RDS_MASK	(KORINA_NUM_RDS - 1)
#define KORINA_TDS_MASK	(KORINA_NUM_TDS - 1)
#define RD_RING_SIZE 	(KORINA_NUM_RDS * sizeof(struct dma_desc))
#define TD_RING_SIZE	(KORINA_NUM_TDS * sizeof(struct dma_desc))

#define TX_TIMEOUT 	(6000 * HZ / 1000)

enum chain_status { desc_filled, desc_empty };
#define IS_DMA_FINISHED(X)   (((X) & (DMA_DESC_FINI)) != 0)
#define IS_DMA_DONE(X)   (((X) & (DMA_DESC_DONE)) != 0)
#define RCVPKT_LENGTH(X)     (((X) & ETH_RX_LEN) >> ETH_RX_LEN_BIT)

/* Information that needs to be kept for each board. */
struct korina_private {
	struct eth_regs *eth_regs;
	struct dma_reg *rx_dma_regs;
	struct dma_reg *tx_dma_regs;
	struct dma_desc *td_ring; /* transmit descriptor ring */
	struct dma_desc *rd_ring; /* receive descriptor ring  */

	struct sk_buff *tx_skb[KORINA_NUM_TDS];
	struct sk_buff *rx_skb[KORINA_NUM_RDS];

	int rx_next_done;
	int rx_chain_head;
	int rx_chain_tail;
	enum chain_status rx_chain_status;

	int tx_next_done;
	int tx_chain_head;
	int tx_chain_tail;
	enum chain_status tx_chain_status;
	int tx_count;
	int tx_full;

	int rx_irq;
	int tx_irq;
	int ovr_irq;
	int und_irq;

	spinlock_t lock;        /* NIC xmit lock */

	int dma_halt_cnt;
	int dma_run_cnt;
	struct napi_struct napi;
	struct timer_list media_check_timer;
	struct mii_if_info mii_if;
	struct work_struct restart_task;
	struct net_device *dev;
	int phy_addr;
};

extern unsigned int idt_cpu_freq;

static inline void korina_start_dma(struct dma_reg *ch, u32 dma_addr)
{
	writel(0, &ch->dmandptr);
	writel(dma_addr, &ch->dmadptr);
}

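/*
 * Abort a running DMA channel: request a halt, wait until the engine
 * reports DMA_STAT_HALT (refreshing the transmit timestamp so the netdev
 * watchdog does not fire meanwhile), then clear the channel's status and
 * descriptor pointers.
 */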
static inline void korina_abort_dma(struct net_device *dev,
					struct dma_reg *ch)
{
	if (readl(&ch->dmac) & DMA_CHAN_RUN_BIT) {
		writel(0x10, &ch->dmac);

		while (!(readl(&ch->dmas) & DMA_STAT_HALT))
			netif_trans_update(dev);

		writel(0, &ch->dmas);
	}

	writel(0, &ch->dmadptr);
	writel(0, &ch->dmandptr);
}

static inline void korina_chain_dma(struct dma_reg *ch, u32 dma_addr)
{
	writel(dma_addr, &ch->dmandptr);
}

static void korina_abort_tx(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);

	korina_abort_dma(dev, lp->tx_dma_regs);
}

static void korina_abort_rx(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);

	korina_abort_dma(dev, lp->rx_dma_regs);
}

static void korina_start_rx(struct korina_private *lp,
					struct dma_desc *rd)
{
	korina_start_dma(lp->rx_dma_regs, CPHYSADDR(rd));
}

static void korina_chain_rx(struct korina_private *lp,
					struct dma_desc *rd)
{
	korina_chain_dma(lp->rx_dma_regs, CPHYSADDR(rd));
}

/* transmit packet */
static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	unsigned long flags;
	u32 length;
	u32 chain_prev, chain_next;
	struct dma_desc *td;

	spin_lock_irqsave(&lp->lock, flags);

	td = &lp->td_ring[lp->tx_chain_tail];

	/* stop queue when full, drop pkts if queue already full */
	if (lp->tx_count >= (KORINA_NUM_TDS - 2)) {
		lp->tx_full = 1;

		if (lp->tx_count == (KORINA_NUM_TDS - 2))
			netif_stop_queue(dev);
		else {
			dev->stats.tx_dropped++;
			dev_kfree_skb_any(skb);
			spin_unlock_irqrestore(&lp->lock, flags);

			return NETDEV_TX_BUSY;
		}
	}

	lp->tx_count++;

	lp->tx_skb[lp->tx_chain_tail] = skb;

	length = skb->len;
	dma_cache_wback((u32)skb->data, skb->len);

	/* Setup the transmit descriptor. */
	dma_cache_inv((u32) td, sizeof(*td));
	td->ca = CPHYSADDR(skb->data);
	chain_prev = (lp->tx_chain_tail - 1) & KORINA_TDS_MASK;
	chain_next = (lp->tx_chain_tail + 1) & KORINA_TDS_MASK;

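	/* If the next-descriptor pointer register is clear, the Tx DMA
	 * engine is idle: hand the chain head to the hardware right away.
	 * Otherwise only link the new descriptor into the software chain;
	 * it is passed to the hardware later, from the Tx DMA interrupt. */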
	if (readl(&(lp->tx_dma_regs->dmandptr)) == 0) {
		if (lp->tx_chain_status == desc_empty) {
			/* Update tail */
			td->control = DMA_COUNT(length) |
					DMA_DESC_COF | DMA_DESC_IOF;
			/* Move tail */
			lp->tx_chain_tail = chain_next;
			/* Write to NDPTR */
			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
					&lp->tx_dma_regs->dmandptr);
			/* Move head to tail */
			lp->tx_chain_head = lp->tx_chain_tail;
		} else {
			/* Update tail */
			td->control = DMA_COUNT(length) |
					DMA_DESC_COF | DMA_DESC_IOF;
			/* Link to prev */
			lp->td_ring[chain_prev].control &=
					~DMA_DESC_COF;
			/* Link to prev */
			lp->td_ring[chain_prev].link =  CPHYSADDR(td);
			/* Move tail */
			lp->tx_chain_tail = chain_next;
			/* Write to NDPTR */
			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
					&(lp->tx_dma_regs->dmandptr));
			/* Move head to tail */
			lp->tx_chain_head = lp->tx_chain_tail;
			lp->tx_chain_status = desc_empty;
		}
	} else {
		if (lp->tx_chain_status == desc_empty) {
			/* Update tail */
			td->control = DMA_COUNT(length) |
					DMA_DESC_COF | DMA_DESC_IOF;
			/* Move tail */
			lp->tx_chain_tail = chain_next;
			lp->tx_chain_status = desc_filled;
		} else {
			/* Update tail */
			td->control = DMA_COUNT(length) |
					DMA_DESC_COF | DMA_DESC_IOF;
			lp->td_ring[chain_prev].control &=
					~DMA_DESC_COF;
			lp->td_ring[chain_prev].link =  CPHYSADDR(td);
			lp->tx_chain_tail = chain_next;
		}
	}
	dma_cache_wback((u32) td, sizeof(*td));

	netif_trans_update(dev);
	spin_unlock_irqrestore(&lp->lock, flags);

	return NETDEV_TX_OK;
}

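/* MII management access helpers used by the generic MII library.
 * The PHY address is derived from the Rx IRQ number; the mii_id
 * argument passed in by the caller is overwritten. */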
static int mdio_read(struct net_device *dev, int mii_id, int reg)
{
	struct korina_private *lp = netdev_priv(dev);
	int ret;

	mii_id = ((lp->rx_irq == 0x2c ? 1 : 0) << 8);

	writel(0, &lp->eth_regs->miimcfg);
	writel(0, &lp->eth_regs->miimcmd);
	writel(mii_id | reg, &lp->eth_regs->miimaddr);
	writel(ETH_MII_CMD_SCN, &lp->eth_regs->miimcmd);

	ret = (int)(readl(&lp->eth_regs->miimrdd));
	return ret;
}

static void mdio_write(struct net_device *dev, int mii_id, int reg, int val)
{
	struct korina_private *lp = netdev_priv(dev);

	mii_id = ((lp->rx_irq == 0x2c ? 1 : 0) << 8);

	writel(0, &lp->eth_regs->miimcfg);
	writel(1, &lp->eth_regs->miimcmd);
	writel(mii_id | reg, &lp->eth_regs->miimaddr);
	writel(ETH_MII_CMD_SCN, &lp->eth_regs->miimcmd);
	writel(val, &lp->eth_regs->miimwtd);
}

/* Ethernet Rx DMA interrupt */
static irqreturn_t korina_rx_dma_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct korina_private *lp = netdev_priv(dev);
	u32 dmas, dmasm;
	irqreturn_t retval;

	dmas = readl(&lp->rx_dma_regs->dmas);
	if (dmas & (DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR)) {
		dmasm = readl(&lp->rx_dma_regs->dmasm);
		writel(dmasm | (DMA_STAT_DONE |
				DMA_STAT_HALT | DMA_STAT_ERR),
				&lp->rx_dma_regs->dmasm);

		napi_schedule(&lp->napi);

		if (dmas & DMA_STAT_ERR)
			printk(KERN_ERR "%s: DMA error\n", dev->name);

		retval = IRQ_HANDLED;
	} else
		retval = IRQ_NONE;

	return retval;
}

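/* Receive up to @limit packets: record error statistics from the
 * descriptor status word, hand good frames to the stack, refill the
 * ring with fresh buffers and rechain the Rx DMA if it has halted. */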
static int korina_rx(struct net_device *dev, int limit)
{
	struct korina_private *lp = netdev_priv(dev);
	struct dma_desc *rd = &lp->rd_ring[lp->rx_next_done];
	struct sk_buff *skb, *skb_new;
	u8 *pkt_buf;
	u32 devcs, pkt_len, dmas;
	int count;

	dma_cache_inv((u32)rd, sizeof(*rd));

	for (count = 0; count < limit; count++) {
		skb = lp->rx_skb[lp->rx_next_done];
		skb_new = NULL;

		devcs = rd->devcs;

		if ((KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) == 0)
			break;

		/* Update statistics counters */
		if (devcs & ETH_RX_CRC)
			dev->stats.rx_crc_errors++;
		if (devcs & ETH_RX_LOR)
			dev->stats.rx_length_errors++;
		if (devcs & ETH_RX_LE)
			dev->stats.rx_length_errors++;
		if (devcs & ETH_RX_OVR)
			dev->stats.rx_fifo_errors++;
		if (devcs & ETH_RX_CV)
			dev->stats.rx_frame_errors++;
		if (devcs & ETH_RX_CES)
			dev->stats.rx_length_errors++;
		if (devcs & ETH_RX_MP)
			dev->stats.multicast++;

		if ((devcs & ETH_RX_LD) != ETH_RX_LD) {
			/* check that this is a whole packet
			 * WARNING: DMA_FD bit incorrectly set
			 * in Rc32434 (errata ref #077) */
			dev->stats.rx_errors++;
			dev->stats.rx_dropped++;
		} else if ((devcs & ETH_RX_ROK)) {
			pkt_len = RCVPKT_LENGTH(devcs);

			/* must be the (first and) last
			 * descriptor then */
			pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;

			/* invalidate the cache */
			dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);

			/* Malloc up new buffer. */
			skb_new = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);

			if (!skb_new)
				break;
			/* Do not count the CRC */
			skb_put(skb, pkt_len - 4);
			skb->protocol = eth_type_trans(skb, dev);

			/* Pass the packet to upper layers */
			netif_receive_skb(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;

			/* Update the mcast stats */
			if (devcs & ETH_RX_MP)
				dev->stats.multicast++;

			lp->rx_skb[lp->rx_next_done] = skb_new;
		}

		rd->devcs = 0;

		/* Restore descriptor's curr_addr */
		if (skb_new)
			rd->ca = CPHYSADDR(skb_new->data);
		else
			rd->ca = CPHYSADDR(skb->data);

		rd->control = DMA_COUNT(KORINA_RBSIZE) |
			DMA_DESC_COD | DMA_DESC_IOD;
		lp->rd_ring[(lp->rx_next_done - 1) &
			KORINA_RDS_MASK].control &=
			~DMA_DESC_COD;

		lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK;
		dma_cache_wback((u32)rd, sizeof(*rd));
		rd = &lp->rd_ring[lp->rx_next_done];
		writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
	}

	dmas = readl(&lp->rx_dma_regs->dmas);

	if (dmas & DMA_STAT_HALT) {
		writel(~(DMA_STAT_HALT | DMA_STAT_ERR),
				&lp->rx_dma_regs->dmas);

		lp->dma_halt_cnt++;
		rd->devcs = 0;
		skb = lp->rx_skb[lp->rx_next_done];
		rd->ca = CPHYSADDR(skb->data);
		dma_cache_wback((u32)rd, sizeof(*rd));
		korina_chain_rx(lp, rd);
	}

	return count;
}

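/* NAPI poll handler: process received packets and, once under budget,
 * unmask the Rx DMA interrupts that the hard interrupt handler masked. */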
static int korina_poll(struct napi_struct *napi, int budget)
{
	struct korina_private *lp =
		container_of(napi, struct korina_private, napi);
	struct net_device *dev = lp->dev;
	int work_done;

	work_done = korina_rx(dev, budget);
	if (work_done < budget) {
		napi_complete(napi);

		writel(readl(&lp->rx_dma_regs->dmasm) &
			~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
			&lp->rx_dma_regs->dmasm);
	}
	return work_done;
}

/*
 * Set or clear the multicast filter for this adaptor.
 */
static void korina_multicast_list(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	unsigned long flags;
	struct netdev_hw_addr *ha;
	u32 recognise = ETH_ARC_AB;	/* always accept broadcasts */

	/* Set promiscuous mode */
	if (dev->flags & IFF_PROMISC)
		recognise |= ETH_ARC_PRO;

	else if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 4))
		/* All multicast and broadcast */
		recognise |= ETH_ARC_AM;

	/* Build the hash table */
	if (netdev_mc_count(dev) > 4) {
		u16 hash_table[4] = { 0 };
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(6, ha->addr);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
		}
		/* Accept filtered multicast */
		recognise |= ETH_ARC_AFM;

		/* Fill the MAC hash tables with their values */
		writel((u32)(hash_table[1] << 16 | hash_table[0]),
					&lp->eth_regs->ethhash0);
		writel((u32)(hash_table[3] << 16 | hash_table[2]),
					&lp->eth_regs->ethhash1);
	}

	spin_lock_irqsave(&lp->lock, flags);
	writel(recognise, &lp->eth_regs->etharc);
	spin_unlock_irqrestore(&lp->lock, flags);
}

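/* Reclaim descriptors the Tx DMA engine has finished with: update the
 * statistics, free the transmitted skbs, reset the descriptors and
 * unmask the Tx DMA interrupts.  Called from korina_tx_dma_interrupt(). */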
static void korina_tx(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	struct dma_desc *td = &lp->td_ring[lp->tx_next_done];
	u32 devcs;
	u32 dmas;

	spin_lock(&lp->lock);

	/* Process all desc that are done */
	while (IS_DMA_FINISHED(td->control)) {
		if (lp->tx_full == 1) {
			netif_wake_queue(dev);
			lp->tx_full = 0;
		}

		devcs = lp->td_ring[lp->tx_next_done].devcs;
		if ((devcs & (ETH_TX_FD | ETH_TX_LD)) !=
				(ETH_TX_FD | ETH_TX_LD)) {
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;

			/* Should never happen */
			printk(KERN_ERR "%s: split tx ignored\n",
							dev->name);
		} else if (devcs & ETH_TX_TOK) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes +=
					lp->tx_skb[lp->tx_next_done]->len;
		} else {
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;

			/* Underflow */
			if (devcs & ETH_TX_UND)
				dev->stats.tx_fifo_errors++;

			/* Oversized frame */
			if (devcs & ETH_TX_OF)
				dev->stats.tx_aborted_errors++;

			/* Excessive deferrals */
			if (devcs & ETH_TX_ED)
				dev->stats.tx_carrier_errors++;

			/* Collisions: medium busy */
			if (devcs & ETH_TX_EC)
				dev->stats.collisions++;

			/* Late collision */
			if (devcs & ETH_TX_LC)
				dev->stats.tx_window_errors++;
		}

		/* We must always free the original skb */
		if (lp->tx_skb[lp->tx_next_done]) {
			dev_kfree_skb_any(lp->tx_skb[lp->tx_next_done]);
			lp->tx_skb[lp->tx_next_done] = NULL;
		}

		lp->td_ring[lp->tx_next_done].control = DMA_DESC_IOF;
		lp->td_ring[lp->tx_next_done].devcs = ETH_TX_FD | ETH_TX_LD;
		lp->td_ring[lp->tx_next_done].link = 0;
		lp->td_ring[lp->tx_next_done].ca = 0;
		lp->tx_count--;

		/* Go on to next transmission */
		lp->tx_next_done = (lp->tx_next_done + 1) & KORINA_TDS_MASK;
		td = &lp->td_ring[lp->tx_next_done];

	}

	/* Clear the DMA status register */
	dmas = readl(&lp->tx_dma_regs->dmas);
	writel(~dmas, &lp->tx_dma_regs->dmas);

	writel(readl(&lp->tx_dma_regs->dmasm) &
			~(DMA_STAT_FINI | DMA_STAT_ERR),
			&lp->tx_dma_regs->dmasm);

	spin_unlock(&lp->lock);
}

static irqreturn_t
korina_tx_dma_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct korina_private *lp = netdev_priv(dev);
	u32 dmas, dmasm;
	irqreturn_t retval;

	dmas = readl(&lp->tx_dma_regs->dmas);

	if (dmas & (DMA_STAT_FINI | DMA_STAT_ERR)) {
		dmasm = readl(&lp->tx_dma_regs->dmasm);
		writel(dmasm | (DMA_STAT_FINI | DMA_STAT_ERR),
				&lp->tx_dma_regs->dmasm);

		korina_tx(dev);

		if (lp->tx_chain_status == desc_filled &&
			(readl(&(lp->tx_dma_regs->dmandptr)) == 0)) {
			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
				&(lp->tx_dma_regs->dmandptr));
			lp->tx_chain_status = desc_empty;
			lp->tx_chain_head = lp->tx_chain_tail;
			netif_trans_update(dev);
		}
		if (dmas & DMA_STAT_ERR)
			printk(KERN_ERR "%s: DMA error\n", dev->name);

		retval = IRQ_HANDLED;
	} else
		retval = IRQ_NONE;

	return retval;
}


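/* Let the MII library check the link and mirror the negotiated duplex
 * setting into the MAC2 register. */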
static void korina_check_media(struct net_device *dev, unsigned int init_media)
{
	struct korina_private *lp = netdev_priv(dev);

	mii_check_media(&lp->mii_if, 0, init_media);

	if (lp->mii_if.full_duplex)
		writel(readl(&lp->eth_regs->ethmac2) | ETH_MAC2_FD,
						&lp->eth_regs->ethmac2);
	else
		writel(readl(&lp->eth_regs->ethmac2) & ~ETH_MAC2_FD,
						&lp->eth_regs->ethmac2);
}

static void korina_poll_media(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct korina_private *lp = netdev_priv(dev);

	korina_check_media(dev, 0);
	mod_timer(&lp->media_check_timer, jiffies + HZ);
}

static void korina_set_carrier(struct mii_if_info *mii)
{
	if (mii->force_media) {
		/* autoneg is off: Link is always assumed to be up */
		if (!netif_carrier_ok(mii->dev))
			netif_carrier_on(mii->dev);
	} else  /* Let MII library update carrier status */
		korina_check_media(mii->dev, 0);
}

static int korina_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct korina_private *lp = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(rq);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;
	spin_lock_irq(&lp->lock);
	rc = generic_mii_ioctl(&lp->mii_if, data, cmd, NULL);
	spin_unlock_irq(&lp->lock);
	korina_set_carrier(&lp->mii_if);

	return rc;
}

/* ethtool helpers */
static void netdev_get_drvinfo(struct net_device *dev,
			struct ethtool_drvinfo *info)
{
	struct korina_private *lp = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, lp->dev->name, sizeof(info->bus_info));
}

static int netdev_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct korina_private *lp = netdev_priv(dev);
	int rc;

	spin_lock_irq(&lp->lock);
	rc = mii_ethtool_get_link_ksettings(&lp->mii_if, cmd);
	spin_unlock_irq(&lp->lock);

	return rc;
}

static int netdev_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct korina_private *lp = netdev_priv(dev);
	int rc;

	spin_lock_irq(&lp->lock);
	rc = mii_ethtool_set_link_ksettings(&lp->mii_if, cmd);
	spin_unlock_irq(&lp->lock);
	korina_set_carrier(&lp->mii_if);

	return rc;
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);

	return mii_link_ok(&lp->mii_if);
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo            = netdev_get_drvinfo,
	.get_link               = netdev_get_link,
	.get_link_ksettings     = netdev_get_link_ksettings,
	.set_link_ksettings     = netdev_set_link_ksettings,
};

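/* Allocate the Rx buffers and bring both descriptor rings into their
 * initial state, with the Rx ring chained into a loop. */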
static int korina_alloc_ring(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	struct sk_buff *skb;
	int i;

	/* Initialize the transmit descriptors */
	for (i = 0; i < KORINA_NUM_TDS; i++) {
		lp->td_ring[i].control = DMA_DESC_IOF;
		lp->td_ring[i].devcs = ETH_TX_FD | ETH_TX_LD;
		lp->td_ring[i].ca = 0;
		lp->td_ring[i].link = 0;
	}
	lp->tx_next_done = lp->tx_chain_head = lp->tx_chain_tail =
			lp->tx_full = lp->tx_count = 0;
	lp->tx_chain_status = desc_empty;

	/* Initialize the receive descriptors */
	for (i = 0; i < KORINA_NUM_RDS; i++) {
		skb = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);
		if (!skb)
			return -ENOMEM;
		lp->rx_skb[i] = skb;
		lp->rd_ring[i].control = DMA_DESC_IOD |
				DMA_COUNT(KORINA_RBSIZE);
		lp->rd_ring[i].devcs = 0;
		lp->rd_ring[i].ca = CPHYSADDR(skb->data);
		lp->rd_ring[i].link = CPHYSADDR(&lp->rd_ring[i+1]);
	}

	/* loop back receive descriptors, so the last
	 * descriptor points to the first one */
	lp->rd_ring[i - 1].link = CPHYSADDR(&lp->rd_ring[0]);
	lp->rd_ring[i - 1].control |= DMA_DESC_COD;

	lp->rx_next_done  = 0;
	lp->rx_chain_head = 0;
	lp->rx_chain_tail = 0;
	lp->rx_chain_status = desc_empty;

	return 0;
}

static void korina_free_ring(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	int i;

	for (i = 0; i < KORINA_NUM_RDS; i++) {
		lp->rd_ring[i].control = 0;
		if (lp->rx_skb[i])
			dev_kfree_skb_any(lp->rx_skb[i]);
		lp->rx_skb[i] = NULL;
	}

	for (i = 0; i < KORINA_NUM_TDS; i++) {
		lp->td_ring[i].control = 0;
		if (lp->tx_skb[i])
			dev_kfree_skb_any(lp->tx_skb[i]);
		lp->tx_skb[i] = NULL;
	}
}

/*
 * Initialize the RC32434 ethernet controller.
 */
static int korina_init(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);

	/* Disable DMA */
	korina_abort_tx(dev);
	korina_abort_rx(dev);

	/* reset ethernet logic */
	writel(0, &lp->eth_regs->ethintfc);
	while ((readl(&lp->eth_regs->ethintfc) & ETH_INT_FC_RIP))
		netif_trans_update(dev);

	/* Enable Ethernet Interface */
	writel(ETH_INT_FC_EN, &lp->eth_regs->ethintfc);

	/* Allocate rings */
	if (korina_alloc_ring(dev)) {
		printk(KERN_ERR "%s: descriptor allocation failed\n", dev->name);
		korina_free_ring(dev);
		return -ENOMEM;
	}

	writel(0, &lp->rx_dma_regs->dmas);
	/* Start Rx DMA */
	korina_start_rx(lp, &lp->rd_ring[0]);

	writel(readl(&lp->tx_dma_regs->dmasm) &
			~(DMA_STAT_FINI | DMA_STAT_ERR),
			&lp->tx_dma_regs->dmasm);
	writel(readl(&lp->rx_dma_regs->dmasm) &
			~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
			&lp->rx_dma_regs->dmasm);

	/* Accept only packets destined for this Ethernet device address */
	writel(ETH_ARC_AB, &lp->eth_regs->etharc);

	/* Set all Ether station address registers to their initial values */
	writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal0);
	writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah0);

	writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal1);
	writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah1);

	writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal2);
	writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah2);

	writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal3);
	writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah3);


	/* Frame Length Checking, Pad Enable, CRC Enable, Full Duplex set */
	writel(ETH_MAC2_PE | ETH_MAC2_CEN | ETH_MAC2_FD,
			&lp->eth_regs->ethmac2);

	/* Back to back inter-packet-gap */
	writel(0x15, &lp->eth_regs->ethipgt);
	/* Non - Back to back inter-packet-gap */
	writel(0x12, &lp->eth_regs->ethipgr);

	/* Management Clock Prescaler Divisor
	 * Clock independent setting */
	writel(((idt_cpu_freq) / MII_CLOCK + 1) & ~1,
		       &lp->eth_regs->ethmcp);

	/* don't transmit until fifo contains 48b */
	writel(48, &lp->eth_regs->ethfifott);

	writel(ETH_MAC1_RE, &lp->eth_regs->ethmac1);

	napi_enable(&lp->napi);
	netif_start_queue(dev);

	return 0;
}

/*
 * Restart the RC32434 ethernet controller.
 */
static void korina_restart_task(struct work_struct *work)
{
	struct korina_private *lp = container_of(work,
			struct korina_private, restart_task);
	struct net_device *dev = lp->dev;

	/*
	 * Disable interrupts
	 */
	disable_irq(lp->rx_irq);
	disable_irq(lp->tx_irq);
	disable_irq(lp->ovr_irq);
	disable_irq(lp->und_irq);

	writel(readl(&lp->tx_dma_regs->dmasm) |
				DMA_STAT_FINI | DMA_STAT_ERR,
				&lp->tx_dma_regs->dmasm);
	writel(readl(&lp->rx_dma_regs->dmasm) |
				DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR,
				&lp->rx_dma_regs->dmasm);

	napi_disable(&lp->napi);

	korina_free_ring(dev);

	if (korina_init(dev) < 0) {
		printk(KERN_ERR "%s: cannot restart device\n", dev->name);
		return;
	}
	korina_multicast_list(dev);

	enable_irq(lp->und_irq);
	enable_irq(lp->ovr_irq);
	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}

static void korina_clear_and_restart(struct net_device *dev, u32 value)
{
	struct korina_private *lp = netdev_priv(dev);

	netif_stop_queue(dev);
	writel(value, &lp->eth_regs->ethintfc);
	schedule_work(&lp->restart_task);
}

/* Ethernet Tx Underflow interrupt */
static irqreturn_t korina_und_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct korina_private *lp = netdev_priv(dev);
	unsigned int und;

	spin_lock(&lp->lock);

	und = readl(&lp->eth_regs->ethintfc);

	if (und & ETH_INT_FC_UND)
		korina_clear_and_restart(dev, und & ~ETH_INT_FC_UND);

	spin_unlock(&lp->lock);

	return IRQ_HANDLED;
}

static void korina_tx_timeout(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);

	schedule_work(&lp->restart_task);
}

/* Ethernet Rx Overflow interrupt */
static irqreturn_t
korina_ovr_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct korina_private *lp = netdev_priv(dev);
	unsigned int ovr;

	spin_lock(&lp->lock);
	ovr = readl(&lp->eth_regs->ethintfc);

	if (ovr & ETH_INT_FC_OVR)
		korina_clear_and_restart(dev, ovr & ~ETH_INT_FC_OVR);

	spin_unlock(&lp->lock);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void korina_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	korina_tx_dma_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

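/* Bring the interface up: initialize the controller, request the Rx,
 * Tx, overflow and underflow IRQs and start the media check timer. */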
static int korina_open(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	int ret;

	/* Initialize */
	ret = korina_init(dev);
	if (ret < 0) {
		printk(KERN_ERR "%s: cannot open device\n", dev->name);
		goto out;
	}

	/* Install the interrupt handlers
	 * that handle the Done, Finished,
	 * Ovr and Und events */
	ret = request_irq(lp->rx_irq, korina_rx_dma_interrupt,
			0, "Korina ethernet Rx", dev);
	if (ret < 0) {
		printk(KERN_ERR "%s: unable to get Rx DMA IRQ %d\n",
		    dev->name, lp->rx_irq);
		goto err_release;
	}
	ret = request_irq(lp->tx_irq, korina_tx_dma_interrupt,
			0, "Korina ethernet Tx", dev);
	if (ret < 0) {
		printk(KERN_ERR "%s: unable to get Tx DMA IRQ %d\n",
		    dev->name, lp->tx_irq);
		goto err_free_rx_irq;
	}

	/* Install handler for overrun error. */
	ret = request_irq(lp->ovr_irq, korina_ovr_interrupt,
			0, "Ethernet Overflow", dev);
	if (ret < 0) {
		printk(KERN_ERR "%s: unable to get OVR IRQ %d\n",
		    dev->name, lp->ovr_irq);
		goto err_free_tx_irq;
	}

	/* Install handler for underflow error. */
	ret = request_irq(lp->und_irq, korina_und_interrupt,
			0, "Ethernet Underflow", dev);
	if (ret < 0) {
		printk(KERN_ERR "%s: unable to get UND IRQ %d\n",
		    dev->name, lp->und_irq);
		goto err_free_ovr_irq;
	}
	mod_timer(&lp->media_check_timer, jiffies + 1);
out:
	return ret;

err_free_ovr_irq:
	free_irq(lp->ovr_irq, dev);
err_free_tx_irq:
	free_irq(lp->tx_irq, dev);
err_free_rx_irq:
	free_irq(lp->rx_irq, dev);
err_release:
	korina_free_ring(dev);
	goto out;
}

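/* Bring the interface down: stop the DMA channels, mask their
 * interrupts, cancel the restart work, free the rings and the IRQs. */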
static int korina_close(struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	u32 tmp;

	del_timer(&lp->media_check_timer);

	/* Disable interrupts */
	disable_irq(lp->rx_irq);
	disable_irq(lp->tx_irq);
	disable_irq(lp->ovr_irq);
	disable_irq(lp->und_irq);

	korina_abort_tx(dev);
	tmp = readl(&lp->tx_dma_regs->dmasm);
	tmp = tmp | DMA_STAT_FINI | DMA_STAT_ERR;
	writel(tmp, &lp->tx_dma_regs->dmasm);

	korina_abort_rx(dev);
	tmp = readl(&lp->rx_dma_regs->dmasm);
	tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR;
	writel(tmp, &lp->rx_dma_regs->dmasm);

	napi_disable(&lp->napi);

	cancel_work_sync(&lp->restart_task);

	korina_free_ring(dev);

	free_irq(lp->rx_irq, dev);
	free_irq(lp->tx_irq, dev);
	free_irq(lp->ovr_irq, dev);
	free_irq(lp->und_irq, dev);

	return 0;
}

static const struct net_device_ops korina_netdev_ops = {
	.ndo_open		= korina_open,
	.ndo_stop		= korina_close,
	.ndo_start_xmit		= korina_send_packet,
	.ndo_set_rx_mode	= korina_multicast_list,
	.ndo_tx_timeout		= korina_tx_timeout,
	.ndo_do_ioctl		= korina_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= korina_poll_controller,
#endif
};

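/* Probe one Korina platform device: map the MAC and the Rx/Tx DMA
 * register windows, allocate the descriptor rings in uncached (KSEG1)
 * memory, set up the MII interface and register the net_device. */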
static int korina_probe(struct platform_device *pdev)
{
	struct korina_device *bif = platform_get_drvdata(pdev);
	struct korina_private *lp;
	struct net_device *dev;
	struct resource *r;
	int rc;

	dev = alloc_etherdev(sizeof(struct korina_private));
	if (!dev)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &pdev->dev);
	lp = netdev_priv(dev);

	bif->dev = dev;
	memcpy(dev->dev_addr, bif->mac, ETH_ALEN);

	lp->rx_irq = platform_get_irq_byname(pdev, "korina_rx");
	lp->tx_irq = platform_get_irq_byname(pdev, "korina_tx");
	lp->ovr_irq = platform_get_irq_byname(pdev, "korina_ovr");
	lp->und_irq = platform_get_irq_byname(pdev, "korina_und");

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_regs");
	dev->base_addr = r->start;
	lp->eth_regs = ioremap_nocache(r->start, resource_size(r));
	if (!lp->eth_regs) {
		printk(KERN_ERR DRV_NAME ": cannot remap registers\n");
		rc = -ENXIO;
		goto probe_err_out;
	}

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_rx");
	lp->rx_dma_regs = ioremap_nocache(r->start, resource_size(r));
	if (!lp->rx_dma_regs) {
		printk(KERN_ERR DRV_NAME ": cannot remap Rx DMA registers\n");
		rc = -ENXIO;
		goto probe_err_dma_rx;
	}

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_tx");
	lp->tx_dma_regs = ioremap_nocache(r->start, resource_size(r));
	if (!lp->tx_dma_regs) {
		printk(KERN_ERR DRV_NAME ": cannot remap Tx DMA registers\n");
		rc = -ENXIO;
		goto probe_err_dma_tx;
	}

	lp->td_ring = kmalloc(TD_RING_SIZE + RD_RING_SIZE, GFP_KERNEL);
	if (!lp->td_ring) {
		rc = -ENXIO;
		goto probe_err_td_ring;
	}

	dma_cache_inv((unsigned long)(lp->td_ring),
			TD_RING_SIZE + RD_RING_SIZE);

	/* now convert TD_RING pointer to KSEG1 */
	lp->td_ring = (struct dma_desc *)KSEG1ADDR(lp->td_ring);
	lp->rd_ring = &lp->td_ring[KORINA_NUM_TDS];

	spin_lock_init(&lp->lock);
	/* just use the rx dma irq */
	dev->irq = lp->rx_irq;
	lp->dev = dev;

	dev->netdev_ops = &korina_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	netif_napi_add(dev, &lp->napi, korina_poll, 64);

	lp->phy_addr = (((lp->rx_irq == 0x2c? 1:0) << 8) | 0x05);
	lp->mii_if.dev = dev;
	lp->mii_if.mdio_read = mdio_read;
	lp->mii_if.mdio_write = mdio_write;
	lp->mii_if.phy_id = lp->phy_addr;
	lp->mii_if.phy_id_mask = 0x1f;
	lp->mii_if.reg_num_mask = 0x1f;

	rc = register_netdev(dev);
	if (rc < 0) {
		printk(KERN_ERR DRV_NAME
			": cannot register net device: %d\n", rc);
		goto probe_err_register;
	}
	setup_timer(&lp->media_check_timer, korina_poll_media, (unsigned long) dev);

	INIT_WORK(&lp->restart_task, korina_restart_task);

	printk(KERN_INFO "%s: " DRV_NAME "-" DRV_VERSION " " DRV_RELDATE "\n",
			dev->name);
out:
	return rc;

probe_err_register:
	kfree(lp->td_ring);
probe_err_td_ring:
	iounmap(lp->tx_dma_regs);
probe_err_dma_tx:
	iounmap(lp->rx_dma_regs);
probe_err_dma_rx:
	iounmap(lp->eth_regs);
probe_err_out:
	free_netdev(dev);
	goto out;
}

static int korina_remove(struct platform_device *pdev)
{
	struct korina_device *bif = platform_get_drvdata(pdev);
	struct korina_private *lp = netdev_priv(bif->dev);

	iounmap(lp->eth_regs);
	iounmap(lp->rx_dma_regs);
	iounmap(lp->tx_dma_regs);

	unregister_netdev(bif->dev);
	free_netdev(bif->dev);

	return 0;
}

static struct platform_driver korina_driver = {
	.driver.name = "korina",
	.probe = korina_probe,
	.remove = korina_remove,
};

module_platform_driver(korina_driver);

MODULE_AUTHOR("Philip Rischel <rischelp@idt.com>");
MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>");
MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
MODULE_DESCRIPTION("IDT RC32434 (Korina) Ethernet driver");
MODULE_LICENSE("GPL");