mv643xx_eth.c 57.5 KB
Newer Older
L
Linus Torvalds 已提交
1
/*
2
 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
L
Linus Torvalds 已提交
3 4 5
 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
 *
 * Based on the 64360 driver from:
6 7
 * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
 *		      Rabeeh Khoury <rabeeh@marvell.com>
L
Linus Torvalds 已提交
8 9
 *
 * Copyright (C) 2003 PMC-Sierra, Inc.,
10
 *	written by Manish Lachwani
L
Linus Torvalds 已提交
11 12 13
 *
 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
 *
14
 * Copyright (C) 2004-2006 MontaVista Software, Inc.
L
Linus Torvalds 已提交
15 16 17 18 19
 *			   Dale Farnsworth <dale@farnsworth.org>
 *
 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
 *				     <sjhill@realitydiluted.com>
 *
20 21 22
 * Copyright (C) 2007-2008 Marvell Semiconductor
 *			   Lennert Buytenhek <buytenh@marvell.com>
 *
L
Linus Torvalds 已提交
23 24 25 26 27 28 29 30 31 32 33 34 35 36
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
37

L
Linus Torvalds 已提交
38 39
#include <linux/init.h>
#include <linux/dma-mapping.h>
40
#include <linux/in.h>
L
Linus Torvalds 已提交
41 42 43 44 45
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
46
#include <linux/platform_device.h>
47 48 49 50 51 52
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
#include <linux/mv643xx_eth.h>
L
Linus Torvalds 已提交
53 54 55
#include <asm/io.h>
#include <asm/types.h>
#include <asm/system.h>
56

57 58
static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.0";
59

60 61 62
#define MV643XX_ETH_CHECKSUM_OFFLOAD_TX
#define MV643XX_ETH_NAPI
#define MV643XX_ETH_TX_FAST_REFILL
63

64
#ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX
65 66 67 68 69
#define MAX_DESCS_PER_SKB	(MAX_SKB_FRAGS + 1)
#else
#define MAX_DESCS_PER_SKB	1
#endif

70
#define ETH_HW_IP_ALIGN		2
71 72 73 74

/*
 * Registers shared between all ports.
 */
75 76 77 78 79 80 81
#define PHY_ADDR			0x0000
#define SMI_REG				0x0004
#define WINDOW_BASE(w)			(0x0200 + ((w) << 3))
#define WINDOW_SIZE(w)			(0x0204 + ((w) << 3))
#define WINDOW_REMAP_HIGH(w)		(0x0280 + ((w) << 2))
#define WINDOW_BAR_ENABLE		0x0290
#define WINDOW_PROTECT(w)		(0x0294 + ((w) << 4))
82 83 84 85

/*
 * Per-port registers.
 */
86
#define PORT_CONFIG(p)			(0x0400 + ((p) << 10))
87
#define  UNICAST_PROMISCUOUS_MODE	0x00000001
88 89 90 91 92 93
#define PORT_CONFIG_EXT(p)		(0x0404 + ((p) << 10))
#define MAC_ADDR_LOW(p)			(0x0414 + ((p) << 10))
#define MAC_ADDR_HIGH(p)		(0x0418 + ((p) << 10))
#define SDMA_CONFIG(p)			(0x041c + ((p) << 10))
#define PORT_SERIAL_CONTROL(p)		(0x043c + ((p) << 10))
#define PORT_STATUS(p)			(0x0444 + ((p) << 10))
94
#define  TX_FIFO_EMPTY			0x00000400
95 96 97
#define TXQ_COMMAND(p)			(0x0448 + ((p) << 10))
#define TX_BW_MTU(p)			(0x0458 + ((p) << 10))
#define INT_CAUSE(p)			(0x0460 + ((p) << 10))
98 99
#define  INT_RX				0x00000804
#define  INT_EXT			0x00000002
100
#define INT_CAUSE_EXT(p)		(0x0464 + ((p) << 10))
101 102 103 104 105
#define  INT_EXT_LINK			0x00100000
#define  INT_EXT_PHY			0x00010000
#define  INT_EXT_TX_ERROR_0		0x00000100
#define  INT_EXT_TX_0			0x00000001
#define  INT_EXT_TX			0x00000101
106 107 108 109 110 111 112 113 114 115
#define INT_MASK(p)			(0x0468 + ((p) << 10))
#define INT_MASK_EXT(p)			(0x046c + ((p) << 10))
#define TX_FIFO_URGENT_THRESHOLD(p)	(0x0474 + ((p) << 10))
#define RXQ_CURRENT_DESC_PTR(p)		(0x060c + ((p) << 10))
#define RXQ_COMMAND(p)			(0x0680 + ((p) << 10))
#define TXQ_CURRENT_DESC_PTR(p)		(0x06c0 + ((p) << 10))
#define MIB_COUNTERS(p)			(0x1000 + ((p) << 7))
#define SPECIAL_MCAST_TABLE(p)		(0x1400 + ((p) << 10))
#define OTHER_MCAST_TABLE(p)		(0x1500 + ((p) << 10))
#define UNICAST_TABLE(p)		(0x1600 + ((p) << 10))
116

117 118 119 120

/*
 * SDMA configuration register.
 */
121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139
#define RX_BURST_SIZE_4_64BIT		(2 << 1)
#define BLM_RX_NO_SWAP			(1 << 4)
#define BLM_TX_NO_SWAP			(1 << 5)
#define TX_BURST_SIZE_4_64BIT		(2 << 22)

#if defined(__BIG_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		RX_BURST_SIZE_4_64BIT	|	\
		TX_BURST_SIZE_4_64BIT
#elif defined(__LITTLE_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		RX_BURST_SIZE_4_64BIT	|	\
		BLM_RX_NO_SWAP		|	\
		BLM_TX_NO_SWAP		|	\
		TX_BURST_SIZE_4_64BIT
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif

140 141 142 143 144 145 146

/*
 * Port serial control register.
 */
#define SET_MII_SPEED_TO_100			(1 << 24)
#define SET_GMII_SPEED_TO_1000			(1 << 23)
#define SET_FULL_DUPLEX_MODE			(1 << 21)
147 148 149
#define MAX_RX_PACKET_1522BYTE			(1 << 17)
#define MAX_RX_PACKET_9700BYTE			(5 << 17)
#define MAX_RX_PACKET_MASK			(7 << 17)
150 151 152 153 154 155 156
#define DISABLE_AUTO_NEG_SPEED_GMII		(1 << 13)
#define DO_NOT_FORCE_LINK_FAIL			(1 << 10)
#define SERIAL_PORT_CONTROL_RESERVED		(1 << 9)
#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL		(1 << 3)
#define DISABLE_AUTO_NEG_FOR_DUPLEX		(1 << 2)
#define FORCE_LINK_PASS				(1 << 1)
#define SERIAL_PORT_ENABLE			(1 << 0)
157

158 159
#define DEFAULT_RX_QUEUE_SIZE		400
#define DEFAULT_TX_QUEUE_SIZE		800
160 161

/* SMI reg */
162 163 164 165
#define SMI_BUSY		0x10000000	/* 0 - Write, 1 - Read	*/
#define SMI_READ_VALID		0x08000000	/* 0 - Write, 1 - Read	*/
#define SMI_OPCODE_WRITE	0		/* Completion of Read	*/
#define SMI_OPCODE_READ		0x04000000	/* Operation is in progress */
166 167


168 169
/*
 * RX/TX descriptors.
170 171
 */
/*
 * Hardware DMA descriptor layouts.  Field order is dictated by the
 * chip and differs between big- and little-endian hosts; do not
 * reorder or resize these fields.
 */
#if defined(__BIG_ENDIAN)
struct rx_desc {
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u16 buf_size;		/* Buffer size				*/
	u32 cmd_sts;		/* Descriptor command status		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
};

struct tx_desc {
	u16 byte_cnt;		/* buffer byte count			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u32 cmd_sts;		/* Command/status field			*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
};
#elif defined(__LITTLE_ENDIAN)
struct rx_desc {
	u32 cmd_sts;		/* Descriptor command status		*/
	u16 buf_size;		/* Buffer size				*/
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
};

struct tx_desc {
	u32 cmd_sts;		/* Command/status field			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u16 byte_cnt;		/* buffer byte count			*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
};
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif

207
/* RX & TX descriptor command */
208
#define BUFFER_OWNED_BY_DMA		0x80000000
209 210

/* RX & TX descriptor status */
211
#define ERROR_SUMMARY			0x00000001
212 213

/* RX descriptor status */
214 215 216 217
#define LAYER_4_CHECKSUM_OK		0x40000000
#define RX_ENABLE_INTERRUPT		0x20000000
#define RX_FIRST_DESC			0x08000000
#define RX_LAST_DESC			0x04000000
218 219

/* TX descriptor command */
220 221 222 223 224 225 226 227
#define TX_ENABLE_INTERRUPT		0x00800000
#define GEN_CRC				0x00400000
#define TX_FIRST_DESC			0x00200000
#define TX_LAST_DESC			0x00100000
#define ZERO_PADDING			0x00080000
#define GEN_IP_V4_CHECKSUM		0x00040000
#define GEN_TCP_UDP_CHECKSUM		0x00020000
#define UDP_FRAME			0x00010000
228

229
#define TX_IHL_SHIFT			11
230 231


232
/* global *******************************************************************/
233
/*
 * State shared by all ethernet ports living behind one mv643xx
 * register block.
 */
struct mv643xx_eth_shared_private {
	void __iomem *base;	/* mapped base of the shared registers */

	/* used to protect SMI_REG, which is shared across ports */
	spinlock_t phy_lock;

	u32 win_protect;

	unsigned int t_clk;
};


/* per-port *****************************************************************/
246
/*
 * Software copy of the hardware MIB counters, accumulated by
 * update_mib_counters().  The octet counters are 64-bit because the
 * hardware exposes them as low/high 32-bit register pairs.
 */
struct mib_counters {
	u64 good_octets_received;
	u32 bad_octets_received;
	u32 internal_mac_transmit_err;
	u32 good_frames_received;
	u32 bad_frames_received;
	u32 broadcast_frames_received;
	u32 multicast_frames_received;
	u32 frames_64_octets;
	u32 frames_65_to_127_octets;
	u32 frames_128_to_255_octets;
	u32 frames_256_to_511_octets;
	u32 frames_512_to_1023_octets;
	u32 frames_1024_to_max_octets;
	u64 good_octets_sent;
	u32 good_frames_sent;
	u32 excessive_collision;
	u32 multicast_frames_sent;
	u32 broadcast_frames_sent;
	u32 unrec_mac_control_received;
	u32 fc_sent;
	u32 good_fc_received;
	u32 bad_fc_received;
	u32 undersize_received;
	u32 fragments_received;
	u32 oversize_received;
	u32 jabber_received;
	u32 mac_receive_error;
	u32 bad_crc_event;
	u32 collision;
	u32 late_collision;
};

279 280 281 282 283 284 285 286 287 288 289 290 291 292 293
/*
 * Per-port receive ring state.
 */
struct rx_queue {
	int rx_ring_size;		/* number of descriptors in the ring */

	int rx_desc_count;		/* descriptors currently given to hw */
	int rx_curr_desc;		/* next descriptor to process */
	int rx_used_desc;		/* next descriptor slot to refill */

	struct rx_desc *rx_desc_area;	/* the descriptor ring itself */
	dma_addr_t rx_desc_dma;		/* bus address of rx_desc_area */
	int rx_desc_area_size;
	struct sk_buff **rx_skb;	/* skb attached to each descriptor */

	struct timer_list rx_oom;	/* refill retry timer, armed when
					 * the ring runs completely dry */
};

294 295
/*
 * Per-port transmit ring state.
 */
struct tx_queue {
	int tx_ring_size;		/* number of descriptors in the ring */

	int tx_desc_count;		/* descriptors currently in use */
	int tx_curr_desc;		/* next descriptor slot to claim */
	int tx_used_desc;		/* oldest not-yet-reclaimed slot */

	struct tx_desc *tx_desc_area;	/* the descriptor ring itself */
	dma_addr_t tx_desc_dma;		/* bus address of tx_desc_area */
	int tx_desc_area_size;
	struct sk_buff **tx_skb;	/* skb to free per descriptor (set
					 * only on the last descriptor of
					 * each frame) */
};

/*
 * Per-port driver state.
 */
struct mv643xx_eth_private {
	struct mv643xx_eth_shared_private *shared;
	int port_num;			/* User Ethernet port number	*/

	/* register block whose SMI interface reaches this port's PHY */
	struct mv643xx_eth_shared_private *shared_smi;

	struct work_struct tx_timeout_task;

	struct net_device *dev;
	struct mib_counters mib_counters;
	spinlock_t lock;		/* protects ring state and (via
					 * spin_lock_irq) the mii calls */

	struct mii_if_info mii;

	/*
	 * RX state.
	 */
	int default_rx_ring_size;
	unsigned long rx_desc_sram_addr;
	int rx_desc_sram_size;
	struct napi_struct napi;
	struct rx_queue rxq[1];

	/*
	 * TX state.
	 */
	int default_tx_ring_size;
	unsigned long tx_desc_sram_addr;
	int tx_desc_sram_size;
	struct tx_queue txq[1];
#ifdef MV643XX_ETH_TX_FAST_REFILL
	int tx_clean_threshold;		/* NAPI polls since last TX reclaim */
#endif
};
L
Linus Torvalds 已提交
341

342

343
/* port register accessors **************************************************/
344
/* Read a 32-bit register at byte @offset in the shared register block. */
static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->shared->base + offset);
}
348

349
/* Write a 32-bit register at byte @offset in the shared register block. */
static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->shared->base + offset);
}
353 354


355
/* rxq/txq helper functions *************************************************/
356
/* Map an embedded rx_queue back to its owning private struct. */
static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
{
	return container_of(rxq, struct mv643xx_eth_private, rxq[0]);
}
360

361 362 363 364 365
/* Map an embedded tx_queue back to its owning private struct. */
static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
{
	return container_of(txq, struct mv643xx_eth_private, txq[0]);
}

366
static void rxq_enable(struct rx_queue *rxq)
367
{
368 369 370
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	wrl(mp, RXQ_COMMAND(mp->port_num), 1);
}
L
Linus Torvalds 已提交
371

372 373 374 375
/*
 * Ask the hardware to stop RX queue 0 (by writing the disable bit in
 * the upper command byte) and busy-wait until the enable bit clears.
 */
static void rxq_disable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	u8 qmask = 1;

	wrl(mp, RXQ_COMMAND(mp->port_num), qmask << 8);

	while (rdl(mp, RXQ_COMMAND(mp->port_num)) & qmask)
		udelay(10);
}

382
static void txq_enable(struct tx_queue *txq)
L
Linus Torvalds 已提交
383
{
384 385
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	wrl(mp, TXQ_COMMAND(mp->port_num), 1);
L
Linus Torvalds 已提交
386 387
}

388
static void txq_disable(struct tx_queue *txq)
L
Linus Torvalds 已提交
389
{
390 391
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	u8 mask = 1;
392

393 394 395 396 397 398 399 400 401 402 403
	wrl(mp, TXQ_COMMAND(mp->port_num), mask << 8);
	while (rdl(mp, TXQ_COMMAND(mp->port_num)) & mask)
		udelay(10);
}

/*
 * Wake the netdev queue once enough TX descriptors are free to hold
 * another maximally-fragmented skb.  The double-underscore prefix
 * suggests the caller is expected to hold mp->lock -- confirm at the
 * call sites.
 */
static void __txq_maybe_wake(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int free_descs = txq->tx_ring_size - txq->tx_desc_count;

	if (free_descs >= MAX_DESCS_PER_SKB)
		netif_wake_queue(mp->dev);
}

406 407

/* rx ***********************************************************************/
408
static void txq_reclaim(struct tx_queue *txq, int force);
409

410
/*
 * Replenish the RX ring: allocate a cache-aligned receive buffer for
 * every empty descriptor slot and hand the descriptors back to the
 * DMA engine.  If the ring is completely empty and allocation keeps
 * failing, arm the rx_oom timer to retry in ~100ms.
 */
static void rxq_refill(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	unsigned long flags;

	spin_lock_irqsave(&mp->lock, flags);

	while (rxq->rx_desc_count < rxq->rx_ring_size) {
		int skb_size;
		struct sk_buff *skb;
		int unaligned;
		int rx;

		/*
		 * Reserve 2+14 bytes for an ethernet header (the
		 * hardware automatically prepends 2 bytes of dummy
		 * data to each received packet), 4 bytes for a VLAN
		 * header, and 4 bytes for the trailing FCS -- 24
		 * bytes total.
		 */
		skb_size = mp->dev->mtu + 24;

		/* Over-allocate so the data pointer can be cache-aligned. */
		skb = dev_alloc_skb(skb_size + dma_get_cache_alignment() - 1);
		if (skb == NULL)
			break;

		unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
		if (unaligned)
			skb_reserve(skb, dma_get_cache_alignment() - unaligned);

		rxq->rx_desc_count++;
		rx = rxq->rx_used_desc;
		rxq->rx_used_desc = (rx + 1) % rxq->rx_ring_size;

		rxq->rx_desc_area[rx].buf_ptr = dma_map_single(NULL, skb->data,
						skb_size, DMA_FROM_DEVICE);
		rxq->rx_desc_area[rx].buf_size = skb_size;
		rxq->rx_skb[rx] = skb;
		wmb();
		/*
		 * Transfer ownership to the DMA engine only after the
		 * buffer pointer/size writes above are visible.
		 */
		rxq->rx_desc_area[rx].cmd_sts = BUFFER_OWNED_BY_DMA |
						RX_ENABLE_INTERRUPT;
		wmb();

		/* Skip the 2 dummy bytes the hardware prepends on RX. */
		skb_reserve(skb, ETH_HW_IP_ALIGN);
	}

	if (rxq->rx_desc_count == 0) {
		rxq->rx_oom.expires = jiffies + (HZ / 10);
		add_timer(&rxq->rx_oom);
	}

	spin_unlock_irqrestore(&mp->lock, flags);
}

464
/* rx_oom timer callback: retry refilling the RX ring after an OOM stall. */
static inline void rxq_refill_timer_wrapper(unsigned long data)
{
	struct rx_queue *rxq = (struct rx_queue *)data;

	rxq_refill(rxq);
}

469
/*
 * Process up to @budget completed RX descriptors: unmap each buffer,
 * update statistics, drop errored or multi-descriptor frames, and
 * hand good frames to the network stack.  Finishes by refilling the
 * ring.  Returns the number of descriptors consumed.
 */
static int rxq_process(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	struct net_device_stats *stats = &mp->dev->stats;
	int rx;

	rx = 0;
	while (rx < budget) {
		struct sk_buff *skb;
		volatile struct rx_desc *rx_desc;
		unsigned int cmd_sts;
		unsigned long flags;

		spin_lock_irqsave(&mp->lock, flags);

		rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];

		cmd_sts = rx_desc->cmd_sts;
		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
			/* Hardware still owns this descriptor; stop. */
			spin_unlock_irqrestore(&mp->lock, flags);
			break;
		}
		rmb();

		skb = rxq->rx_skb[rxq->rx_curr_desc];
		rxq->rx_skb[rxq->rx_curr_desc] = NULL;

		rxq->rx_curr_desc = (rxq->rx_curr_desc + 1) % rxq->rx_ring_size;

		spin_unlock_irqrestore(&mp->lock, flags);

		dma_unmap_single(NULL, rx_desc->buf_ptr + ETH_HW_IP_ALIGN,
					mp->dev->mtu + 24, DMA_FROM_DEVICE);
		rxq->rx_desc_count--;
		rx++;

		/*
		 * Update statistics.
		 * Note byte count includes 4 byte CRC count
		 */
		stats->rx_packets++;
		stats->rx_bytes += rx_desc->byte_cnt - ETH_HW_IP_ALIGN;

		/*
		 * A packet received without both first and last bits
		 * set, or with the error summary bit set, must be
		 * dropped.
		 */
		if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
					(RX_FIRST_DESC | RX_LAST_DESC))
				|| (cmd_sts & ERROR_SUMMARY)) {
			stats->rx_dropped++;
			if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
				(RX_FIRST_DESC | RX_LAST_DESC)) {
				if (net_ratelimit())
					printk(KERN_ERR
						"%s: Received packet spread "
						"on multiple descriptors\n",
						mp->dev->name);
			}
			if (cmd_sts & ERROR_SUMMARY)
				stats->rx_errors++;

			dev_kfree_skb_irq(skb);
		} else {
			/*
			 * The -4 is for the CRC in the trailer of the
			 * received packet
			 */
			skb_put(skb, rx_desc->byte_cnt - ETH_HW_IP_ALIGN - 4);

			if (cmd_sts & LAYER_4_CHECKSUM_OK) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				skb->csum = htons(
					(cmd_sts & 0x0007fff8) >> 3);
			}
			skb->protocol = eth_type_trans(skb, mp->dev);
#ifdef MV643XX_ETH_NAPI
			netif_receive_skb(skb);
#else
			netif_rx(skb);
#endif
		}
		mp->dev->last_rx = jiffies;
	}
	/* Put fresh buffers on the descriptors we just consumed. */
	rxq_refill(rxq);

	return rx;
}

558 559
#ifdef MV643XX_ETH_NAPI
static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
560
{
561 562 563 564
	struct mv643xx_eth_private *mp;
	int rx;

	mp = container_of(napi, struct mv643xx_eth_private, napi);
565

566
#ifdef MV643XX_ETH_TX_FAST_REFILL
567
	if (++mp->tx_clean_threshold > 5) {
568
		txq_reclaim(mp->txq, 0);
569
		mp->tx_clean_threshold = 0;
570
	}
571
#endif
572

573
	rx = rxq_process(mp->rxq, budget);
574

575 576 577 578 579
	if (rx < budget) {
		netif_rx_complete(mp->dev, napi);
		wrl(mp, INT_CAUSE(mp->port_num), 0);
		wrl(mp, INT_CAUSE_EXT(mp->port_num), 0);
		wrl(mp, INT_MASK(mp->port_num), INT_RX | INT_EXT);
580
	}
581

582
	return rx;
583
}
584
#endif
585

586 587 588

/* tx ***********************************************************************/
static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
L
Linus Torvalds 已提交
589
{
590
	int frag;
L
Linus Torvalds 已提交
591

592
	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
593 594
		skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
		if (fragp->size <= 8 && fragp->page_offset & 7)
595
			return 1;
L
Linus Torvalds 已提交
596
	}
597

598 599
	return 0;
}
600

601
static int txq_alloc_desc_index(struct tx_queue *txq)
602 603
{
	int tx_desc_curr;
604

605
	BUG_ON(txq->tx_desc_count >= txq->tx_ring_size);
L
Linus Torvalds 已提交
606

607 608
	tx_desc_curr = txq->tx_curr_desc;
	txq->tx_curr_desc = (tx_desc_curr + 1) % txq->tx_ring_size;
609

610
	BUG_ON(txq->tx_curr_desc == txq->tx_used_desc);
611

612 613
	return tx_desc_curr;
}
614

615
/*
 * Fill one TX descriptor per page fragment of @skb.  Only the last
 * fragment's descriptor carries TX_LAST_DESC/TX_ENABLE_INTERRUPT and
 * the skb pointer, so completion frees the skb exactly once.
 */
static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int frag;

	for (frag = 0; frag < nr_frags; frag++) {
		skb_frag_t *this_frag;
		int tx_index;
		struct tx_desc *desc;

		this_frag = &skb_shinfo(skb)->frags[frag];
		tx_index = txq_alloc_desc_index(txq);
		desc = &txq->tx_desc_area[tx_index];

		/*
		 * The last fragment will generate an interrupt
		 * which will free the skb on TX completion.
		 */
		if (frag == nr_frags - 1) {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA |
					ZERO_PADDING | TX_LAST_DESC |
					TX_ENABLE_INTERRUPT;
			txq->tx_skb[tx_index] = skb;
		} else {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA;
			txq->tx_skb[tx_index] = NULL;
		}

		desc->l4i_chk = 0;
		desc->byte_cnt = this_frag->size;
		desc->buf_ptr = dma_map_page(NULL, this_frag->page,
						this_frag->page_offset,
						this_frag->size,
						DMA_TO_DEVICE);
	}
}

652 653 654 655
/* Reinterpret a 16-bit checksum as big-endian without byte swapping. */
static inline __be16 sum16_as_be(__sum16 sum)
{
	return (__force __be16)sum;
}
L
Linus Torvalds 已提交
656

657
/*
 * Queue @skb for transmission: fragment descriptors first (if any),
 * then the head descriptor, which carries TX_FIRST_DESC and any
 * checksum-offload hints.  The head descriptor's cmd_sts is written
 * last (behind a wmb()) so the DMA engine never sees a partially
 * built chain, then the hardware queue is kicked.
 */
static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int tx_index;
	struct tx_desc *desc;
	u32 cmd_sts;
	int length;

	cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;

	tx_index = txq_alloc_desc_index(txq);
	desc = &txq->tx_desc_area[tx_index];

	if (nr_frags) {
		txq_submit_frag_skb(txq, skb);

		/* Head descriptor covers only the linear part. */
		length = skb_headlen(skb);
		txq->tx_skb[tx_index] = NULL;
	} else {
		/* Single-descriptor frame: head is also the last. */
		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
		length = skb->len;
		txq->tx_skb[tx_index] = skb;
	}

	desc->byte_cnt = length;
	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		BUG_ON(skb->protocol != htons(ETH_P_IP));

		cmd_sts |= GEN_TCP_UDP_CHECKSUM |
			   GEN_IP_V4_CHECKSUM   |
			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;

		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_UDP:
			cmd_sts |= UDP_FRAME;
			desc->l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
			break;
		case IPPROTO_TCP:
			desc->l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
			break;
		default:
			BUG();
		}
	} else {
		/* Errata BTS #50, IHL must be 5 if no HW checksum */
		cmd_sts |= 5 << TX_IHL_SHIFT;
		desc->l4i_chk = 0;
	}

	/* ensure all other descriptors are written before first cmd_sts */
	wmb();
	desc->cmd_sts = cmd_sts;

	/* ensure all descriptors are written before poking hardware */
	wmb();
	txq_enable(txq);

	txq->tx_desc_count += nr_frags + 1;
}

719
static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
L
Linus Torvalds 已提交
720
{
721
	struct mv643xx_eth_private *mp = netdev_priv(dev);
722
	struct net_device_stats *stats = &dev->stats;
723
	struct tx_queue *txq;
724
	unsigned long flags;
725

726
	BUG_ON(netif_queue_stopped(dev));
727

728 729 730 731 732 733 734 735 736
	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
		stats->tx_dropped++;
		printk(KERN_DEBUG "%s: failed to linearize tiny "
				"unaligned fragment\n", dev->name);
		return NETDEV_TX_BUSY;
	}

	spin_lock_irqsave(&mp->lock, flags);

737 738 739
	txq = mp->txq;

	if (txq->tx_ring_size - txq->tx_desc_count < MAX_DESCS_PER_SKB) {
740 741 742 743 744 745
		printk(KERN_ERR "%s: transmit with queue full\n", dev->name);
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&mp->lock, flags);
		return NETDEV_TX_BUSY;
	}

746
	txq_submit_skb(txq, skb);
747 748 749 750
	stats->tx_bytes += skb->len;
	stats->tx_packets++;
	dev->trans_start = jiffies;

751
	if (txq->tx_ring_size - txq->tx_desc_count < MAX_DESCS_PER_SKB)
752 753 754 755 756
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&mp->lock, flags);

	return NETDEV_TX_OK;
L
Linus Torvalds 已提交
757 758
}

759 760

/* mii management interface *************************************************/
761
static int phy_addr_get(struct mv643xx_eth_private *mp);
762

763
/*
 * Read PHY register @phy_reg over the shared SMI interface into
 * *@value, serialized by the shared phy_lock.  On busy/read timeout
 * (~10ms each) the function gives up and leaves *@value untouched --
 * NOTE(review): callers cannot detect this; confirm it is acceptable.
 */
static void read_smi_reg(struct mv643xx_eth_private *mp,
				unsigned int phy_reg, unsigned int *value)
{
	void __iomem *smi_reg = mp->shared_smi->base + SMI_REG;
	int phy_addr = phy_addr_get(mp);
	unsigned long flags;
	int i;

	/* the SMI register is a shared resource */
	spin_lock_irqsave(&mp->shared_smi->phy_lock, flags);

	/* wait for the SMI register to become available */
	for (i = 0; readl(smi_reg) & SMI_BUSY; i++) {
		if (i == 1000) {
			printk("%s: PHY busy timeout\n", mp->dev->name);
			goto out;
		}
		udelay(10);
	}

	writel((phy_addr << 16) | (phy_reg << 21) | SMI_OPCODE_READ, smi_reg);

	/* now wait for the data to be valid */
	for (i = 0; !(readl(smi_reg) & SMI_READ_VALID); i++) {
		if (i == 1000) {
			printk("%s: PHY read timeout\n", mp->dev->name);
			goto out;
		}
		udelay(10);
	}

	*value = readl(smi_reg) & 0xffff;
out:
	spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags);
}

799
/*
 * Write @value to PHY register @phy_reg over the shared SMI
 * interface, serialized by the shared phy_lock.  On busy timeout
 * (~10ms) the write is silently skipped -- NOTE(review): callers
 * cannot detect this; confirm it is acceptable.
 */
static void write_smi_reg(struct mv643xx_eth_private *mp,
				   unsigned int phy_reg, unsigned int value)
{
	void __iomem *smi_reg = mp->shared_smi->base + SMI_REG;
	int phy_addr = phy_addr_get(mp);
	unsigned long flags;
	int i;

	/* the SMI register is a shared resource */
	spin_lock_irqsave(&mp->shared_smi->phy_lock, flags);

	/* wait for the SMI register to become available */
	for (i = 0; readl(smi_reg) & SMI_BUSY; i++) {
		if (i == 1000) {
			printk("%s: PHY busy timeout\n", mp->dev->name);
			goto out;
		}
		udelay(10);
	}

	writel((phy_addr << 16) | (phy_reg << 21) |
		SMI_OPCODE_WRITE | (value & 0xffff), smi_reg);
out:
	spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags);
}
L
Linus Torvalds 已提交
824

825 826

/* mib counters *************************************************************/
827
static void clear_mib_counters(struct mv643xx_eth_private *mp)
828 829 830 831 832
{
	unsigned int port_num = mp->port_num;
	int i;

	/* Perform dummy reads from MIB counters */
833
	for (i = 0; i < 0x80; i += 4)
834
		rdl(mp, MIB_COUNTERS(port_num) + i);
L
Linus Torvalds 已提交
835 836
}

837
/* Read one MIB counter register at byte @offset for this port. */
static inline u32 read_mib(struct mv643xx_eth_private *mp, int offset)
{
	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
}
841

842
/*
 * Accumulate the hardware MIB counters into the software copy in
 * mp->mib_counters.  The 64-bit octet counters are assembled from
 * consecutive low/high 32-bit registers.
 */
static void update_mib_counters(struct mv643xx_eth_private *mp)
{
	struct mib_counters *p = &mp->mib_counters;

	p->good_octets_received += read_mib(mp, 0x00);
	p->good_octets_received += (u64)read_mib(mp, 0x04) << 32;
	p->bad_octets_received += read_mib(mp, 0x08);
	p->internal_mac_transmit_err += read_mib(mp, 0x0c);
	p->good_frames_received += read_mib(mp, 0x10);
	p->bad_frames_received += read_mib(mp, 0x14);
	p->broadcast_frames_received += read_mib(mp, 0x18);
	p->multicast_frames_received += read_mib(mp, 0x1c);
	p->frames_64_octets += read_mib(mp, 0x20);
	p->frames_65_to_127_octets += read_mib(mp, 0x24);
	p->frames_128_to_255_octets += read_mib(mp, 0x28);
	p->frames_256_to_511_octets += read_mib(mp, 0x2c);
	p->frames_512_to_1023_octets += read_mib(mp, 0x30);
	p->frames_1024_to_max_octets += read_mib(mp, 0x34);
	p->good_octets_sent += read_mib(mp, 0x38);
	p->good_octets_sent += (u64)read_mib(mp, 0x3c) << 32;
	p->good_frames_sent += read_mib(mp, 0x40);
	p->excessive_collision += read_mib(mp, 0x44);
	p->multicast_frames_sent += read_mib(mp, 0x48);
	p->broadcast_frames_sent += read_mib(mp, 0x4c);
	p->unrec_mac_control_received += read_mib(mp, 0x50);
	p->fc_sent += read_mib(mp, 0x54);
	p->good_fc_received += read_mib(mp, 0x58);
	p->bad_fc_received += read_mib(mp, 0x5c);
	p->undersize_received += read_mib(mp, 0x60);
	p->fragments_received += read_mib(mp, 0x64);
	p->oversize_received += read_mib(mp, 0x68);
	p->jabber_received += read_mib(mp, 0x6c);
	p->mac_receive_error += read_mib(mp, 0x70);
	p->bad_crc_event += read_mib(mp, 0x74);
	p->collision += read_mib(mp, 0x78);
	p->late_collision += read_mib(mp, 0x7c);
}

880 881

/* ethtool ******************************************************************/
882
/*
 * Description of one ethtool statistic: its name, its width in bytes,
 * and its offset inside either struct net_device (netdev_off >= 0)
 * or struct mv643xx_eth_private (mp_off, used when netdev_off < 0).
 */
struct mv643xx_eth_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int netdev_off;
	int mp_off;
};

889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935
/* SSTAT(m): statistic sourced from the generic net_device stats. */
#define SSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct net_device_stats, m),		\
	  offsetof(struct net_device, stats.m), -1 }

/* MIBSTAT(m): statistic sourced from our hardware MIB counter copy. */
#define MIBSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct mib_counters, m),		\
	  -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }

static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
	SSTAT(rx_packets),
	SSTAT(tx_packets),
	SSTAT(rx_bytes),
	SSTAT(tx_bytes),
	SSTAT(rx_errors),
	SSTAT(tx_errors),
	SSTAT(rx_dropped),
	SSTAT(tx_dropped),
	MIBSTAT(good_octets_received),
	MIBSTAT(bad_octets_received),
	MIBSTAT(internal_mac_transmit_err),
	MIBSTAT(good_frames_received),
	MIBSTAT(bad_frames_received),
	MIBSTAT(broadcast_frames_received),
	MIBSTAT(multicast_frames_received),
	MIBSTAT(frames_64_octets),
	MIBSTAT(frames_65_to_127_octets),
	MIBSTAT(frames_128_to_255_octets),
	MIBSTAT(frames_256_to_511_octets),
	MIBSTAT(frames_512_to_1023_octets),
	MIBSTAT(frames_1024_to_max_octets),
	MIBSTAT(good_octets_sent),
	MIBSTAT(good_frames_sent),
	MIBSTAT(excessive_collision),
	MIBSTAT(multicast_frames_sent),
	MIBSTAT(broadcast_frames_sent),
	MIBSTAT(unrec_mac_control_received),
	MIBSTAT(fc_sent),
	MIBSTAT(good_fc_received),
	MIBSTAT(bad_fc_received),
	MIBSTAT(undersize_received),
	MIBSTAT(fragments_received),
	MIBSTAT(oversize_received),
	MIBSTAT(jabber_received),
	MIBSTAT(mac_receive_error),
	MIBSTAT(bad_crc_event),
	MIBSTAT(collision),
	MIBSTAT(late_collision),
};

938
static int mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
939
{
940
	struct mv643xx_eth_private *mp = netdev_priv(dev);
941 942 943 944 945 946 947 948 949 950 951 952 953
	int err;

	spin_lock_irq(&mp->lock);
	err = mii_ethtool_gset(&mp->mii, cmd);
	spin_unlock_irq(&mp->lock);

	/* The PHY may support 1000baseT_Half, but the mv643xx does not */
	cmd->supported &= ~SUPPORTED_1000baseT_Half;
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	return err;
}

954
static int mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
L
Linus Torvalds 已提交
955
{
956
	struct mv643xx_eth_private *mp = netdev_priv(dev);
957 958
	int err;

959 960 961
	spin_lock_irq(&mp->lock);
	err = mii_ethtool_sset(&mp->mii, cmd);
	spin_unlock_irq(&mp->lock);
962

963 964
	return err;
}
L
Linus Torvalds 已提交
965

966
static void mv643xx_eth_get_drvinfo(struct net_device *netdev,
967 968
				struct ethtool_drvinfo *drvinfo)
{
969 970
	strncpy(drvinfo->driver,  mv643xx_eth_driver_name, 32);
	strncpy(drvinfo->version, mv643xx_eth_driver_version, 32);
971 972
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, "mv643xx", 32);
973
	drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
974
}
L
Linus Torvalds 已提交
975

976 977
static int mv643xx_eth_nway_restart(struct net_device *dev)
{
978
	struct mv643xx_eth_private *mp = netdev_priv(dev);
L
Linus Torvalds 已提交
979

980 981
	return mii_nway_restart(&mp->mii);
}
L
Linus Torvalds 已提交
982

983 984
static u32 mv643xx_eth_get_link(struct net_device *dev)
{
985
	struct mv643xx_eth_private *mp = netdev_priv(dev);
L
Linus Torvalds 已提交
986

987 988
	return mii_link_ok(&mp->mii);
}
L
Linus Torvalds 已提交
989

990
/*
 * ethtool get_strings: copy the statistics name table into @data,
 * one ETH_GSTRING_LEN-sized slot per counter.  Only ETH_SS_STATS is
 * handled; other string sets are silently ignored.
 */
static void mv643xx_eth_get_strings(struct net_device *netdev, uint32_t stringset,
				uint8_t *data)
{
	int i;

	switch(stringset) {
	case ETH_SS_STATS:
		for (i=0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
				mv643xx_eth_stats[i].stat_string,
				ETH_GSTRING_LEN);
		}
		break;
	}
}
L
Linus Torvalds 已提交
1005

1006
/*
 * ethtool get_ethtool_stats: refresh the hardware MIB counters and copy
 * each entry of mv643xx_eth_stats[] into @data.  Each table entry points
 * either into struct net_device (netdev_off >= 0) or into the private
 * struct (mp_off), and is either a u32 or a u64 (sizeof_stat).
 *
 * Fix: use netdev_priv(netdev) instead of poking netdev->priv directly --
 * every other function in this file goes through the netdev_priv()
 * accessor, and direct ->priv access breaks if the private area is not
 * stored in that field.
 */
static void mv643xx_eth_get_ethtool_stats(struct net_device *netdev,
				struct ethtool_stats *stats, uint64_t *data)
{
	struct mv643xx_eth_private *mp = netdev_priv(netdev);
	int i;

	/* pull the latest counter values out of the hardware first */
	update_mib_counters(mp);

	for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
		const struct mv643xx_eth_stats *stat;
		void *p;

		stat = mv643xx_eth_stats + i;

		/* netdev_off >= 0 selects a net_device field, else mp */
		if (stat->netdev_off >= 0)
			p = ((void *)mp->dev) + stat->netdev_off;
		else
			p = ((void *)mp) + stat->mp_off;

		data[i] = (stat->sizeof_stat == 8) ?
				*(uint64_t *)p : *(uint32_t *)p;
	}
}
L
Linus Torvalds 已提交
1029

1030
static int mv643xx_eth_get_sset_count(struct net_device *netdev, int sset)
1031 1032 1033
{
	switch (sset) {
	case ETH_SS_STATS:
1034
		return ARRAY_SIZE(mv643xx_eth_stats);
1035 1036 1037 1038
	default:
		return -EOPNOTSUPP;
	}
}
L
Linus Torvalds 已提交
1039

1040 1041 1042 1043
/* ethtool operations table wired into the net_device at probe time. */
static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
	.get_settings           = mv643xx_eth_get_settings,
	.set_settings           = mv643xx_eth_set_settings,
	.get_drvinfo            = mv643xx_eth_get_drvinfo,
	.get_link               = mv643xx_eth_get_link,
	.set_sg			= ethtool_op_set_sg,
	.get_sset_count		= mv643xx_eth_get_sset_count,
	.get_ethtool_stats      = mv643xx_eth_get_ethtool_stats,
	.get_strings            = mv643xx_eth_get_strings,
	.nway_reset		= mv643xx_eth_nway_restart,
};
L
Linus Torvalds 已提交
1051

1052

1053
/* address handling *********************************************************/
1054
/*
 * Read the port's programmed unicast MAC address out of the
 * MAC_ADDR_HIGH/LOW registers into @addr (6 bytes, network order:
 * high register holds bytes 0-3, low register bytes 4-5).
 */
static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
{
	unsigned int port_num = mp->port_num;
	unsigned int mac_h;
	unsigned int mac_l;

	mac_h = rdl(mp, MAC_ADDR_HIGH(port_num));
	mac_l = rdl(mp, MAC_ADDR_LOW(port_num));

	addr[0] = (mac_h >> 24) & 0xff;
	addr[1] = (mac_h >> 16) & 0xff;
	addr[2] = (mac_h >> 8) & 0xff;
	addr[3] = mac_h & 0xff;
	addr[4] = (mac_l >> 8) & 0xff;
	addr[5] = mac_l & 0xff;
}
L
Linus Torvalds 已提交
1070

1071
/*
 * Clear all hardware address filter tables for this port: the 16-byte
 * unicast table and the two 256-byte multicast tables, written one
 * 32-bit register (4 table entries) at a time.
 */
static void init_mac_tables(struct mv643xx_eth_private *mp)
{
	unsigned int port_num = mp->port_num;
	int table_index;

	/* Clear DA filter unicast table (Ex_dFUT) */
	for (table_index = 0; table_index <= 0xC; table_index += 4)
		wrl(mp, UNICAST_TABLE(port_num) + table_index, 0);

	for (table_index = 0; table_index <= 0xFC; table_index += 4) {
		/* Clear DA filter special multicast table (Ex_dFSMT) */
		wrl(mp, SPECIAL_MCAST_TABLE(port_num) + table_index, 0);
		/* Clear DA filter other multicast table (Ex_dFOMT) */
		wrl(mp, OTHER_MCAST_TABLE(port_num) + table_index, 0);
	}
}
1087

1088
/*
 * Set the "accept frame" bit for one entry of an address filter table.
 * Table entries are packed four per 32-bit register, one byte each;
 * bit 0 of the byte is the accept bit.
 */
static void set_filter_table_entry(struct mv643xx_eth_private *mp,
					    int table, unsigned char entry)
{
	unsigned int table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	tbl_offset = (entry / 4) * 4;	/* Register offset of DA table entry */
	reg_offset = entry % 4;		/* Entry offset within the register */

	/* Set "accepts frame bit" at specified table entry */
	table_reg = rdl(mp, table + tbl_offset);
	table_reg |= 0x01 << (8 * reg_offset);
	wrl(mp, table + tbl_offset, table_reg);
}

1104
/*
 * Program @addr as the port's unicast MAC address: write the
 * MAC_ADDR_HIGH/LOW registers (inverse of uc_addr_get()) and mark the
 * corresponding unicast filter table entry (indexed by the low nibble
 * of the last address byte) as "accept".
 */
static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
{
	unsigned int port_num = mp->port_num;
	unsigned int mac_h;
	unsigned int mac_l;
	int table;

	mac_l = (addr[4] << 8) | (addr[5]);
	mac_h = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
							(addr[3] << 0);

	wrl(mp, MAC_ADDR_LOW(port_num), mac_l);
	wrl(mp, MAC_ADDR_HIGH(port_num), mac_h);

	/* Accept frames with this address */
	table = UNICAST_TABLE(port_num);
	set_filter_table_entry(mp, table, addr[5] & 0x0f);
}

1123
/*
 * Re-program the hardware with dev->dev_addr: wipe all filter tables,
 * then install the current unicast address.  Note this also clears any
 * multicast filter state until set_multicast_list() runs again.
 */
static void mv643xx_eth_update_mac_address(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	init_mac_tables(mp);
	uc_addr_set(mp, dev->dev_addr);
}
L
Linus Torvalds 已提交
1130

1131
static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
L
Linus Torvalds 已提交
1132
{
1133
	int i;
L
Linus Torvalds 已提交
1134

1135 1136 1137 1138
	for (i = 0; i < 6; i++)
		/* +2 is for the offset of the HW addr type */
		dev->dev_addr[i] = ((unsigned char *)addr)[i + 2];
	mv643xx_eth_update_mac_address(dev);
L
Linus Torvalds 已提交
1139 1140 1141
	return 0;
}

1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159
/*
 * Hash a 6-byte MAC address with an 8-bit CRC (polynomial
 * x^8 + x^2 + x + 1, i.e. 0x107), MSB first.  The result indexes the
 * "other" multicast filter table, so it is always in [0, 255].
 */
static int addr_crc(unsigned char *addr)
{
	int rem = 0;	/* running remainder of the polynomial division */
	int i;

	for (i = 0; i < 6; i++) {
		int bit;

		/* bring in the next message byte */
		rem = (rem ^ addr[i]) << 8;

		/* reduce modulo 0x107, most-significant bit first */
		for (bit = 7; bit >= 0; bit--)
			if (rem & (0x100 << bit))
				rem ^= 0x107 << bit;
	}

	return rem;
}

1160
/*
 * Enable reception of one multicast address.  Addresses of the form
 * 01:00:5E:00:00:xx go into the "special" multicast table indexed by
 * the last byte directly; everything else goes into the "other" table
 * indexed by an 8-bit CRC of the address.
 */
static void mc_addr(struct mv643xx_eth_private *mp, unsigned char *addr)
{
	unsigned int port_num = mp->port_num;
	int table;
	int crc;

	if ((addr[0] == 0x01) && (addr[1] == 0x00) &&
	    (addr[2] == 0x5E) && (addr[3] == 0x00) && (addr[4] == 0x00)) {
		table = SPECIAL_MCAST_TABLE(port_num);
		set_filter_table_entry(mp, table, addr[5]);
		return;
	}

	crc = addr_crc(addr);

	table = OTHER_MCAST_TABLE(port_num);
	set_filter_table_entry(mp, table, crc);
}

1179
/*
 * Rebuild the multicast filter tables from dev->mc_list.  In
 * promiscuous/allmulti mode both tables are filled with "accept"
 * entries; otherwise both tables are cleared and each address on the
 * list (at most 256) is added individually via mc_addr().
 */
static void set_multicast_list(struct net_device *dev)
{

	struct dev_mc_list	*mc_list;
	int			i;
	int			table_index;
	struct mv643xx_eth_private	*mp = netdev_priv(dev);
	unsigned int		port_num = mp->port_num;

	/* If the device is in promiscuous mode or in all multicast mode,
	 * we will fully populate both multicast tables with accept.
	 * This is guaranteed to yield a match on all multicast addresses...
	 */
	if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI)) {
		for (table_index = 0; table_index <= 0xFC; table_index += 4) {
			/* Set all entries in DA filter special multicast
			 * table (Ex_dFSMT)
			 * Set for ETH_Q0 for now
			 * Bits
			 * 0	  Accept=1, Drop=0
			 * 3-1  Queue	 ETH_Q0=0
			 * 7-4  Reserved = 0;
			 */
			wrl(mp, SPECIAL_MCAST_TABLE(port_num) + table_index, 0x01010101);

			/* Set all entries in DA filter other multicast
			 * table (Ex_dFOMT)
			 * Set for ETH_Q0 for now
			 * Bits
			 * 0	  Accept=1, Drop=0
			 * 3-1  Queue	 ETH_Q0=0
			 * 7-4  Reserved = 0;
			 */
			wrl(mp, OTHER_MCAST_TABLE(port_num) + table_index, 0x01010101);
		}
		return;
	}

	/* We will clear out multicast tables every time we get the list.
	 * Then add the entire new list...
	 */
	for (table_index = 0; table_index <= 0xFC; table_index += 4) {
		/* Clear DA filter special multicast table (Ex_dFSMT) */
		wrl(mp, SPECIAL_MCAST_TABLE(port_num) + table_index, 0);

		/* Clear DA filter other multicast table (Ex_dFOMT) */
		wrl(mp, OTHER_MCAST_TABLE(port_num) + table_index, 0);
	}

	/* Get pointer to net_device multicast list and add each one... */
	for (i = 0, mc_list = dev->mc_list;
			(i < 256) && (mc_list != NULL) && (i < dev->mc_count);
			i++, mc_list = mc_list->next)
		if (mc_list->dmi_addrlen == 6)
			mc_addr(mp, mc_list->dmi_addr);
}

1236
/*
 * net_device set_rx_mode hook: toggle unicast promiscuous mode in the
 * port config register according to IFF_PROMISC, then refresh the
 * multicast filter tables.
 */
static void mv643xx_eth_set_rx_mode(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 config_reg;

	config_reg = rdl(mp, PORT_CONFIG(mp->port_num));
	if (dev->flags & IFF_PROMISC)
		config_reg |= UNICAST_PROMISCUOUS_MODE;
	else
		config_reg &= ~UNICAST_PROMISCUOUS_MODE;
	wrl(mp, PORT_CONFIG(mp->port_num), config_reg);

	set_multicast_list(dev);
}
1250 1251


1252
/* rx/tx queue initialisation ***********************************************/
1253
/*
 * Allocate and initialise the RX descriptor ring and its shadow skb
 * array.  The ring lives in on-chip SRAM when it fits
 * (rx_desc_sram_size), otherwise in DMA-coherent memory.  Descriptors
 * are chained into a circular list via next_desc_ptr.  Also arms the
 * out-of-memory refill timer (not started here).
 * Returns 0 on success, -ENOMEM on any allocation failure.
 */
static int rxq_init(struct mv643xx_eth_private *mp)
{
	struct rx_queue *rxq = mp->rxq;
	struct rx_desc *rx_desc;
	int size;
	int i;

	rxq->rx_ring_size = mp->default_rx_ring_size;

	rxq->rx_desc_count = 0;
	rxq->rx_curr_desc = 0;
	rxq->rx_used_desc = 0;

	size = rxq->rx_ring_size * sizeof(struct rx_desc);

	/* prefer on-chip SRAM for the descriptor ring when it fits */
	if (size <= mp->rx_desc_sram_size) {
		rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
						mp->rx_desc_sram_size);
		rxq->rx_desc_dma = mp->rx_desc_sram_addr;
	} else {
		rxq->rx_desc_area = dma_alloc_coherent(NULL, size,
							&rxq->rx_desc_dma,
							GFP_KERNEL);
	}

	if (rxq->rx_desc_area == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate rx ring (%d bytes)\n", size);
		goto out;
	}
	memset(rxq->rx_desc_area, 0, size);

	rxq->rx_desc_area_size = size;
	rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb),
								GFP_KERNEL);
	if (rxq->rx_skb == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate rx skb ring\n");
		goto out_free;
	}

	/* link every descriptor to the next, last wraps to first */
	rx_desc = (struct rx_desc *)rxq->rx_desc_area;
	for (i = 0; i < rxq->rx_ring_size; i++) {
		int nexti = (i + 1) % rxq->rx_ring_size;
		rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
					nexti * sizeof(struct rx_desc);
	}

	/* timer used to retry skb refill after an OOM condition */
	init_timer(&rxq->rx_oom);
	rxq->rx_oom.data = (unsigned long)rxq;
	rxq->rx_oom.function = rxq_refill_timer_wrapper;

	return 0;


out_free:
	if (size <= mp->rx_desc_sram_size)
		iounmap(rxq->rx_desc_area);
	else
		dma_free_coherent(NULL, size,
				  rxq->rx_desc_area,
				  rxq->rx_desc_dma);

out:
	return -ENOMEM;
}
1319

1320
/*
 * Tear down the RX queue: stop it, cancel the OOM refill timer, free
 * every outstanding skb, release the descriptor ring (SRAM unmap or
 * coherent free, mirroring rxq_init()), and free the skb array.
 */
static void rxq_deinit(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	int i;

	rxq_disable(rxq);

	del_timer_sync(&rxq->rx_oom);

	for (i = 0; i < rxq->rx_ring_size; i++) {
		if (rxq->rx_skb[i]) {
			dev_kfree_skb(rxq->rx_skb[i]);
			rxq->rx_desc_count--;
		}
	}

	/* any remaining count means the ring and skb array disagreed */
	if (rxq->rx_desc_count) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "error freeing rx ring -- %d skbs stuck\n",
			   rxq->rx_desc_count);
	}

	if (rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
		iounmap(rxq->rx_desc_area);
	else
		dma_free_coherent(NULL, rxq->rx_desc_area_size,
				  rxq->rx_desc_area, rxq->rx_desc_dma);

	kfree(rxq->rx_skb);
}
L
Linus Torvalds 已提交
1350

1351
/*
 * Allocate and initialise the TX descriptor ring and its shadow skb
 * array; mirrors rxq_init().  Ring is placed in on-chip SRAM when it
 * fits, otherwise in DMA-coherent memory, and the descriptors are
 * chained circularly.  Returns 0 on success, -ENOMEM on failure.
 */
static int txq_init(struct mv643xx_eth_private *mp)
{
	struct tx_queue *txq = mp->txq;
	struct tx_desc *tx_desc;
	int size;
	int i;

	txq->tx_ring_size = mp->default_tx_ring_size;

	txq->tx_desc_count = 0;
	txq->tx_curr_desc = 0;
	txq->tx_used_desc = 0;

	size = txq->tx_ring_size * sizeof(struct tx_desc);

	/* prefer on-chip SRAM for the descriptor ring when it fits */
	if (size <= mp->tx_desc_sram_size) {
		txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
						mp->tx_desc_sram_size);
		txq->tx_desc_dma = mp->tx_desc_sram_addr;
	} else {
		txq->tx_desc_area = dma_alloc_coherent(NULL, size,
							&txq->tx_desc_dma,
							GFP_KERNEL);
	}

	if (txq->tx_desc_area == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate tx ring (%d bytes)\n", size);
		goto out;
	}
	memset(txq->tx_desc_area, 0, size);

	txq->tx_desc_area_size = size;
	txq->tx_skb = kmalloc(txq->tx_ring_size * sizeof(*txq->tx_skb),
								GFP_KERNEL);
	if (txq->tx_skb == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate tx skb ring\n");
		goto out_free;
	}

	/* link every descriptor to the next, last wraps to first */
	tx_desc = (struct tx_desc *)txq->tx_desc_area;
	for (i = 0; i < txq->tx_ring_size; i++) {
		int nexti = (i + 1) % txq->tx_ring_size;
		tx_desc[i].next_desc_ptr = txq->tx_desc_dma +
					nexti * sizeof(struct tx_desc);
	}

	return 0;


out_free:
	if (size <= mp->tx_desc_sram_size)
		iounmap(txq->tx_desc_area);
	else
		dma_free_coherent(NULL, size,
				  txq->tx_desc_area,
				  txq->tx_desc_dma);

out:
	return -ENOMEM;
}
L
Linus Torvalds 已提交
1413

1414
/*
 * Reclaim completed TX descriptors: unmap their DMA buffers and free
 * the attached skbs.  With @force set, descriptors still owned by the
 * DMA engine are reclaimed too (used during teardown).  mp->lock is
 * held while walking the ring but deliberately dropped around the
 * unmap/free of each buffer.
 */
static void txq_reclaim(struct tx_queue *txq, int force)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	unsigned long flags;

	spin_lock_irqsave(&mp->lock, flags);
	while (txq->tx_desc_count > 0) {
		int tx_index;
		struct tx_desc *desc;
		u32 cmd_sts;
		struct sk_buff *skb;
		dma_addr_t addr;
		int count;

		tx_index = txq->tx_used_desc;
		desc = &txq->tx_desc_area[tx_index];
		cmd_sts = desc->cmd_sts;

		/* stop at the first descriptor the hardware still owns */
		if (!force && (cmd_sts & BUFFER_OWNED_BY_DMA))
			break;

		txq->tx_used_desc = (tx_index + 1) % txq->tx_ring_size;
		txq->tx_desc_count--;

		/* snapshot everything we need before dropping the lock */
		addr = desc->buf_ptr;
		count = desc->byte_cnt;
		skb = txq->tx_skb[tx_index];
		txq->tx_skb[tx_index] = NULL;

		if (cmd_sts & ERROR_SUMMARY) {
			dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n");
			mp->dev->stats.tx_errors++;
		}

		/*
		 * Drop mp->lock while we free the skb.
		 */
		spin_unlock_irqrestore(&mp->lock, flags);

		/* first descriptor was mapped with dma_map_single(),
		 * fragments with dma_map_page() */
		if (cmd_sts & TX_FIRST_DESC)
			dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
		else
			dma_unmap_page(NULL, addr, count, DMA_TO_DEVICE);

		if (skb)
			dev_kfree_skb_irq(skb);

		spin_lock_irqsave(&mp->lock, flags);
	}
	spin_unlock_irqrestore(&mp->lock, flags);
}
L
Linus Torvalds 已提交
1465

1466
/*
 * Tear down the TX queue: stop it, force-reclaim every descriptor,
 * then release the descriptor ring (SRAM unmap or coherent free,
 * mirroring txq_init()) and the skb array.
 */
static void txq_deinit(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);

	txq_disable(txq);
	txq_reclaim(txq, 1);

	/* after a forced reclaim the ring must be completely drained */
	BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);

	if (txq->tx_desc_area_size <= mp->tx_desc_sram_size)
		iounmap(txq->tx_desc_area);
	else
		dma_free_coherent(NULL, txq->tx_desc_area_size,
				  txq->tx_desc_area, txq->tx_desc_dma);

	kfree(txq->tx_skb);
}
L
Linus Torvalds 已提交
1483 1484


1485
/* netdev ops and related ***************************************************/
1486
static void port_reset(struct mv643xx_eth_private *mp);
L
Linus Torvalds 已提交
1487

1488
/*
 * Rewrite the port serial control register (PSCR) to match the
 * speed/duplex in @ecmd.  If the port is currently enabled it must be
 * disabled before the new value takes effect, so the TX queue is
 * stopped around the change.  The double write of pscr_n preserves the
 * original code's sequence -- do not "simplify" it away.
 */
static void mv643xx_eth_update_pscr(struct mv643xx_eth_private *mp,
				    struct ethtool_cmd *ecmd)
{
	u32 pscr_o;
	u32 pscr_n;

	pscr_o = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));

	/* clear speed, duplex and rx buffer size fields */
	pscr_n = pscr_o & ~(SET_MII_SPEED_TO_100   |
			    SET_GMII_SPEED_TO_1000 |
			    SET_FULL_DUPLEX_MODE   |
			    MAX_RX_PACKET_MASK);

	if (ecmd->speed == SPEED_1000) {
		pscr_n |= SET_GMII_SPEED_TO_1000 | MAX_RX_PACKET_9700BYTE;
	} else {
		if (ecmd->speed == SPEED_100)
			pscr_n |= SET_MII_SPEED_TO_100;
		pscr_n |= MAX_RX_PACKET_1522BYTE;
	}

	if (ecmd->duplex == DUPLEX_FULL)
		pscr_n |= SET_FULL_DUPLEX_MODE;

	if (pscr_n != pscr_o) {
		/* port disabled: safe to write the new value directly */
		if ((pscr_o & SERIAL_PORT_ENABLE) == 0)
			wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n);
		else {
			txq_disable(mp->txq);
			pscr_o &= ~SERIAL_PORT_ENABLE;
			wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_o);
			wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n);
			wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n);
			txq_enable(mp->txq);
		}
	}
}
1526

1527 1528 1529
/*
 * Top-level interrupt handler.  Reads and acknowledges the cause
 * registers, handles link/PHY state changes, then dispatches RX work
 * (to NAPI or inline, depending on MV643XX_ETH_NAPI) and reclaims
 * completed TX descriptors.  Returns IRQ_NONE when neither cause
 * register had a bit set (shared-IRQ / coalescing case).
 */
static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 int_cause, int_cause_ext = 0;

	/* Read interrupt cause registers */
	int_cause = rdl(mp, INT_CAUSE(mp->port_num)) & (INT_RX | INT_EXT);
	if (int_cause & INT_EXT) {
		int_cause_ext = rdl(mp, INT_CAUSE_EXT(mp->port_num))
				& (INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX);
		/* ack only the bits we are about to handle */
		wrl(mp, INT_CAUSE_EXT(mp->port_num), ~int_cause_ext);
	}

	/* PHY status changed */
	if (int_cause_ext & (INT_EXT_LINK | INT_EXT_PHY)) {
		if (mii_link_ok(&mp->mii)) {
			struct ethtool_cmd cmd;

			/* link came up: sync PSCR with negotiated params */
			mii_ethtool_gset(&mp->mii, &cmd);
			mv643xx_eth_update_pscr(mp, &cmd);
			txq_enable(mp->txq);
			if (!netif_carrier_ok(dev)) {
				netif_carrier_on(dev);
				__txq_maybe_wake(mp->txq);
			}
		} else if (netif_carrier_ok(dev)) {
			netif_stop_queue(dev);
			netif_carrier_off(dev);
		}
	}

#ifdef MV643XX_ETH_NAPI
	if (int_cause & INT_RX) {
		/* schedule the NAPI poll routine to maintain port */
		wrl(mp, INT_MASK(mp->port_num), 0x00000000);

		/* wait for previous write to complete */
		rdl(mp, INT_MASK(mp->port_num));

		netif_rx_schedule(dev, &mp->napi);
	}
#else
	if (int_cause & INT_RX)
		rxq_process(mp->rxq, INT_MAX);
#endif
	if (int_cause_ext & INT_EXT_TX) {
		txq_reclaim(mp->txq, 0);
		__txq_maybe_wake(mp->txq);
	}

	/*
	 * If no real interrupt occurred, exit.
	 * This can happen when using gigE interrupt coalescing mechanism.
	 */
	if ((int_cause == 0x0) && (int_cause_ext == 0x0))
		return IRQ_NONE;

	return IRQ_HANDLED;
}

1588
/*
 * Reset the PHY by setting bit 15 of MII register 0 (BMCR) and busy-
 * waiting for the PHY to clear it again.
 * NOTE(review): this wait is unbounded -- a PHY that never clears the
 * reset bit hangs the kernel here; consider adding a timeout.
 */
static void phy_reset(struct mv643xx_eth_private *mp)
{
	unsigned int phy_reg_data;

	/* Reset the PHY */
	read_smi_reg(mp, 0, &phy_reg_data);
	phy_reg_data |= 0x8000;	/* Set bit 15 to reset the PHY */
	write_smi_reg(mp, 0, phy_reg_data);

	/* wait for PHY to come out of reset */
	do {
		udelay(1);
		read_smi_reg(mp, 0, &phy_reg_data);
	} while (phy_reg_data & 0x8000);
}

1604
/*
 * Bring the port hardware up: program the serial control register,
 * SDMA config, restore link settings around a PHY reset, point the
 * hardware at the current TX/RX descriptors, install the MAC address,
 * set the port config registers and enable the RX queue.  The single-
 * iteration for-loops keep the shape of the per-queue setup even
 * though only one TX and one RX queue exist here.
 */
static void port_start(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 pscr;
	struct ethtool_cmd ethtool_cmd;
	int i;

	/*
	 * Configure basic link parameters.
	 */
	pscr = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
	pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS);
	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
	pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
		DISABLE_AUTO_NEG_SPEED_GMII    |
		DISABLE_AUTO_NEG_FOR_DUPLEX    |
		DO_NOT_FORCE_LINK_FAIL	       |
		SERIAL_PORT_CONTROL_RESERVED;
	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
	pscr |= SERIAL_PORT_ENABLE;
	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);

	wrl(mp, SDMA_CONFIG(mp->port_num), PORT_SDMA_CONFIG_DEFAULT_VALUE);

	/* save settings, reset the PHY, then restore them */
	mv643xx_eth_get_settings(dev, &ethtool_cmd);
	phy_reset(mp);
	mv643xx_eth_set_settings(dev, &ethtool_cmd);

	/*
	 * Configure TX path and queues.
	 */
	wrl(mp, TX_BW_MTU(mp->port_num), 0);
	for (i = 0; i < 1; i++) {
		struct tx_queue *txq = mp->txq;
		int off = TXQ_CURRENT_DESC_PTR(mp->port_num);
		u32 addr;

		addr = (u32)txq->tx_desc_dma;
		addr += txq->tx_curr_desc * sizeof(struct tx_desc);
		wrl(mp, off, addr);
	}


	/* Add the assigned Ethernet address to the port's address table */
	uc_addr_set(mp, dev->dev_addr);

	/*
	 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
	 * frames to RX queue #0.
	 */
	wrl(mp, PORT_CONFIG(mp->port_num), 0x00000000);

	/*
	 * Treat BPDUs as normal multicasts, and disable partition mode.
	 */
	wrl(mp, PORT_CONFIG_EXT(mp->port_num), 0x00000000);

	/*
	 * Enable the receive queue.
	 */
	for (i = 0; i < 1; i++) {
		struct rx_queue *rxq = mp->rxq;
		int off = RXQ_CURRENT_DESC_PTR(mp->port_num);
		u32 addr;

		addr = (u32)rxq->rx_desc_dma;
		addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
		wrl(mp, off, addr);

		rxq_enable(rxq);
	}
}

1676
/*
 * Program the RX interrupt coalescing delay (@delay in usec, scaled by
 * the shared t_clk) into bits 8-21 of the SDMA config register,
 * preserving all other bits.
 */
static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
{
	unsigned int port_num = mp->port_num;
	unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;

	/* Set RX Coalescing mechanism */
	wrl(mp, SDMA_CONFIG(port_num),
		((coal & 0x3fff) << 8) |
		(rdl(mp, SDMA_CONFIG(port_num))
			& 0xffc000ff));
}

1688
/*
 * Program the TX interrupt coalescing delay (@delay in usec, scaled by
 * the shared t_clk) into the TX FIFO urgent threshold register.
 */
static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
{
	unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;

	/* Set TX Coalescing mechanism */
	wrl(mp, TX_FIFO_URGENT_THRESHOLD(mp->port_num), coal << 4);
}

1696
/* One-time port bring-up: reset the port, then clear all filter tables. */
static void port_init(struct mv643xx_eth_private *mp)
{
	port_reset(mp);

	init_mac_tables(mp);
}

1703
/*
 * net_device open hook.  Acknowledges stale interrupts, grabs the IRQ,
 * initialises the port and both queue rings, starts the hardware, and
 * finally unmasks the interrupts it will handle.  Error paths unwind
 * in reverse order.  Returns 0 on success or a negative errno.
 */
static int mv643xx_eth_open(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	unsigned int port_num = mp->port_num;
	int err;

	/* Clear any pending ethernet port interrupts */
	wrl(mp, INT_CAUSE(port_num), 0);
	wrl(mp, INT_CAUSE_EXT(port_num), 0);
	/* wait for previous write to complete */
	rdl(mp, INT_CAUSE_EXT(port_num));

	err = request_irq(dev->irq, mv643xx_eth_int_handler,
			IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
	if (err) {
		printk(KERN_ERR "%s: Can not assign IRQ\n", dev->name);
		return -EAGAIN;
	}

	port_init(mp);

	err = rxq_init(mp);
	if (err)
		goto out_free_irq;
	/* pre-load the RX ring with receive buffers */
	rxq_refill(mp->rxq);

	err = txq_init(mp);
	if (err)
		goto out_free_rx_skb;

#ifdef MV643XX_ETH_NAPI
	napi_enable(&mp->napi);
#endif

	port_start(dev);

	/* no interrupt coalescing by default */
	set_rx_coal(mp, 0);
	set_tx_coal(mp, 0);

	/* Unmask phy and link status changes interrupts */
	wrl(mp, INT_MASK_EXT(port_num), INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX);

	/* Unmask RX buffer and TX end interrupt */
	wrl(mp, INT_MASK(port_num), INT_RX | INT_EXT);

	return 0;


out_free_rx_skb:
	rxq_deinit(mp->rxq);
out_free_irq:
	free_irq(dev->irq, dev);

	return err;
}

1759
/*
 * Quiesce the port: stop both queues, wait for the TX FIFO to drain,
 * clear the MIB counters, and clear the enable/link-force bits in the
 * serial control register.
 */
static void port_reset(struct mv643xx_eth_private *mp)
{
	unsigned int port_num = mp->port_num;
	unsigned int reg_data;

	txq_disable(mp->txq);
	rxq_disable(mp->rxq);
	/* busy-wait until the hardware reports the TX FIFO empty */
	while (!(rdl(mp, PORT_STATUS(mp->port_num)) & TX_FIFO_EMPTY))
		udelay(10);

	/* Clear all MIB counters */
	clear_mib_counters(mp);

	/* Reset the Enable bit in the Configuration Register */
	reg_data = rdl(mp, PORT_SERIAL_CONTROL(port_num));
	reg_data &= ~(SERIAL_PORT_ENABLE		|
			DO_NOT_FORCE_LINK_FAIL	|
			FORCE_LINK_PASS);
	wrl(mp, PORT_SERIAL_CONTROL(port_num), reg_data);
}

1780
/*
 * net_device stop hook: mask all port interrupts, stop NAPI and the
 * software queue, reset the hardware, free both rings and release the
 * IRQ.  Always returns 0.
 */
static int mv643xx_eth_stop(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	unsigned int port_num = mp->port_num;

	/* Mask all interrupts on ethernet port */
	wrl(mp, INT_MASK(port_num), 0x00000000);
	/* wait for previous write to complete */
	rdl(mp, INT_MASK(port_num));

#ifdef MV643XX_ETH_NAPI
	napi_disable(&mp->napi);
#endif
	netif_carrier_off(dev);
	netif_stop_queue(dev);

	port_reset(mp);

	txq_deinit(mp->txq);
	rxq_deinit(mp->rxq);

	free_irq(dev->irq, dev);

	return 0;
}

1806
/* net_device ioctl hook: forward MII ioctls to the generic MII library. */
static int mv643xx_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	return generic_mii_ioctl(&mp->mii, if_mii(ifr), cmd, NULL);
}

1813
/*
 * net_device change_mtu hook.  Valid range is 64..9500 bytes.  If the
 * interface is running it is bounced (stop + open) so the RX ring is
 * re-filled with skbs sized for the new MTU.
 * NOTE(review): a failed re-open is only logged; the function still
 * returns 0, leaving the interface down.
 */
static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu > 9500) || (new_mtu < 64))
		return -EINVAL;

	dev->mtu = new_mtu;
	if (!netif_running(dev))
		return 0;

	/*
	 * Stop and then re-open the interface. This will allocate RX
	 * skbs of the new MTU.
	 * There is a possible danger that the open will not succeed,
	 * due to memory being full, which might fail the open function.
	 */
	mv643xx_eth_stop(dev);
	if (mv643xx_eth_open(dev)) {
		printk(KERN_ERR "%s: Fatal error on opening device\n",
			dev->name);
	}

	return 0;
}

1837
/*
 * Workqueue body for TX timeout recovery (scheduled from
 * mv643xx_eth_tx_timeout()).  Runs in process context so it may reset
 * and restart the port, then wakes the TX queue if space is available.
 */
static void mv643xx_eth_tx_timeout_task(struct work_struct *ugly)
{
	struct mv643xx_eth_private *mp = container_of(ugly, struct mv643xx_eth_private,
						  tx_timeout_task);
	struct net_device *dev = mp->dev;

	if (!netif_running(dev))
		return;

	netif_stop_queue(dev);

	port_reset(mp);
	port_start(dev);

	__txq_maybe_wake(mp->txq);
}

/*
 * net_device tx_timeout hook.  Called in softirq context, so the
 * actual port reset is deferred to a workqueue.
 */
static void mv643xx_eth_tx_timeout(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	printk(KERN_INFO "%s: TX timeout  ", dev->name);

	/* Do the reset outside of interrupt context */
	schedule_work(&mp->tx_timeout_task);
}

1864
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Netpoll hook: mask the port's interrupts, invoke the interrupt
 * handler synchronously, then restore the interrupt mask.
 *
 * Fix: re-enable with INT_RX | INT_EXT, the same mask that
 * mv643xx_eth_open() programs.  The old code or'ed in INT_CAUSE_EXT,
 * which elsewhere in this file is a function-like register-offset
 * macro (rdl(mp, INT_CAUSE_EXT(port))), not an interrupt mask bit.
 */
static void mv643xx_eth_netpoll(struct net_device *netdev)
{
	struct mv643xx_eth_private *mp = netdev_priv(netdev);
	int port_num = mp->port_num;

	wrl(mp, INT_MASK(port_num), 0x00000000);
	/* wait for previous write to complete */
	rdl(mp, INT_MASK(port_num));

	mv643xx_eth_int_handler(netdev->irq, netdev);

	wrl(mp, INT_MASK(port_num), INT_RX | INT_EXT);
}
#endif
1879

1880
/*
 * MII-library read callback: fetch PHY register @location over SMI.
 * The phy_id argument is ignored -- the SMI helpers address the PHY
 * configured for this port.
 */
static int mv643xx_eth_mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int val;

	read_smi_reg(mp, location, &val);
	return val;
}

1889
/* MII-library write callback: write PHY register @location over SMI
 * (phy_id ignored, as in mv643xx_eth_mdio_read). */
static void mv643xx_eth_mdio_write(struct net_device *dev, int phy_id, int location, int val)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	write_smi_reg(mp, location, val);
}
1894 1895


1896
/* platform glue ************************************************************/
1897 1898 1899
/*
 * Program the controller's MBUS address decode windows from the DRAM
 * target info: clear all six windows, then map one window per DRAM
 * chip select.  win_enable is active-low per window (0x3f = all
 * disabled); win_protect grants full access (bits 11b) per used window
 * and is stashed in msp for later per-port use.
 */
static void
mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
			      struct mbus_dram_target_info *dram)
{
	void __iomem *base = msp->base;
	u32 win_enable;
	u32 win_protect;
	int i;

	/* start from a clean slate: disable every window */
	for (i = 0; i < 6; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	win_enable = 0x3f;
	win_protect = 0;

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
			(cs->mbus_attr << 8) |
			dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable &= ~(1 << i);
		win_protect |= 3 << (2 * i);
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE);
	msp->win_protect = win_protect;
}

1932
/*
 * Probe for the shared (per-controller) platform device: map the
 * register window, record the t_clk (133 MHz default), and optionally
 * program the MBUS DRAM windows from platform data.
 * NOTE(review): kmalloc+memset could be collapsed to kzalloc.
 */
static int mv643xx_eth_shared_probe(struct platform_device *pdev)
{
	static int mv643xx_eth_version_printed = 0;
	struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
	struct mv643xx_eth_shared_private *msp;
	struct resource *res;
	int ret;

	/* print the banner only for the first controller probed */
	if (!mv643xx_eth_version_printed++)
		printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n");

	ret = -EINVAL;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		goto out;

	ret = -ENOMEM;
	msp = kmalloc(sizeof(*msp), GFP_KERNEL);
	if (msp == NULL)
		goto out;
	memset(msp, 0, sizeof(*msp));

	msp->base = ioremap(res->start, res->end - res->start + 1);
	if (msp->base == NULL)
		goto out_free;

	spin_lock_init(&msp->phy_lock);
	msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;

	platform_set_drvdata(pdev, msp);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (pd != NULL && pd->dram != NULL)
		mv643xx_eth_conf_mbus_windows(msp, pd->dram);

	return 0;

out_free:
	kfree(msp);
out:
	return ret;
}

/* Undo mv643xx_eth_shared_probe(): unmap registers, free private data. */
static int mv643xx_eth_shared_remove(struct platform_device *pdev)
{
	struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);

	iounmap(msp->base);
	kfree(msp);

	return 0;
}

1987 1988 1989 1990 1991 1992 1993 1994 1995
/* Platform driver for the shared (per-controller) resources. */
static struct platform_driver mv643xx_eth_shared_driver = {
	.probe = mv643xx_eth_shared_probe,
	.remove = mv643xx_eth_shared_remove,
	.driver = {
		.name = MV643XX_ETH_SHARED_NAME,
		.owner	= THIS_MODULE,
	},
};

1996
/*
 * Store @phy_addr as this port's PHY address in the shared PHY_ADDR
 * register.  Each port owns a 5-bit field at bit offset 5 * port_num.
 */
static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
{
	u32 reg_data;
	int addr_shift = 5 * mp->port_num;

	reg_data = rdl(mp, PHY_ADDR);
	reg_data &= ~(0x1f << addr_shift);
	reg_data |= (phy_addr & 0x1f) << addr_shift;
	wrl(mp, PHY_ADDR, reg_data);
}

2007
static int phy_addr_get(struct mv643xx_eth_private *mp)
L
Linus Torvalds 已提交
2008
{
2009
	unsigned int reg_data;
L
Linus Torvalds 已提交
2010

2011
	reg_data = rdl(mp, PHY_ADDR);
L
Linus Torvalds 已提交
2012

2013
	return ((reg_data >> (5 * mp->port_num)) & 0x1f);
L
Linus Torvalds 已提交
2014 2015
}

2016
static int phy_detect(struct mv643xx_eth_private *mp)
L
Linus Torvalds 已提交
2017
{
2018 2019
	unsigned int phy_reg_data0;
	int auto_neg;
L
Linus Torvalds 已提交
2020

2021
	read_smi_reg(mp, 0, &phy_reg_data0);
2022 2023
	auto_neg = phy_reg_data0 & 0x1000;
	phy_reg_data0 ^= 0x1000;	/* invert auto_neg */
2024
	write_smi_reg(mp, 0, phy_reg_data0);
L
Linus Torvalds 已提交
2025

2026
	read_smi_reg(mp, 0, &phy_reg_data0);
2027 2028
	if ((phy_reg_data0 & 0x1000) == auto_neg)
		return -ENODEV;				/* change didn't take */
L
Linus Torvalds 已提交
2029

2030
	phy_reg_data0 ^= 0x1000;
2031
	write_smi_reg(mp, 0, phy_reg_data0);
2032
	return 0;
L
Linus Torvalds 已提交
2033 2034
}

2035 2036 2037
static void mv643xx_init_ethtool_cmd(struct net_device *dev, int phy_address,
				     int speed, int duplex,
				     struct ethtool_cmd *cmd)
2038
{
2039
	struct mv643xx_eth_private *mp = netdev_priv(dev);
2040

2041
	memset(cmd, 0, sizeof(*cmd));
2042

2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061
	cmd->port = PORT_MII;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = phy_address;

	if (speed == 0) {
		cmd->autoneg = AUTONEG_ENABLE;
		/* mii lib checks, but doesn't use speed on AUTONEG_ENABLE */
		cmd->speed = SPEED_100;
		cmd->advertising = ADVERTISED_10baseT_Half  |
				   ADVERTISED_10baseT_Full  |
				   ADVERTISED_100baseT_Half |
				   ADVERTISED_100baseT_Full;
		if (mp->mii.supports_gmii)
			cmd->advertising |= ADVERTISED_1000baseT_Full;
	} else {
		cmd->autoneg = AUTONEG_DISABLE;
		cmd->speed = speed;
		cmd->duplex = duplex;
	}
2062 2063
}

2064
static int mv643xx_eth_probe(struct platform_device *pdev)
L
Linus Torvalds 已提交
2065
{
2066 2067
	struct mv643xx_eth_platform_data *pd;
	int port_num;
2068
	struct mv643xx_eth_private *mp;
2069 2070 2071 2072 2073 2074 2075 2076
	struct net_device *dev;
	u8 *p;
	struct resource *res;
	int err;
	struct ethtool_cmd cmd;
	int duplex = DUPLEX_HALF;
	int speed = 0;			/* default to auto-negotiation */
	DECLARE_MAC_BUF(mac);
L
Linus Torvalds 已提交
2077

2078 2079 2080 2081 2082
	pd = pdev->dev.platform_data;
	if (pd == NULL) {
		printk(KERN_ERR "No mv643xx_eth_platform_data\n");
		return -ENODEV;
	}
L
Linus Torvalds 已提交
2083

2084 2085 2086 2087
	if (pd->shared == NULL) {
		printk(KERN_ERR "No mv643xx_eth_platform_data->shared\n");
		return -ENODEV;
	}
2088

2089
	dev = alloc_etherdev(sizeof(struct mv643xx_eth_private));
2090 2091
	if (!dev)
		return -ENOMEM;
L
Linus Torvalds 已提交
2092

2093
	platform_set_drvdata(pdev, dev);
L
Linus Torvalds 已提交
2094

2095 2096
	mp = netdev_priv(dev);
	mp->dev = dev;
2097 2098
#ifdef MV643XX_ETH_NAPI
	netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 64);
2099
#endif
L
Linus Torvalds 已提交
2100

2101 2102 2103
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	BUG_ON(!res);
	dev->irq = res->start;
L
Linus Torvalds 已提交
2104

2105 2106 2107 2108 2109 2110 2111 2112 2113 2114
	dev->open = mv643xx_eth_open;
	dev->stop = mv643xx_eth_stop;
	dev->hard_start_xmit = mv643xx_eth_start_xmit;
	dev->set_mac_address = mv643xx_eth_set_mac_address;
	dev->set_multicast_list = mv643xx_eth_set_rx_mode;

	/* No need to Tx Timeout */
	dev->tx_timeout = mv643xx_eth_tx_timeout;

#ifdef CONFIG_NET_POLL_CONTROLLER
2115
	dev->poll_controller = mv643xx_eth_netpoll;
2116 2117 2118 2119 2120 2121
#endif

	dev->watchdog_timeo = 2 * HZ;
	dev->base_addr = 0;
	dev->change_mtu = mv643xx_eth_change_mtu;
	dev->do_ioctl = mv643xx_eth_do_ioctl;
2122
	SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);
L
Linus Torvalds 已提交
2123

2124
#ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX
2125
#ifdef MAX_SKB_FRAGS
2126
	/*
2127 2128
	 * Zero copy can only work if we use Discovery II memory. Else, we will
	 * have to map the buffers to ISA memory which is only 16 MB
2129
	 */
2130 2131 2132
	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
#endif
#endif
L
Linus Torvalds 已提交
2133

2134 2135
	/* Configure the timeout task */
	INIT_WORK(&mp->tx_timeout_task, mv643xx_eth_tx_timeout_task);
L
Linus Torvalds 已提交
2136

2137
	spin_lock_init(&mp->lock);
L
Linus Torvalds 已提交
2138

2139 2140
	mp->shared = platform_get_drvdata(pd->shared);
	port_num = mp->port_num = pd->port_number;
2141

2142 2143
	if (mp->shared->win_protect)
		wrl(mp, WINDOW_PROTECT(port_num), mp->shared->win_protect);
L
Linus Torvalds 已提交
2144

2145 2146 2147 2148 2149
	mp->shared_smi = mp->shared;
	if (pd->shared_smi != NULL)
		mp->shared_smi = platform_get_drvdata(pd->shared_smi);

	/* set default config values */
2150
	uc_addr_get(mp, dev->dev_addr);
2151 2152 2153 2154 2155

	if (is_valid_ether_addr(pd->mac_addr))
		memcpy(dev->dev_addr, pd->mac_addr, 6);

	if (pd->phy_addr || pd->force_phy_addr)
2156
		phy_addr_set(mp, pd->phy_addr);
2157

2158
	mp->default_rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
2159
	if (pd->rx_queue_size)
2160
		mp->default_rx_ring_size = pd->rx_queue_size;
L
Linus Torvalds 已提交
2161

2162
	mp->default_tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
2163
	if (pd->tx_queue_size)
2164
		mp->default_tx_ring_size = pd->tx_queue_size;
L
Linus Torvalds 已提交
2165

2166
	if (pd->tx_sram_size) {
2167 2168
		mp->tx_desc_sram_size = pd->tx_sram_size;
		mp->tx_desc_sram_addr = pd->tx_sram_addr;
2169
	}
L
Linus Torvalds 已提交
2170

2171
	if (pd->rx_sram_size) {
2172 2173
		mp->rx_desc_sram_addr = pd->rx_sram_addr;
		mp->rx_desc_sram_size = pd->rx_sram_size;
2174
	}
L
Linus Torvalds 已提交
2175

2176 2177
	duplex = pd->duplex;
	speed = pd->speed;
L
Linus Torvalds 已提交
2178

2179 2180
	/* Hook up MII support for ethtool */
	mp->mii.dev = dev;
2181 2182
	mp->mii.mdio_read = mv643xx_eth_mdio_read;
	mp->mii.mdio_write = mv643xx_eth_mdio_write;
2183
	mp->mii.phy_id = phy_addr_get(mp);
2184 2185
	mp->mii.phy_id_mask = 0x3f;
	mp->mii.reg_num_mask = 0x1f;
L
Linus Torvalds 已提交
2186

2187
	err = phy_detect(mp);
2188 2189
	if (err) {
		pr_debug("%s: No PHY detected at addr %d\n",
2190
				dev->name, phy_addr_get(mp));
2191 2192
		goto out;
	}
L
Linus Torvalds 已提交
2193

2194
	phy_reset(mp);
2195 2196
	mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii);
	mv643xx_init_ethtool_cmd(dev, mp->mii.phy_id, speed, duplex, &cmd);
2197
	mv643xx_eth_update_pscr(mp, &cmd);
2198
	mv643xx_eth_set_settings(dev, &cmd);
2199

2200 2201 2202 2203
	SET_NETDEV_DEV(dev, &pdev->dev);
	err = register_netdev(dev);
	if (err)
		goto out;
L
Linus Torvalds 已提交
2204

2205 2206 2207 2208
	p = dev->dev_addr;
	printk(KERN_NOTICE
		"%s: port %d with MAC address %s\n",
		dev->name, port_num, print_mac(mac, p));
L
Linus Torvalds 已提交
2209

2210 2211
	if (dev->features & NETIF_F_SG)
		printk(KERN_NOTICE "%s: Scatter Gather Enabled\n", dev->name);
L
Linus Torvalds 已提交
2212

2213 2214 2215
	if (dev->features & NETIF_F_IP_CSUM)
		printk(KERN_NOTICE "%s: TX TCP/IP Checksumming Supported\n",
								dev->name);
L
Linus Torvalds 已提交
2216

2217
#ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX
2218 2219
	printk(KERN_NOTICE "%s: RX TCP/UDP Checksum Offload ON \n", dev->name);
#endif
L
Linus Torvalds 已提交
2220

2221
#ifdef MV643XX_ETH_COAL
2222 2223 2224
	printk(KERN_NOTICE "%s: TX and RX Interrupt Coalescing ON \n",
								dev->name);
#endif
L
Linus Torvalds 已提交
2225

2226
#ifdef MV643XX_ETH_NAPI
2227 2228
	printk(KERN_NOTICE "%s: RX NAPI Enabled \n", dev->name);
#endif
L
Linus Torvalds 已提交
2229

2230
	if (mp->tx_desc_sram_size > 0)
2231
		printk(KERN_NOTICE "%s: Using SRAM\n", dev->name);
L
Linus Torvalds 已提交
2232

2233
	return 0;
L
Linus Torvalds 已提交
2234

2235 2236
out:
	free_netdev(dev);
L
Linus Torvalds 已提交
2237

2238
	return err;
L
Linus Torvalds 已提交
2239 2240
}

2241
static int mv643xx_eth_remove(struct platform_device *pdev)
L
Linus Torvalds 已提交
2242
{
2243
	struct net_device *dev = platform_get_drvdata(pdev);
L
Linus Torvalds 已提交
2244

2245 2246 2247 2248 2249 2250
	unregister_netdev(dev);
	flush_scheduled_work();

	free_netdev(dev);
	platform_set_drvdata(pdev, NULL);
	return 0;
L
Linus Torvalds 已提交
2251 2252
}

2253
static void mv643xx_eth_shutdown(struct platform_device *pdev)
2254
{
2255
	struct net_device *dev = platform_get_drvdata(pdev);
2256
	struct mv643xx_eth_private *mp = netdev_priv(dev);
2257
	unsigned int port_num = mp->port_num;
2258

2259
	/* Mask all interrupts on ethernet port */
2260 2261
	wrl(mp, INT_MASK(port_num), 0);
	rdl(mp, INT_MASK(port_num));
2262

2263
	port_reset(mp);
2264 2265
}

2266 2267 2268 2269 2270 2271 2272 2273 2274 2275
static struct platform_driver mv643xx_eth_driver = {
	.probe = mv643xx_eth_probe,
	.remove = mv643xx_eth_remove,
	.shutdown = mv643xx_eth_shutdown,
	.driver = {
		.name = MV643XX_ETH_NAME,
		.owner	= THIS_MODULE,
	},
};

2276
static int __init mv643xx_eth_init_module(void)
2277
{
2278
	int rc;
2279

2280 2281 2282 2283 2284 2285 2286
	rc = platform_driver_register(&mv643xx_eth_shared_driver);
	if (!rc) {
		rc = platform_driver_register(&mv643xx_eth_driver);
		if (rc)
			platform_driver_unregister(&mv643xx_eth_shared_driver);
	}
	return rc;
2287 2288
}

2289
static void __exit mv643xx_eth_cleanup_module(void)
2290
{
2291 2292
	platform_driver_unregister(&mv643xx_eth_driver);
	platform_driver_unregister(&mv643xx_eth_shared_driver);
2293 2294
}

module_init(mv643xx_eth_init_module);
module_exit(mv643xx_eth_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_AUTHOR(	"Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, Manish Lachwani"
		" and Dale Farnsworth");
MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
MODULE_ALIAS("platform:" MV643XX_ETH_NAME);
MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);