mv643xx_eth.c 66.2 KB
Newer Older
L
Linus Torvalds 已提交
1
/*
2
 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
L
Linus Torvalds 已提交
3 4 5
 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
 *
 * Based on the 64360 driver from:
6 7
 * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
 *		      Rabeeh Khoury <rabeeh@marvell.com>
L
Linus Torvalds 已提交
8 9
 *
 * Copyright (C) 2003 PMC-Sierra, Inc.,
10
 *	written by Manish Lachwani
L
Linus Torvalds 已提交
11 12 13
 *
 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
 *
14
 * Copyright (C) 2004-2006 MontaVista Software, Inc.
L
Linus Torvalds 已提交
15 16 17 18 19
 *			   Dale Farnsworth <dale@farnsworth.org>
 *
 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
 *				     <sjhill@realitydiluted.com>
 *
20 21 22
 * Copyright (C) 2007-2008 Marvell Semiconductor
 *			   Lennert Buytenhek <buytenh@marvell.com>
 *
L
Linus Torvalds 已提交
23 24 25 26 27 28 29 30 31 32 33 34 35 36
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
37

L
Linus Torvalds 已提交
38 39
#include <linux/init.h>
#include <linux/dma-mapping.h>
40
#include <linux/in.h>
L
Linus Torvalds 已提交
41 42 43 44 45
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
46
#include <linux/platform_device.h>
47 48 49 50 51 52
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
#include <linux/mv643xx_eth.h>
L
Linus Torvalds 已提交
53 54 55
#include <asm/io.h>
#include <asm/types.h>
#include <asm/system.h>
56

57 58
static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.0";
59

60 61 62 63
#define MV643XX_ETH_CHECKSUM_OFFLOAD_TX
#define MV643XX_ETH_NAPI
#define MV643XX_ETH_TX_FAST_REFILL
#undef	MV643XX_ETH_COAL
64

65 66 67
#define MV643XX_ETH_TX_COAL 100
#ifdef MV643XX_ETH_COAL
#define MV643XX_ETH_RX_COAL 100
68 69
#endif

70
#ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX
71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86
#define MAX_DESCS_PER_SKB	(MAX_SKB_FRAGS + 1)
#else
#define MAX_DESCS_PER_SKB	1
#endif

#define ETH_VLAN_HLEN		4
#define ETH_FCS_LEN		4
#define ETH_HW_IP_ALIGN		2		/* hw aligns IP header */
#define ETH_WRAPPER_LEN		(ETH_HW_IP_ALIGN + ETH_HLEN + \
					ETH_VLAN_HLEN + ETH_FCS_LEN)
#define ETH_RX_SKB_SIZE		(dev->mtu + ETH_WRAPPER_LEN + \
					dma_get_cache_alignment())

/*
 * Registers shared between all ports.
 */
87 88 89 90 91 92 93
#define PHY_ADDR			0x0000
#define SMI_REG				0x0004
#define WINDOW_BASE(w)			(0x0200 + ((w) << 3))
#define WINDOW_SIZE(w)			(0x0204 + ((w) << 3))
#define WINDOW_REMAP_HIGH(w)		(0x0280 + ((w) << 2))
#define WINDOW_BAR_ENABLE		0x0290
#define WINDOW_PROTECT(w)		(0x0294 + ((w) << 4))
94 95 96 97

/*
 * Per-port registers.
 */
98
#define PORT_CONFIG(p)			(0x0400 + ((p) << 10))
99
#define  UNICAST_PROMISCUOUS_MODE	0x00000001
100 101 102 103 104 105
#define PORT_CONFIG_EXT(p)		(0x0404 + ((p) << 10))
#define MAC_ADDR_LOW(p)			(0x0414 + ((p) << 10))
#define MAC_ADDR_HIGH(p)		(0x0418 + ((p) << 10))
#define SDMA_CONFIG(p)			(0x041c + ((p) << 10))
#define PORT_SERIAL_CONTROL(p)		(0x043c + ((p) << 10))
#define PORT_STATUS(p)			(0x0444 + ((p) << 10))
106
#define  TX_FIFO_EMPTY			0x00000400
107 108 109
#define TXQ_COMMAND(p)			(0x0448 + ((p) << 10))
#define TX_BW_MTU(p)			(0x0458 + ((p) << 10))
#define INT_CAUSE(p)			(0x0460 + ((p) << 10))
110 111
#define  INT_RX				0x00000804
#define  INT_EXT			0x00000002
112
#define INT_CAUSE_EXT(p)		(0x0464 + ((p) << 10))
113 114 115 116 117
#define  INT_EXT_LINK			0x00100000
#define  INT_EXT_PHY			0x00010000
#define  INT_EXT_TX_ERROR_0		0x00000100
#define  INT_EXT_TX_0			0x00000001
#define  INT_EXT_TX			0x00000101
118 119 120 121 122 123 124 125 126 127
#define INT_MASK(p)			(0x0468 + ((p) << 10))
#define INT_MASK_EXT(p)			(0x046c + ((p) << 10))
#define TX_FIFO_URGENT_THRESHOLD(p)	(0x0474 + ((p) << 10))
#define RXQ_CURRENT_DESC_PTR(p)		(0x060c + ((p) << 10))
#define RXQ_COMMAND(p)			(0x0680 + ((p) << 10))
#define TXQ_CURRENT_DESC_PTR(p)		(0x06c0 + ((p) << 10))
#define MIB_COUNTERS(p)			(0x1000 + ((p) << 7))
#define SPECIAL_MCAST_TABLE(p)		(0x1400 + ((p) << 10))
#define OTHER_MCAST_TABLE(p)		(0x1500 + ((p) << 10))
#define UNICAST_TABLE(p)		(0x1600 + ((p) << 10))
128

129 130 131 132

/*
 * SDMA configuration register.
 */
133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151
#define RX_BURST_SIZE_4_64BIT		(2 << 1)
#define BLM_RX_NO_SWAP			(1 << 4)
#define BLM_TX_NO_SWAP			(1 << 5)
#define TX_BURST_SIZE_4_64BIT		(2 << 22)

#if defined(__BIG_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		RX_BURST_SIZE_4_64BIT	|	\
		TX_BURST_SIZE_4_64BIT
#elif defined(__LITTLE_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		RX_BURST_SIZE_4_64BIT	|	\
		BLM_RX_NO_SWAP		|	\
		BLM_TX_NO_SWAP		|	\
		TX_BURST_SIZE_4_64BIT
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif

152 153 154 155 156 157 158

/*
 * Port serial control register.
 */
#define SET_MII_SPEED_TO_100			(1 << 24)
#define SET_GMII_SPEED_TO_1000			(1 << 23)
#define SET_FULL_DUPLEX_MODE			(1 << 21)
159 160 161
#define MAX_RX_PACKET_1522BYTE			(1 << 17)
#define MAX_RX_PACKET_9700BYTE			(5 << 17)
#define MAX_RX_PACKET_MASK			(7 << 17)
162 163 164 165 166 167 168
#define DISABLE_AUTO_NEG_SPEED_GMII		(1 << 13)
#define DO_NOT_FORCE_LINK_FAIL			(1 << 10)
#define SERIAL_PORT_CONTROL_RESERVED		(1 << 9)
#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL		(1 << 3)
#define DISABLE_AUTO_NEG_FOR_DUPLEX		(1 << 2)
#define FORCE_LINK_PASS				(1 << 1)
#define SERIAL_PORT_ENABLE			(1 << 0)
169

170 171
#define DEFAULT_RX_QUEUE_SIZE		400
#define DEFAULT_TX_QUEUE_SIZE		800
172 173

/* SMI reg */
174 175 176 177
#define SMI_BUSY		0x10000000	/* 0 - Write, 1 - Read	*/
#define SMI_READ_VALID		0x08000000	/* 0 - Write, 1 - Read	*/
#define SMI_OPCODE_WRITE	0		/* Completion of Read	*/
#define SMI_OPCODE_READ		0x04000000	/* Operation is in progress */
178 179 180

/* typedefs */

181
/* Return codes used by the legacy ring-manipulation helpers. */
typedef enum _func_ret_status {
	ETH_OK,			/* Returned as expected.		*/
	ETH_ERROR,		/* Fundamental error.			*/
	ETH_RETRY,		/* Could not process request. Try later.*/
	ETH_END_OF_JOB,		/* Ring has nothing to process.		*/
	ETH_QUEUE_FULL,		/* Ring resource error.			*/
	ETH_QUEUE_LAST_RESOURCE	/* Ring resources about to exhaust.	*/
} FUNC_RET_STATUS;
189

190 191
/*
 * RX/TX descriptors.
 *
 * These structures are read and written in memory by the port's DMA
 * engine, so field order differs between big- and little-endian builds
 * to keep each 32-bit word's layout identical from the hardware's
 * point of view.
 */
#if defined(__BIG_ENDIAN)
struct rx_desc {
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u16 buf_size;		/* Buffer size				*/
	u32 cmd_sts;		/* Descriptor command status		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
};

struct tx_desc {
	u16 byte_cnt;		/* buffer byte count			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u32 cmd_sts;		/* Command/status field			*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
};
#elif defined(__LITTLE_ENDIAN)
struct rx_desc {
	u32 cmd_sts;		/* Descriptor command status		*/
	u16 buf_size;		/* Buffer size				*/
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
};

struct tx_desc {
	u32 cmd_sts;		/* Command/status field			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u16 byte_cnt;		/* buffer byte count			*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
};
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif

229
/* RX & TX descriptor command */
230
#define BUFFER_OWNED_BY_DMA		0x80000000
231 232

/* RX & TX descriptor status */
233
#define ERROR_SUMMARY			0x00000001
234 235

/* RX descriptor status */
236 237 238 239
#define LAYER_4_CHECKSUM_OK		0x40000000
#define RX_ENABLE_INTERRUPT		0x20000000
#define RX_FIRST_DESC			0x08000000
#define RX_LAST_DESC			0x04000000
240 241

/* TX descriptor command */
242 243 244 245 246 247 248 249
#define TX_ENABLE_INTERRUPT		0x00800000
#define GEN_CRC				0x00400000
#define TX_FIRST_DESC			0x00200000
#define TX_LAST_DESC			0x00100000
#define ZERO_PADDING			0x00080000
#define GEN_IP_V4_CHECKSUM		0x00040000
#define GEN_TCP_UDP_CHECKSUM		0x00020000
#define UDP_FRAME			0x00010000
250

251
#define TX_IHL_SHIFT			11
252 253


254 255 256 257 258 259 260 261 262 263
/* Unified struct for Rx and Tx operations. The user is not required to	*/
/* be familiar with either Tx or Rx descriptors.			*/
struct pkt_info {
	unsigned short byte_cnt;	/* Descriptor buffer byte count	*/
	unsigned short l4i_chk;		/* Tx CPU provided TCP Checksum	*/
	unsigned int cmd_sts;		/* Descriptor command status	*/
	dma_addr_t buf_ptr;		/* Descriptor buffer pointer	*/
	struct sk_buff *return_info;	/* User resource return information */
};

264 265

/* global *******************************************************************/
266
/* State shared by all ports on one MV643xx/Orion controller. */
struct mv643xx_eth_shared_private {
	void __iomem *base;	/* ioremapped base of the shared registers */

	/* used to protect SMI_REG, which is shared across ports */
	spinlock_t phy_lock;

	u32 win_protect;	/* window access-protection setting */

	unsigned int t_clk;	/* controller clock rate — presumably Hz;
				 * not used within this chunk */
};


/* per-port *****************************************************************/
279
/*
 * Software accumulators for the hardware MIB counters; filled in by
 * update_mib_counters() from the per-port MIB_COUNTERS register block.
 * Octet counters are 64-bit because the hardware splits them across
 * two 32-bit registers.
 */
struct mib_counters {
	u64 good_octets_received;
	u32 bad_octets_received;
	u32 internal_mac_transmit_err;
	u32 good_frames_received;
	u32 bad_frames_received;
	u32 broadcast_frames_received;
	u32 multicast_frames_received;
	u32 frames_64_octets;
	u32 frames_65_to_127_octets;
	u32 frames_128_to_255_octets;
	u32 frames_256_to_511_octets;
	u32 frames_512_to_1023_octets;
	u32 frames_1024_to_max_octets;
	u64 good_octets_sent;
	u32 good_frames_sent;
	u32 excessive_collision;
	u32 multicast_frames_sent;
	u32 broadcast_frames_sent;
	u32 unrec_mac_control_received;
	u32 fc_sent;
	u32 good_fc_received;
	u32 bad_fc_received;
	u32 undersize_received;
	u32 fragments_received;
	u32 oversize_received;
	u32 jabber_received;
	u32 mac_receive_error;
	u32 bad_crc_event;
	u32 collision;
	u32 late_collision;
};

312 313
/* Per-port driver state. */
struct mv643xx_eth_private {
	struct mv643xx_eth_shared_private *shared;
	int port_num;			/* User Ethernet port number	*/

	struct mv643xx_eth_shared_private *shared_smi;

	u32 rx_sram_addr;		/* Base address of rx sram area */
	u32 rx_sram_size;		/* Size of rx sram area		*/
	u32 tx_sram_addr;		/* Base address of tx sram area */
	u32 tx_sram_size;		/* Size of tx sram area		*/

	/* Tx/Rx rings management indexes fields. For driver use */

	/* Next available and first returning Rx resource */
	int rx_curr_desc, rx_used_desc;

	/* Next available and first returning Tx resource */
	int tx_curr_desc, tx_used_desc;

#ifdef MV643XX_ETH_TX_FAST_REFILL
	u32 tx_clean_threshold;		/* poll counter used to pace TX reclaim */
#endif

	struct rx_desc *rx_desc_area;	/* RX descriptor ring (DMA-coherent) */
	dma_addr_t rx_desc_dma;
	int rx_desc_area_size;
	struct sk_buff **rx_skb;	/* skb attached to each RX descriptor */

	struct tx_desc *tx_desc_area;	/* TX descriptor ring (DMA-coherent) */
	dma_addr_t tx_desc_dma;
	int tx_desc_area_size;
	struct sk_buff **tx_skb;	/* skb to free for each TX descriptor */

	struct work_struct tx_timeout_task;

	struct net_device *dev;
	struct napi_struct napi;
	struct net_device_stats stats;
	struct mib_counters mib_counters;
	spinlock_t lock;		/* protects the rings and their indexes */
	/* Size of Tx Ring per queue */
	int tx_ring_size;
	/* Number of tx descriptors in use */
	int tx_desc_count;
	/* Size of Rx Ring per queue */
	int rx_ring_size;
	/* Number of rx descriptors in use */
	int rx_desc_count;

	/*
	 * Used in case RX Ring is empty, which can be caused when
	 * system does not have resources (skb's)
	 */
	struct timer_list timeout;

	u32 rx_int_coal;
	u32 tx_int_coal;
	struct mii_if_info mii;
};
L
Linus Torvalds 已提交
371

372

373
/* port register accessors **************************************************/
374
static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
375
{
376
	return readl(mp->shared->base + offset);
377
}
378

379
/* Write @data to the 32-bit port register at @offset from the shared base. */
static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	void __iomem *reg = mp->shared->base + offset;

	writel(data, reg);
}
383 384


385
/* rxq/txq helper functions *************************************************/
386
/*
 * Enable reception on the RX queues selected by the @queues bit mask
 * (low byte of RXQ_COMMAND is the per-queue enable field).
 */
static void mv643xx_eth_port_enable_rx(struct mv643xx_eth_private *mp,
					unsigned int queues)
{
	wrl(mp, RXQ_COMMAND(mp->port_num), queues);
}
391

392
/*
 * Disable all currently-enabled RX queues and busy-wait (10us polls,
 * no upper bound) until the hardware reports them stopped.
 *
 * Returns the mask of queues that were enabled, so the caller can
 * restore them later via mv643xx_eth_port_enable_rx().
 */
static unsigned int mv643xx_eth_port_disable_rx(struct mv643xx_eth_private *mp)
{
	unsigned int port_num = mp->port_num;
	u32 queues;

	/* Stop Rx port activity. Check port Rx activity. */
	queues = rdl(mp, RXQ_COMMAND(port_num)) & 0xFF;
	if (queues) {
		/* Issue stop command for active queues only;
		 * the high byte of RXQ_COMMAND is the disable field. */
		wrl(mp, RXQ_COMMAND(port_num), (queues << 8));

		/* Wait for all Rx activity to terminate. */
		/* Check port cause register that all Rx queues are stopped */
		while (rdl(mp, RXQ_COMMAND(port_num)) & 0xFF)
			udelay(10);
	}

	return queues;
}

412
/*
 * Enable transmission on the TX queues selected by the @queues bit mask
 * (low byte of TXQ_COMMAND is the per-queue enable field).
 */
static void mv643xx_eth_port_enable_tx(struct mv643xx_eth_private *mp,
					unsigned int queues)
{
	wrl(mp, TXQ_COMMAND(mp->port_num), queues);
}

418
/*
 * Disable all currently-enabled TX queues, busy-wait until the hardware
 * reports them stopped, then wait for the TX FIFO to drain.
 *
 * Returns the mask of queues that were enabled, so the caller can
 * restore them later via mv643xx_eth_port_enable_tx().
 */
static unsigned int mv643xx_eth_port_disable_tx(struct mv643xx_eth_private *mp)
{
	unsigned int port_num = mp->port_num;
	u32 queues;

	/* Stop Tx port activity. Check port Tx activity. */
	queues = rdl(mp, TXQ_COMMAND(port_num)) & 0xFF;
	if (queues) {
		/* Issue stop command for active queues only */
		wrl(mp, TXQ_COMMAND(port_num), (queues << 8));

		/* Wait for all Tx activity to terminate. */
		/* Check port cause register that all Tx queues are stopped */
		while (rdl(mp, TXQ_COMMAND(port_num)) & 0xFF)
			udelay(10);

		/* Wait for Tx FIFO to empty */
		/* NOTE(review): this spins while the TX_FIFO_EMPTY bit is
		 * SET, which contradicts the comment above — verify the bit's
		 * polarity against the datasheet (it may actually mean
		 * "TX in progress"). */
		while (rdl(mp, PORT_STATUS(port_num)) & TX_FIFO_EMPTY)
			udelay(10);
	}

	return queues;
}

442 443 444 445

/* rx ***********************************************************************/
static void mv643xx_eth_free_completed_tx_descs(struct net_device *dev);

446
/*
 * Hand one receive buffer (described by @pkt_info) back to the hardware
 * by filling the next 'used' RX descriptor and flipping its ownership
 * bit to the DMA engine.
 *
 * Takes mp->lock; the write barriers ensure the buffer pointer and size
 * are visible in memory before the DMA engine can see the ownership bit.
 * Always returns ETH_OK.
 */
static FUNC_RET_STATUS rx_return_buff(struct mv643xx_eth_private *mp,
						struct pkt_info *pkt_info)
{
	int used_rx_desc;	/* Where to return Rx resource */
	volatile struct rx_desc *rx_desc;
	unsigned long flags;

	spin_lock_irqsave(&mp->lock, flags);

	/* Get 'used' Rx descriptor */
	used_rx_desc = mp->rx_used_desc;
	rx_desc = &mp->rx_desc_area[used_rx_desc];

	rx_desc->buf_ptr = pkt_info->buf_ptr;
	rx_desc->buf_size = pkt_info->byte_cnt;
	mp->rx_skb[used_rx_desc] = pkt_info->return_info;

	/* Flush the write pipe */

	/* Return the descriptor to DMA ownership */
	wmb();
	rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
	wmb();

	/* Move the used descriptor pointer to the next descriptor */
	mp->rx_used_desc = (used_rx_desc + 1) % mp->rx_ring_size;

	spin_unlock_irqrestore(&mp->lock, flags);

	return ETH_OK;
}

478
/*
 * Refill the RX ring with freshly-allocated, DMA-mapped skbs until the
 * ring is full or allocation fails.  Each skb's data pointer is aligned
 * to the cache line size before mapping; the 2-byte ETH_HW_IP_ALIGN
 * reserve is applied after mapping so the hardware-aligned IP header
 * lands naturally for the stack.
 *
 * If no skb at all could be attached (ring completely empty), arms
 * mp->timeout to retry ~100ms later.
 */
static void mv643xx_eth_rx_refill_descs(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	struct pkt_info pkt_info;
	struct sk_buff *skb;
	int unaligned;

	while (mp->rx_desc_count < mp->rx_ring_size) {
		/* over-allocate so we can cache-align skb->data below */
		skb = dev_alloc_skb(ETH_RX_SKB_SIZE + dma_get_cache_alignment());
		if (!skb)
			break;
		mp->rx_desc_count++;
		unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
		if (unaligned)
			skb_reserve(skb, dma_get_cache_alignment() - unaligned);
		pkt_info.cmd_sts = RX_ENABLE_INTERRUPT;
		pkt_info.byte_cnt = ETH_RX_SKB_SIZE;
		pkt_info.buf_ptr = dma_map_single(NULL, skb->data,
					ETH_RX_SKB_SIZE, DMA_FROM_DEVICE);
		pkt_info.return_info = skb;
		if (rx_return_buff(mp, &pkt_info) != ETH_OK) {
			printk(KERN_ERR
				"%s: Error allocating RX Ring\n", dev->name);
			break;
		}
		skb_reserve(skb, ETH_HW_IP_ALIGN);
	}
	/*
	 * If RX ring is empty of SKB, set a timer to try allocating
	 * again at a later time.
	 */
	if (mp->rx_desc_count == 0) {
		printk(KERN_INFO "%s: Rx ring is empty\n", dev->name);
		mp->timeout.expires = jiffies + (HZ / 10);	/* 100 mSec */
		add_timer(&mp->timeout);
	}
}

516
/*
 * Timer callback for mp->timeout: retries the RX ring refill.
 * @data is the struct net_device pointer cast to unsigned long.
 */
static inline void mv643xx_eth_rx_refill_descs_timer_wrapper(unsigned long data)
{
	mv643xx_eth_rx_refill_descs((struct net_device *)data);
}

521
/*
 * Process up to @budget received frames from the RX ring.
 *
 * For each CPU-owned descriptor: detach its skb under mp->lock, unmap
 * the DMA buffer, update statistics, then either drop the frame (multi-
 * descriptor or error-summary frames) or push it up the stack with the
 * hardware L4 checksum result when available.  Finishes by refilling
 * the ring.  Returns the number of frames processed.
 */
static int mv643xx_eth_receive_queue(struct net_device *dev, int budget)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	unsigned int received_packets = 0;

	while (budget-- > 0) {
		struct sk_buff *skb;
		volatile struct rx_desc *rx_desc;
		unsigned int cmd_sts;
		unsigned long flags;

		spin_lock_irqsave(&mp->lock, flags);

		rx_desc = &mp->rx_desc_area[mp->rx_curr_desc];

		cmd_sts = rx_desc->cmd_sts;
		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
			/* hardware still owns this descriptor — ring drained */
			spin_unlock_irqrestore(&mp->lock, flags);
			break;
		}
		/* read descriptor contents only after seeing ownership flip */
		rmb();

		skb = mp->rx_skb[mp->rx_curr_desc];
		mp->rx_skb[mp->rx_curr_desc] = NULL;

		mp->rx_curr_desc = (mp->rx_curr_desc + 1) % mp->rx_ring_size;

		spin_unlock_irqrestore(&mp->lock, flags);

		dma_unmap_single(NULL, rx_desc->buf_ptr + ETH_HW_IP_ALIGN,
					ETH_RX_SKB_SIZE, DMA_FROM_DEVICE);
		mp->rx_desc_count--;
		received_packets++;

		/*
		 * Update statistics.
		 * Note byte count includes 4 byte CRC count
		 */
		stats->rx_packets++;
		stats->rx_bytes += rx_desc->byte_cnt - ETH_HW_IP_ALIGN;

		/*
		 * In case received a packet without first / last bits on OR
		 * the error summary bit is on, the packet needs to be dropped.
		 */
		if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
					(RX_FIRST_DESC | RX_LAST_DESC))
				|| (cmd_sts & ERROR_SUMMARY)) {
			stats->rx_dropped++;
			if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
				(RX_FIRST_DESC | RX_LAST_DESC)) {
				if (net_ratelimit())
					printk(KERN_ERR
						"%s: Received packet spread "
						"on multiple descriptors\n",
						dev->name);
			}
			if (cmd_sts & ERROR_SUMMARY)
				stats->rx_errors++;

			dev_kfree_skb_irq(skb);
		} else {
			/*
			 * The -4 is for the CRC in the trailer of the
			 * received packet
			 */
			skb_put(skb, rx_desc->byte_cnt - ETH_HW_IP_ALIGN - 4);

			if (cmd_sts & LAYER_4_CHECKSUM_OK) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				skb->csum = htons(
					(cmd_sts & 0x0007fff8) >> 3);
			}
			skb->protocol = eth_type_trans(skb, dev);
#ifdef MV643XX_ETH_NAPI
			netif_receive_skb(skb);
#else
			netif_rx(skb);
#endif
		}
		dev->last_rx = jiffies;
	}
	mv643xx_eth_rx_refill_descs(dev);	/* Fill RX ring with skb's */

	return received_packets;
}

609 610
#ifdef MV643XX_ETH_NAPI
static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
611
{
612
	struct mv643xx_eth_private *mp = container_of(napi, struct mv643xx_eth_private, napi);
613 614 615
	struct net_device *dev = mp->dev;
	unsigned int port_num = mp->port_num;
	int work_done;
616

617
#ifdef MV643XX_ETH_TX_FAST_REFILL
618 619 620
	if (++mp->tx_clean_threshold > 5) {
		mv643xx_eth_free_completed_tx_descs(dev);
		mp->tx_clean_threshold = 0;
621
	}
622
#endif
623

624
	work_done = 0;
625
	if ((rdl(mp, RXQ_CURRENT_DESC_PTR(port_num)))
626
	    != (u32) mp->rx_used_desc)
627
		work_done = mv643xx_eth_receive_queue(dev, budget);
628

629 630
	if (work_done < budget) {
		netif_rx_complete(dev, napi);
631 632
		wrl(mp, INT_CAUSE(port_num), 0);
		wrl(mp, INT_CAUSE_EXT(port_num), 0);
633
		wrl(mp, INT_MASK(port_num), INT_RX | INT_EXT);
634
	}
635 636

	return work_done;
637
}
638
#endif
639

640 641 642

/* tx ***********************************************************************/
static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
L
Linus Torvalds 已提交
643
{
644 645
	unsigned int frag;
	skb_frag_t *fragp;
L
Linus Torvalds 已提交
646

647 648 649 650
	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
		fragp = &skb_shinfo(skb)->frags[frag];
		if (fragp->size <= 8 && fragp->page_offset & 0x7)
			return 1;
L
Linus Torvalds 已提交
651
	}
652 653
	return 0;
}
654

655
/*
 * Claim the next free TX descriptor slot and advance tx_curr_desc.
 * Returns the claimed ring index.  Caller must hold mp->lock and have
 * verified there is room; the BUG_ONs catch ring-accounting errors.
 */
static int alloc_tx_desc_index(struct mv643xx_eth_private *mp)
{
	int tx_desc_curr;

	BUG_ON(mp->tx_desc_count >= mp->tx_ring_size);

	tx_desc_curr = mp->tx_curr_desc;
	mp->tx_curr_desc = (tx_desc_curr + 1) % mp->tx_ring_size;

	BUG_ON(mp->tx_curr_desc == mp->tx_used_desc);

	return tx_desc_curr;
}
668

669
static void tx_fill_frag_descs(struct mv643xx_eth_private *mp,
670 671 672 673
				   struct sk_buff *skb)
{
	int frag;
	int tx_index;
674
	struct tx_desc *desc;
L
Linus Torvalds 已提交
675

676 677 678
	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
		skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];

679
		tx_index = alloc_tx_desc_index(mp);
680
		desc = &mp->tx_desc_area[tx_index];
681

682
		desc->cmd_sts = BUFFER_OWNED_BY_DMA;
683 684
		/* Last Frag enables interrupt and frees the skb */
		if (frag == (skb_shinfo(skb)->nr_frags - 1)) {
685 686 687
			desc->cmd_sts |= ZERO_PADDING |
					 TX_LAST_DESC |
					 TX_ENABLE_INTERRUPT;
688 689 690 691
			mp->tx_skb[tx_index] = skb;
		} else
			mp->tx_skb[tx_index] = NULL;

692
		desc = &mp->tx_desc_area[tx_index];
693 694 695 696 697 698 699
		desc->l4i_chk = 0;
		desc->byte_cnt = this_frag->size;
		desc->buf_ptr = dma_map_page(NULL, this_frag->page,
						this_frag->page_offset,
						this_frag->size,
						DMA_TO_DEVICE);
	}
L
Linus Torvalds 已提交
700 701
}

702 703 704 705
/* Reinterpret a 16-bit checksum as big-endian (no byte swapping). */
static inline __be16 sum16_as_be(__sum16 sum)
{
	return (__force __be16)sum;
}
L
Linus Torvalds 已提交
706

707
/*
 * Build the TX descriptor chain for @skb and kick the hardware.
 *
 * The head descriptor is filled here; page fragments (if any) get their
 * own descriptors via tx_fill_frag_descs().  For CHECKSUM_PARTIAL IPv4
 * frames the hardware is asked to generate IP and TCP/UDP checksums.
 * The head descriptor's cmd_sts (with the ownership bit) is written
 * last, behind a wmb(), so the DMA engine never sees a half-built
 * chain; a second wmb() orders it before the TXQ enable.
 * Caller must hold mp->lock and have room for MAX_DESCS_PER_SKB.
 */
static void tx_submit_descs_for_skb(struct mv643xx_eth_private *mp,
					struct sk_buff *skb)
{
	int tx_index;
	struct tx_desc *desc;
	u32 cmd_sts;
	int length;
	int nr_frags = skb_shinfo(skb)->nr_frags;

	cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;

	tx_index = alloc_tx_desc_index(mp);
	desc = &mp->tx_desc_area[tx_index];

	if (nr_frags) {
		tx_fill_frag_descs(mp, skb);

		length = skb_headlen(skb);
		mp->tx_skb[tx_index] = NULL;
	} else {
		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
		length = skb->len;
		mp->tx_skb[tx_index] = skb;
	}

	desc->byte_cnt = length;
	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		BUG_ON(skb->protocol != htons(ETH_P_IP));

		cmd_sts |= GEN_TCP_UDP_CHECKSUM |
			   GEN_IP_V4_CHECKSUM   |
			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;

		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_UDP:
			cmd_sts |= UDP_FRAME;
			desc->l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
			break;
		case IPPROTO_TCP:
			desc->l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
			break;
		default:
			BUG();
		}
	} else {
		/* Errata BTS #50, IHL must be 5 if no HW checksum */
		cmd_sts |= 5 << TX_IHL_SHIFT;
		desc->l4i_chk = 0;
	}

	/* ensure all other descriptors are written before first cmd_sts */
	wmb();
	desc->cmd_sts = cmd_sts;

	/* ensure all descriptors are written before poking hardware */
	wmb();
	mv643xx_eth_port_enable_tx(mp, 1);

	mp->tx_desc_count += nr_frags + 1;
}

770
/*
 * ndo hard_start_xmit: queue @skb for transmission.
 *
 * Linearizes skbs with tiny unaligned fragments first (the hardware
 * cannot DMA them).  Submits descriptors under mp->lock and stops the
 * queue when fewer than MAX_DESCS_PER_SKB slots remain, so the next
 * worst-case skb always fits.  Returns NETDEV_TX_OK on success or
 * NETDEV_TX_BUSY when the skb could not be accepted.
 */
static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	unsigned long flags;

	BUG_ON(netif_queue_stopped(dev));

	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
		stats->tx_dropped++;
		printk(KERN_DEBUG "%s: failed to linearize tiny "
				"unaligned fragment\n", dev->name);
		return NETDEV_TX_BUSY;
	}

	spin_lock_irqsave(&mp->lock, flags);

	if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB) {
		printk(KERN_ERR "%s: transmit with queue full\n", dev->name);
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&mp->lock, flags);
		return NETDEV_TX_BUSY;
	}

	tx_submit_descs_for_skb(mp, skb);
	stats->tx_bytes += skb->len;
	stats->tx_packets++;
	dev->trans_start = jiffies;

	if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&mp->lock, flags);

	return NETDEV_TX_OK;
}

807 808

/* mii management interface *************************************************/
809
static int phy_addr_get(struct mv643xx_eth_private *mp);
810

811
/*
 * Read PHY register @phy_reg via the shared SMI interface into *@value.
 *
 * Serialized by shared_smi->phy_lock.  Polls (up to 1000 x 10us) for
 * the SMI unit to go idle, issues the read, then polls for the result.
 * NOTE(review): on either timeout this returns without writing *value,
 * so the caller sees stale/uninitialized data — callers should
 * pre-initialize or this should report failure; confirm call sites.
 */
static void read_smi_reg(struct mv643xx_eth_private *mp,
				unsigned int phy_reg, unsigned int *value)
{
	void __iomem *smi_reg = mp->shared_smi->base + SMI_REG;
	int phy_addr = phy_addr_get(mp);
	unsigned long flags;
	int i;

	/* the SMI register is a shared resource */
	spin_lock_irqsave(&mp->shared_smi->phy_lock, flags);

	/* wait for the SMI register to become available */
	for (i = 0; readl(smi_reg) & SMI_BUSY; i++) {
		if (i == 1000) {
			printk("%s: PHY busy timeout\n", mp->dev->name);
			goto out;
		}
		udelay(10);
	}

	writel((phy_addr << 16) | (phy_reg << 21) | SMI_OPCODE_READ, smi_reg);

	/* now wait for the data to be valid */
	for (i = 0; !(readl(smi_reg) & SMI_READ_VALID); i++) {
		if (i == 1000) {
			printk("%s: PHY read timeout\n", mp->dev->name);
			goto out;
		}
		udelay(10);
	}

	*value = readl(smi_reg) & 0xffff;
out:
	spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags);
}

847
/*
 * Write @value to PHY register @phy_reg via the shared SMI interface.
 *
 * Serialized by shared_smi->phy_lock.  Polls (up to 1000 x 10us) for
 * the SMI unit to go idle before issuing the write; on timeout the
 * write is silently skipped (only a printk is emitted).
 */
static void write_smi_reg(struct mv643xx_eth_private *mp,
				   unsigned int phy_reg, unsigned int value)
{
	void __iomem *smi_reg = mp->shared_smi->base + SMI_REG;
	int phy_addr = phy_addr_get(mp);
	unsigned long flags;
	int i;

	/* the SMI register is a shared resource */
	spin_lock_irqsave(&mp->shared_smi->phy_lock, flags);

	/* wait for the SMI register to become available */
	for (i = 0; readl(smi_reg) & SMI_BUSY; i++) {
		if (i == 1000) {
			printk("%s: PHY busy timeout\n", mp->dev->name);
			goto out;
		}
		udelay(10);
	}

	writel((phy_addr << 16) | (phy_reg << 21) |
		SMI_OPCODE_WRITE | (value & 0xffff), smi_reg);
out:
	spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags);
}
L
Linus Torvalds 已提交
872

873 874

/* mib counters *************************************************************/
875
/*
 * Zero the port's hardware MIB counters by reading each one —
 * presumably the counters are clear-on-read (the values are discarded).
 */
static void clear_mib_counters(struct mv643xx_eth_private *mp)
{
	unsigned int port_num = mp->port_num;
	int i;

	/* Perform dummy reads from MIB counters */
	for (i = 0; i < 0x80; i += 4)
		rdl(mp, MIB_COUNTERS(port_num) + i);
}

885
static inline u32 read_mib(struct mv643xx_eth_private *mp, int offset)
886
{
887
	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
888
}
889

890
/*
 * Accumulate the hardware MIB counters into mp->mib_counters.
 * The two 64-bit octet counters are assembled from low/high 32-bit
 * register pairs; everything else is a straight 32-bit add.
 */
static void update_mib_counters(struct mv643xx_eth_private *mp)
{
	struct mib_counters *p = &mp->mib_counters;

	p->good_octets_received += read_mib(mp, 0x00);
	p->good_octets_received += (u64)read_mib(mp, 0x04) << 32;
	p->bad_octets_received += read_mib(mp, 0x08);
	p->internal_mac_transmit_err += read_mib(mp, 0x0c);
	p->good_frames_received += read_mib(mp, 0x10);
	p->bad_frames_received += read_mib(mp, 0x14);
	p->broadcast_frames_received += read_mib(mp, 0x18);
	p->multicast_frames_received += read_mib(mp, 0x1c);
	p->frames_64_octets += read_mib(mp, 0x20);
	p->frames_65_to_127_octets += read_mib(mp, 0x24);
	p->frames_128_to_255_octets += read_mib(mp, 0x28);
	p->frames_256_to_511_octets += read_mib(mp, 0x2c);
	p->frames_512_to_1023_octets += read_mib(mp, 0x30);
	p->frames_1024_to_max_octets += read_mib(mp, 0x34);
	p->good_octets_sent += read_mib(mp, 0x38);
	p->good_octets_sent += (u64)read_mib(mp, 0x3c) << 32;
	p->good_frames_sent += read_mib(mp, 0x40);
	p->excessive_collision += read_mib(mp, 0x44);
	p->multicast_frames_sent += read_mib(mp, 0x48);
	p->broadcast_frames_sent += read_mib(mp, 0x4c);
	p->unrec_mac_control_received += read_mib(mp, 0x50);
	p->fc_sent += read_mib(mp, 0x54);
	p->good_fc_received += read_mib(mp, 0x58);
	p->bad_fc_received += read_mib(mp, 0x5c);
	p->undersize_received += read_mib(mp, 0x60);
	p->fragments_received += read_mib(mp, 0x64);
	p->oversize_received += read_mib(mp, 0x68);
	p->jabber_received += read_mib(mp, 0x6c);
	p->mac_receive_error += read_mib(mp, 0x70);
	p->bad_crc_event += read_mib(mp, 0x74);
	p->collision += read_mib(mp, 0x78);
	p->late_collision += read_mib(mp, 0x7c);
}

928 929

/* ethtool ******************************************************************/
930
/* One ethtool statistics entry: name plus size/offset of the backing
 * field inside struct mv643xx_eth_private (see MV643XX_ETH_STAT). */
struct mv643xx_eth_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977
/*
 * Expands to the "sizeof_stat, stat_offset" initialiser pair for member
 * 'm' of struct mv643xx_eth_private, for use in the stats table below.
 */
#define MV643XX_ETH_STAT(m) FIELD_SIZEOF(struct mv643xx_eth_private, m), \
					offsetof(struct mv643xx_eth_private, m)

/*
 * Table of all statistics exported through ethtool: first the generic
 * net_device_stats counters, then the hardware MIB counters gathered by
 * update_mib_counters().  The order here defines the order of both the
 * strings and the values reported to userspace, so the two ethtool
 * callbacks below must iterate this table identically.
 */
static const struct mv643xx_eth_stats mv643xx_eth_gstrings_stats[] = {
	{ "rx_packets", MV643XX_ETH_STAT(stats.rx_packets) },
	{ "tx_packets", MV643XX_ETH_STAT(stats.tx_packets) },
	{ "rx_bytes", MV643XX_ETH_STAT(stats.rx_bytes) },
	{ "tx_bytes", MV643XX_ETH_STAT(stats.tx_bytes) },
	{ "rx_errors", MV643XX_ETH_STAT(stats.rx_errors) },
	{ "tx_errors", MV643XX_ETH_STAT(stats.tx_errors) },
	{ "rx_dropped", MV643XX_ETH_STAT(stats.rx_dropped) },
	{ "tx_dropped", MV643XX_ETH_STAT(stats.tx_dropped) },
	{ "good_octets_received", MV643XX_ETH_STAT(mib_counters.good_octets_received) },
	{ "bad_octets_received", MV643XX_ETH_STAT(mib_counters.bad_octets_received) },
	{ "internal_mac_transmit_err", MV643XX_ETH_STAT(mib_counters.internal_mac_transmit_err) },
	{ "good_frames_received", MV643XX_ETH_STAT(mib_counters.good_frames_received) },
	{ "bad_frames_received", MV643XX_ETH_STAT(mib_counters.bad_frames_received) },
	{ "broadcast_frames_received", MV643XX_ETH_STAT(mib_counters.broadcast_frames_received) },
	{ "multicast_frames_received", MV643XX_ETH_STAT(mib_counters.multicast_frames_received) },
	{ "frames_64_octets", MV643XX_ETH_STAT(mib_counters.frames_64_octets) },
	{ "frames_65_to_127_octets", MV643XX_ETH_STAT(mib_counters.frames_65_to_127_octets) },
	{ "frames_128_to_255_octets", MV643XX_ETH_STAT(mib_counters.frames_128_to_255_octets) },
	{ "frames_256_to_511_octets", MV643XX_ETH_STAT(mib_counters.frames_256_to_511_octets) },
	{ "frames_512_to_1023_octets", MV643XX_ETH_STAT(mib_counters.frames_512_to_1023_octets) },
	{ "frames_1024_to_max_octets", MV643XX_ETH_STAT(mib_counters.frames_1024_to_max_octets) },
	{ "good_octets_sent", MV643XX_ETH_STAT(mib_counters.good_octets_sent) },
	{ "good_frames_sent", MV643XX_ETH_STAT(mib_counters.good_frames_sent) },
	{ "excessive_collision", MV643XX_ETH_STAT(mib_counters.excessive_collision) },
	{ "multicast_frames_sent", MV643XX_ETH_STAT(mib_counters.multicast_frames_sent) },
	{ "broadcast_frames_sent", MV643XX_ETH_STAT(mib_counters.broadcast_frames_sent) },
	{ "unrec_mac_control_received", MV643XX_ETH_STAT(mib_counters.unrec_mac_control_received) },
	{ "fc_sent", MV643XX_ETH_STAT(mib_counters.fc_sent) },
	{ "good_fc_received", MV643XX_ETH_STAT(mib_counters.good_fc_received) },
	{ "bad_fc_received", MV643XX_ETH_STAT(mib_counters.bad_fc_received) },
	{ "undersize_received", MV643XX_ETH_STAT(mib_counters.undersize_received) },
	{ "fragments_received", MV643XX_ETH_STAT(mib_counters.fragments_received) },
	{ "oversize_received", MV643XX_ETH_STAT(mib_counters.oversize_received) },
	{ "jabber_received", MV643XX_ETH_STAT(mib_counters.jabber_received) },
	{ "mac_receive_error", MV643XX_ETH_STAT(mib_counters.mac_receive_error) },
	{ "bad_crc_event", MV643XX_ETH_STAT(mib_counters.bad_crc_event) },
	{ "collision", MV643XX_ETH_STAT(mib_counters.collision) },
	{ "late_collision", MV643XX_ETH_STAT(mib_counters.late_collision) },
};

980
/* Number of entries in the ethtool statistics table above. */
#define MV643XX_ETH_STATS_LEN	ARRAY_SIZE(mv643xx_eth_gstrings_stats)
981

982
/*
 * ethtool .get_settings: read the current link settings from the PHY via
 * the generic MII library, under the port lock.
 */
static int mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int err;

	spin_lock_irq(&mp->lock);
	err = mii_ethtool_gset(&mp->mii, cmd);
	spin_unlock_irq(&mp->lock);

	/* The PHY may support 1000baseT_Half, but the mv643xx does not */
	cmd->supported &= ~SUPPORTED_1000baseT_Half;
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	return err;
}

998
/*
 * ethtool .set_settings: program new link settings into the PHY via the
 * generic MII library, under the port lock.  Returns the MII lib's
 * error code (0 on success).
 */
static int mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int err;

	spin_lock_irq(&mp->lock);
	err = mii_ethtool_sset(&mp->mii, cmd);
	spin_unlock_irq(&mp->lock);

	return err;
}
L
Linus Torvalds 已提交
1009

1010
static void mv643xx_eth_get_drvinfo(struct net_device *netdev,
1011 1012
				struct ethtool_drvinfo *drvinfo)
{
1013 1014
	strncpy(drvinfo->driver,  mv643xx_eth_driver_name, 32);
	strncpy(drvinfo->version, mv643xx_eth_driver_version, 32);
1015 1016
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, "mv643xx", 32);
1017
	drvinfo->n_stats = MV643XX_ETH_STATS_LEN;
1018
}
L
Linus Torvalds 已提交
1019

1020 1021
/*
 * ethtool .nway_reset: restart PHY autonegotiation through the generic
 * MII library.
 */
static int mv643xx_eth_nway_restart(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	return mii_nway_restart(&mp->mii);
}
L
Linus Torvalds 已提交
1026

1027 1028
/*
 * ethtool .get_link: return nonzero when the MII reports link up.
 */
static u32 mv643xx_eth_get_link(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	return mii_link_ok(&mp->mii);
}
L
Linus Torvalds 已提交
1033

1034
static void mv643xx_eth_get_strings(struct net_device *netdev, uint32_t stringset,
1035 1036 1037
				uint8_t *data)
{
	int i;
L
Linus Torvalds 已提交
1038

1039 1040
	switch(stringset) {
	case ETH_SS_STATS:
1041
		for (i=0; i < MV643XX_ETH_STATS_LEN; i++) {
1042
			memcpy(data + i * ETH_GSTRING_LEN,
1043 1044
				mv643xx_eth_gstrings_stats[i].stat_string,
				ETH_GSTRING_LEN);
1045 1046 1047 1048
		}
		break;
	}
}
L
Linus Torvalds 已提交
1049

1050
/*
 * ethtool .get_ethtool_stats: refresh the hardware MIB counters, then
 * copy every counter listed in mv643xx_eth_gstrings_stats[] into 'data'
 * (widening 32-bit counters to the 64-bit ethtool format).
 *
 * Fix: use the netdev_priv() accessor instead of dereferencing
 * netdev->priv directly — every other function in this file uses the
 * accessor, and direct ->priv access is wrong for netdevs whose private
 * area is allocated inline by alloc_etherdev().
 */
static void mv643xx_eth_get_ethtool_stats(struct net_device *netdev,
				struct ethtool_stats *stats, uint64_t *data)
{
	struct mv643xx_eth_private *mp = netdev_priv(netdev);
	int i;

	update_mib_counters(mp);

	for (i = 0; i < MV643XX_ETH_STATS_LEN; i++) {
		/* stat_offset/sizeof_stat describe a field of *mp */
		char *p = (char *)mp+mv643xx_eth_gstrings_stats[i].stat_offset;
		data[i] = (mv643xx_eth_gstrings_stats[i].sizeof_stat ==
			sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
	}
}
L
Linus Torvalds 已提交
1064

1065
static int mv643xx_eth_get_sset_count(struct net_device *netdev, int sset)
1066 1067 1068
{
	switch (sset) {
	case ETH_SS_STATS:
1069
		return MV643XX_ETH_STATS_LEN;
1070 1071 1072 1073
	default:
		return -EOPNOTSUPP;
	}
}
L
Linus Torvalds 已提交
1074

1075 1076 1077 1078
/* ethtool entry points for this driver. */
static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
	.get_settings           = mv643xx_eth_get_settings,
	.set_settings           = mv643xx_eth_set_settings,
	.get_drvinfo            = mv643xx_eth_get_drvinfo,
	.get_link               = mv643xx_eth_get_link,
	.set_sg			= ethtool_op_set_sg,
	.get_sset_count		= mv643xx_eth_get_sset_count,
	.get_ethtool_stats      = mv643xx_eth_get_ethtool_stats,
	.get_strings            = mv643xx_eth_get_strings,
	.nway_reset		= mv643xx_eth_nway_restart,
};
L
Linus Torvalds 已提交
1086

1087

1088
/* address handling *********************************************************/
1089
static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
1090 1091 1092 1093
{
	unsigned int port_num = mp->port_num;
	unsigned int mac_h;
	unsigned int mac_l;
L
Linus Torvalds 已提交
1094

1095 1096
	mac_h = rdl(mp, MAC_ADDR_HIGH(port_num));
	mac_l = rdl(mp, MAC_ADDR_LOW(port_num));
L
Linus Torvalds 已提交
1097

1098 1099 1100 1101 1102 1103
	addr[0] = (mac_h >> 24) & 0xff;
	addr[1] = (mac_h >> 16) & 0xff;
	addr[2] = (mac_h >> 8) & 0xff;
	addr[3] = mac_h & 0xff;
	addr[4] = (mac_l >> 8) & 0xff;
	addr[5] = mac_l & 0xff;
1104
}
L
Linus Torvalds 已提交
1105

1106
/*
 * Clear all hardware MAC filter tables for this port: the unicast table
 * (4 registers) and both multicast tables (64 registers each), so no
 * address is accepted until explicitly programmed.
 */
static void init_mac_tables(struct mv643xx_eth_private *mp)
{
	unsigned int port_num = mp->port_num;
	int table_index;

	/* Clear DA filter unicast table (Ex_dFUT) */
	for (table_index = 0; table_index <= 0xC; table_index += 4)
		wrl(mp, UNICAST_TABLE(port_num) + table_index, 0);

	for (table_index = 0; table_index <= 0xFC; table_index += 4) {
		/* Clear DA filter special multicast table (Ex_dFSMT) */
		wrl(mp, SPECIAL_MCAST_TABLE(port_num) + table_index, 0);
		/* Clear DA filter other multicast table (Ex_dFOMT) */
		wrl(mp, OTHER_MCAST_TABLE(port_num) + table_index, 0);
	}
}
1122

1123
/*
 * Mark one entry of a MAC filter table as "accept frame".  Each 32-bit
 * table register packs four one-byte entries, so the entry index selects
 * both the register (entry / 4) and the byte lane within it (entry % 4).
 */
static void set_filter_table_entry(struct mv643xx_eth_private *mp,
					    int table, unsigned char entry)
{
	unsigned int reg_off = (entry >> 2) << 2;	/* register holding this entry */
	unsigned int lane = entry & 3;			/* byte lane within that register */
	unsigned int val;

	val = rdl(mp, table + reg_off);
	val |= 0x01 << (8 * lane);			/* accept bit of that lane */
	wrl(mp, table + reg_off, val);
}

1139
/*
 * Program addr[0..5] as the port's unicast MAC address and flag the
 * matching unicast-filter-table entry (indexed by the low nibble of the
 * last address byte) as "accept".
 */
static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
{
	unsigned int port_num = mp->port_num;
	unsigned int mac_h;
	unsigned int mac_l;
	int table;

	mac_l = (addr[4] << 8) | (addr[5]);
	mac_h = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
							(addr[3] << 0);

	wrl(mp, MAC_ADDR_LOW(port_num), mac_l);
	wrl(mp, MAC_ADDR_HIGH(port_num), mac_h);

	/* Accept frames with this address */
	table = UNICAST_TABLE(port_num);
	set_filter_table_entry(mp, table, addr[5] & 0x0f);
}

1158
/*
 * Re-sync the hardware with dev->dev_addr: wipe all filter tables, then
 * program the current address as the only accepted unicast address.
 */
static void mv643xx_eth_update_mac_address(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	init_mac_tables(mp);
	uc_addr_set(mp, dev->dev_addr);
}
L
Linus Torvalds 已提交
1165

1166
static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
L
Linus Torvalds 已提交
1167
{
1168
	int i;
L
Linus Torvalds 已提交
1169

1170 1171 1172 1173
	for (i = 0; i < 6; i++)
		/* +2 is for the offset of the HW addr type */
		dev->dev_addr[i] = ((unsigned char *)addr)[i + 2];
	mv643xx_eth_update_mac_address(dev);
L
Linus Torvalds 已提交
1174 1175 1176
	return 0;
}

1177
/*
 * Enable reception of one multicast address.
 *
 * Addresses of the form 01:00:5E:00:00:xx go into the "special"
 * multicast table, indexed directly by the last byte.  All other
 * multicast addresses are hashed with a bit-by-bit CRC-8 (expanded by
 * hand below) and the result indexes the "other" multicast table.
 */
static void mc_addr(struct mv643xx_eth_private *mp, unsigned char *addr)
{
	unsigned int port_num = mp->port_num;
	unsigned int mac_h;
	unsigned int mac_l;
	unsigned char crc_result = 0;
	int table;
	int mac_array[48];
	int crc[8];
	int i;

	/* IPv4-style special multicast prefix: direct table lookup */
	if ((addr[0] == 0x01) && (addr[1] == 0x00) &&
	    (addr[2] == 0x5E) && (addr[3] == 0x00) && (addr[4] == 0x00)) {
		table = SPECIAL_MCAST_TABLE(port_num);
		set_filter_table_entry(mp, table, addr[5]);
		return;
	}

	/* Calculate CRC-8 out of the given address */
	mac_h = (addr[0] << 8) | (addr[1]);
	mac_l = (addr[2] << 24) | (addr[3] << 16) |
			(addr[4] << 8) | (addr[5] << 0);

	/* spread the 48 address bits into one int per bit */
	for (i = 0; i < 32; i++)
		mac_array[i] = (mac_l >> i) & 0x1;
	for (i = 32; i < 48; i++)
		mac_array[i] = (mac_h >> (i - 32)) & 0x1;

	/* each crc[] bit is the XOR of a fixed subset of address bits */
	crc[0] = mac_array[45] ^ mac_array[43] ^ mac_array[40] ^ mac_array[39] ^
		 mac_array[35] ^ mac_array[34] ^ mac_array[31] ^ mac_array[30] ^
		 mac_array[28] ^ mac_array[23] ^ mac_array[21] ^ mac_array[19] ^
		 mac_array[18] ^ mac_array[16] ^ mac_array[14] ^ mac_array[12] ^
		 mac_array[8]  ^ mac_array[7]  ^ mac_array[6]  ^ mac_array[0];

	crc[1] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^ mac_array[43] ^
		 mac_array[41] ^ mac_array[39] ^ mac_array[36] ^ mac_array[34] ^
		 mac_array[32] ^ mac_array[30] ^ mac_array[29] ^ mac_array[28] ^
		 mac_array[24] ^ mac_array[23] ^ mac_array[22] ^ mac_array[21] ^
		 mac_array[20] ^ mac_array[18] ^ mac_array[17] ^ mac_array[16] ^
		 mac_array[15] ^ mac_array[14] ^ mac_array[13] ^ mac_array[12] ^
		 mac_array[9]  ^ mac_array[6]  ^ mac_array[1]  ^ mac_array[0];

	crc[2] = mac_array[47] ^ mac_array[46] ^ mac_array[44] ^ mac_array[43] ^
		 mac_array[42] ^ mac_array[39] ^ mac_array[37] ^ mac_array[34] ^
		 mac_array[33] ^ mac_array[29] ^ mac_array[28] ^ mac_array[25] ^
		 mac_array[24] ^ mac_array[22] ^ mac_array[17] ^ mac_array[15] ^
		 mac_array[13] ^ mac_array[12] ^ mac_array[10] ^ mac_array[8]  ^
		 mac_array[6]  ^ mac_array[2]  ^ mac_array[1]  ^ mac_array[0];

	crc[3] = mac_array[47] ^ mac_array[45] ^ mac_array[44] ^ mac_array[43] ^
		 mac_array[40] ^ mac_array[38] ^ mac_array[35] ^ mac_array[34] ^
		 mac_array[30] ^ mac_array[29] ^ mac_array[26] ^ mac_array[25] ^
		 mac_array[23] ^ mac_array[18] ^ mac_array[16] ^ mac_array[14] ^
		 mac_array[13] ^ mac_array[11] ^ mac_array[9]  ^ mac_array[7]  ^
		 mac_array[3]  ^ mac_array[2]  ^ mac_array[1];

	crc[4] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^ mac_array[41] ^
		 mac_array[39] ^ mac_array[36] ^ mac_array[35] ^ mac_array[31] ^
		 mac_array[30] ^ mac_array[27] ^ mac_array[26] ^ mac_array[24] ^
		 mac_array[19] ^ mac_array[17] ^ mac_array[15] ^ mac_array[14] ^
		 mac_array[12] ^ mac_array[10] ^ mac_array[8]  ^ mac_array[4]  ^
		 mac_array[3]  ^ mac_array[2];

	crc[5] = mac_array[47] ^ mac_array[46] ^ mac_array[45] ^ mac_array[42] ^
		 mac_array[40] ^ mac_array[37] ^ mac_array[36] ^ mac_array[32] ^
		 mac_array[31] ^ mac_array[28] ^ mac_array[27] ^ mac_array[25] ^
		 mac_array[20] ^ mac_array[18] ^ mac_array[16] ^ mac_array[15] ^
		 mac_array[13] ^ mac_array[11] ^ mac_array[9]  ^ mac_array[5]  ^
		 mac_array[4]  ^ mac_array[3];

	crc[6] = mac_array[47] ^ mac_array[46] ^ mac_array[43] ^ mac_array[41] ^
		 mac_array[38] ^ mac_array[37] ^ mac_array[33] ^ mac_array[32] ^
		 mac_array[29] ^ mac_array[28] ^ mac_array[26] ^ mac_array[21] ^
		 mac_array[19] ^ mac_array[17] ^ mac_array[16] ^ mac_array[14] ^
		 mac_array[12] ^ mac_array[10] ^ mac_array[6]  ^ mac_array[5]  ^
		 mac_array[4];

	crc[7] = mac_array[47] ^ mac_array[44] ^ mac_array[42] ^ mac_array[39] ^
		 mac_array[38] ^ mac_array[34] ^ mac_array[33] ^ mac_array[30] ^
		 mac_array[29] ^ mac_array[27] ^ mac_array[22] ^ mac_array[20] ^
		 mac_array[18] ^ mac_array[17] ^ mac_array[15] ^ mac_array[13] ^
		 mac_array[11] ^ mac_array[7]  ^ mac_array[6]  ^ mac_array[5];

	/* reassemble the 8 CRC bits into the table index */
	for (i = 0; i < 8; i++)
		crc_result = crc_result | (crc[i] << i);

	table = OTHER_MCAST_TABLE(port_num);
	set_filter_table_entry(mp, table, crc_result);
}

1267
/*
 * Rebuild the two hardware multicast filter tables from the device's
 * current multicast list.  In promiscuous/all-multi mode every table
 * entry is set to "accept"; otherwise both tables are cleared and each
 * list entry is re-added via mc_addr().
 */
static void set_multicast_list(struct net_device *dev)
{

	struct dev_mc_list	*mc_list;
	int			i;
	int			table_index;
	struct mv643xx_eth_private	*mp = netdev_priv(dev);
	unsigned int		port_num = mp->port_num;

	/* If the device is in promiscuous mode or in all multicast mode,
	 * we will fully populate both multicast tables with accept.
	 * This is guaranteed to yield a match on all multicast addresses...
	 */
	if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI)) {
		for (table_index = 0; table_index <= 0xFC; table_index += 4) {
			/* Set all entries in DA filter special multicast
			 * table (Ex_dFSMT)
			 * Set for ETH_Q0 for now
			 * Bits
			 * 0	  Accept=1, Drop=0
			 * 3-1  Queue	 ETH_Q0=0
			 * 7-4  Reserved = 0;
			 */
			wrl(mp, SPECIAL_MCAST_TABLE(port_num) + table_index, 0x01010101);

			/* Set all entries in DA filter other multicast
			 * table (Ex_dFOMT)
			 * Set for ETH_Q0 for now
			 * Bits
			 * 0	  Accept=1, Drop=0
			 * 3-1  Queue	 ETH_Q0=0
			 * 7-4  Reserved = 0;
			 */
			wrl(mp, OTHER_MCAST_TABLE(port_num) + table_index, 0x01010101);
		}
		return;
	}

	/* We will clear out multicast tables every time we get the list.
	 * Then add the entire new list...
	 */
	for (table_index = 0; table_index <= 0xFC; table_index += 4) {
		/* Clear DA filter special multicast table (Ex_dFSMT) */
		wrl(mp, SPECIAL_MCAST_TABLE(port_num) + table_index, 0);

		/* Clear DA filter other multicast table (Ex_dFOMT) */
		wrl(mp, OTHER_MCAST_TABLE(port_num) + table_index, 0);
	}

	/* Get pointer to net_device multicast list and add each one... */
	for (i = 0, mc_list = dev->mc_list;
			(i < 256) && (mc_list != NULL) && (i < dev->mc_count);
			i++, mc_list = mc_list->next)
		if (mc_list->dmi_addrlen == 6)
			mc_addr(mp, mc_list->dmi_addr);
}

1324
static void mv643xx_eth_set_rx_mode(struct net_device *dev)
1325
{
1326
	struct mv643xx_eth_private *mp = netdev_priv(dev);
1327
	u32 config_reg;
L
Linus Torvalds 已提交
1328

1329
	config_reg = rdl(mp, PORT_CONFIG(mp->port_num));
1330
	if (dev->flags & IFF_PROMISC)
1331
		config_reg |= UNICAST_PROMISCUOUS_MODE;
1332
	else
1333
		config_reg &= ~UNICAST_PROMISCUOUS_MODE;
1334
	wrl(mp, PORT_CONFIG(mp->port_num), config_reg);
L
Linus Torvalds 已提交
1335

1336
	set_multicast_list(dev);
1337
}
1338 1339


1340
/* rx/tx queue initialisation ***********************************************/
1341
static void ether_init_rx_desc_ring(struct mv643xx_eth_private *mp)
1342
{
1343
	volatile struct rx_desc *p_rx_desc;
1344 1345 1346 1347
	int rx_desc_num = mp->rx_ring_size;
	int i;

	/* initialize the next_desc_ptr links in the Rx descriptors ring */
1348
	p_rx_desc = (struct rx_desc *)mp->rx_desc_area;
1349 1350
	for (i = 0; i < rx_desc_num; i++) {
		p_rx_desc[i].next_desc_ptr = mp->rx_desc_dma +
1351
			((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
1352 1353
	}

1354
	/* Save Rx desc pointer to driver struct. */
1355 1356
	mp->rx_curr_desc = 0;
	mp->rx_used_desc = 0;
L
Linus Torvalds 已提交
1357

1358
	mp->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc);
1359
}
1360

1361 1362
/*
 * Tear down the Rx side: stop the Rx queues, free every skb still
 * parked in the ring, then release the descriptor area (SRAM mapping or
 * coherent DMA memory, depending on how it was allocated).
 */
static void mv643xx_eth_free_rx_rings(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int curr;

	/* Stop RX Queues */
	mv643xx_eth_port_disable_rx(mp);

	/* Free preallocated skb's on RX rings */
	for (curr = 0; mp->rx_desc_count && curr < mp->rx_ring_size; curr++) {
		if (mp->rx_skb[curr]) {
			dev_kfree_skb(mp->rx_skb[curr]);
			mp->rx_desc_count--;
		}
	}

	if (mp->rx_desc_count)
		printk(KERN_ERR
			"%s: Error in freeing Rx Ring. %d skb's still"
			" stuck in RX Ring - ignoring them\n", dev->name,
			mp->rx_desc_count);
	/* Free RX ring */
	if (mp->rx_sram_size)
		iounmap(mp->rx_desc_area);
	else
		dma_free_coherent(NULL, mp->rx_desc_area_size,
				mp->rx_desc_area, mp->rx_desc_dma);
}
L
Linus Torvalds 已提交
1389

1390
static void ether_init_tx_desc_ring(struct mv643xx_eth_private *mp)
1391 1392
{
	int tx_desc_num = mp->tx_ring_size;
1393
	struct tx_desc *p_tx_desc;
1394
	int i;
L
Linus Torvalds 已提交
1395

1396
	/* Initialize the next_desc_ptr links in the Tx descriptors ring */
1397
	p_tx_desc = (struct tx_desc *)mp->tx_desc_area;
1398 1399
	for (i = 0; i < tx_desc_num; i++) {
		p_tx_desc[i].next_desc_ptr = mp->tx_desc_dma +
1400
			((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
1401 1402
	}

1403 1404
	mp->tx_curr_desc = 0;
	mp->tx_used_desc = 0;
1405

1406
	mp->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc);
1407
}
L
Linus Torvalds 已提交
1408

1409
/*
 * Reclaim transmitted Tx descriptors: unmap the DMA buffer, free the
 * skb (first-descriptor only carries the skb), and advance tx_used_desc.
 * With 'force' nonzero, descriptors still owned by the DMA engine are
 * reclaimed too (used at teardown).  The port lock is taken per
 * descriptor and dropped before the unmap/free calls, which may not be
 * made under a spinlock.  Returns nonzero if at least one descriptor
 * was released.
 */
static int mv643xx_eth_free_tx_descs(struct net_device *dev, int force)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	struct tx_desc *desc;
	u32 cmd_sts;
	struct sk_buff *skb;
	unsigned long flags;
	int tx_index;
	dma_addr_t addr;
	int count;
	int released = 0;

	while (mp->tx_desc_count > 0) {
		spin_lock_irqsave(&mp->lock, flags);

		/* tx_desc_count might have changed before acquiring the lock */
		if (mp->tx_desc_count <= 0) {
			spin_unlock_irqrestore(&mp->lock, flags);
			return released;
		}

		tx_index = mp->tx_used_desc;
		desc = &mp->tx_desc_area[tx_index];
		cmd_sts = desc->cmd_sts;

		/* still owned by hardware and not forced: stop reclaiming */
		if (!force && (cmd_sts & BUFFER_OWNED_BY_DMA)) {
			spin_unlock_irqrestore(&mp->lock, flags);
			return released;
		}

		mp->tx_used_desc = (tx_index + 1) % mp->tx_ring_size;
		mp->tx_desc_count--;

		/* snapshot everything needed after the lock is dropped */
		addr = desc->buf_ptr;
		count = desc->byte_cnt;
		skb = mp->tx_skb[tx_index];
		if (skb)
			mp->tx_skb[tx_index] = NULL;

		if (cmd_sts & ERROR_SUMMARY) {
			printk("%s: Error in TX\n", dev->name);
			dev->stats.tx_errors++;
		}

		spin_unlock_irqrestore(&mp->lock, flags);

		/* first descriptor was mapped as single, others as page */
		if (cmd_sts & TX_FIRST_DESC)
			dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
		else
			dma_unmap_page(NULL, addr, count, DMA_TO_DEVICE);

		if (skb)
			dev_kfree_skb_irq(skb);

		released = 1;
	}

	return released;
}

1469
/*
 * Reclaim finished Tx descriptors and, if that freed enough ring space
 * for a maximally-fragmented skb, wake the transmit queue.
 */
static void mv643xx_eth_free_completed_tx_descs(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (mv643xx_eth_free_tx_descs(dev, 0) &&
	    mp->tx_ring_size - mp->tx_desc_count >= MAX_DESCS_PER_SKB)
		netif_wake_queue(dev);
}

1478
/*
 * Forcibly reclaim every Tx descriptor, including ones still marked as
 * owned by the DMA engine — only safe once transmit has been stopped.
 */
static void mv643xx_eth_free_all_tx_descs(struct net_device *dev)
{
	mv643xx_eth_free_tx_descs(dev, 1);
}
L
Linus Torvalds 已提交
1482

1483 1484
/*
 * Tear down the Tx side: stop the Tx queues, force-reclaim every
 * outstanding descriptor/skb, then release the descriptor area (SRAM
 * mapping or coherent DMA memory).
 */
static void mv643xx_eth_free_tx_rings(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	/* Stop Tx Queues */
	mv643xx_eth_port_disable_tx(mp);

	/* Free outstanding skb's on TX ring */
	mv643xx_eth_free_all_tx_descs(dev);

	/* after a full reclaim the ring must be empty */
	BUG_ON(mp->tx_used_desc != mp->tx_curr_desc);

	/* Free TX ring */
	if (mp->tx_sram_size)
		iounmap(mp->tx_desc_area);
	else
		dma_free_coherent(NULL, mp->tx_desc_area_size,
				mp->tx_desc_area, mp->tx_desc_dma);
}
L
Linus Torvalds 已提交
1502 1503


1504
/* netdev ops and related ***************************************************/
1505
static void port_reset(struct mv643xx_eth_private *mp);
L
Linus Torvalds 已提交
1506

1507 1508 1509
/*
 * Propagate new speed/duplex settings into the Port Serial Control
 * Register.  If the port is already enabled it must be disabled around
 * the change: Tx queues are stopped, the register is rewritten with the
 * port-enable bit cleared, then the new value is written (twice, as the
 * original code does) and Tx is re-enabled.
 */
static void mv643xx_eth_update_pscr(struct net_device *dev,
				    struct ethtool_cmd *ecmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int port_num = mp->port_num;
	u32 o_pscr, n_pscr;
	unsigned int queues;

	o_pscr = rdl(mp, PORT_SERIAL_CONTROL(port_num));
	n_pscr = o_pscr;

	/* clear speed, duplex and rx buffer size fields */
	n_pscr &= ~(SET_MII_SPEED_TO_100  |
		   SET_GMII_SPEED_TO_1000 |
		   SET_FULL_DUPLEX_MODE   |
		   MAX_RX_PACKET_MASK);

	if (ecmd->duplex == DUPLEX_FULL)
		n_pscr |= SET_FULL_DUPLEX_MODE;

	if (ecmd->speed == SPEED_1000)
		n_pscr |= SET_GMII_SPEED_TO_1000 |
			  MAX_RX_PACKET_9700BYTE;
	else {
		if (ecmd->speed == SPEED_100)
			n_pscr |= SET_MII_SPEED_TO_100;
		n_pscr |= MAX_RX_PACKET_1522BYTE;
	}

	if (n_pscr != o_pscr) {
		if ((o_pscr & SERIAL_PORT_ENABLE) == 0)
			wrl(mp, PORT_SERIAL_CONTROL(port_num), n_pscr);
		else {
			/* port is live: quiesce Tx before reprogramming */
			queues = mv643xx_eth_port_disable_tx(mp);

			o_pscr &= ~SERIAL_PORT_ENABLE;
			wrl(mp, PORT_SERIAL_CONTROL(port_num), o_pscr);
			wrl(mp, PORT_SERIAL_CONTROL(port_num), n_pscr);
			wrl(mp, PORT_SERIAL_CONTROL(port_num), n_pscr);
			if (queues)
				mv643xx_eth_port_enable_tx(mp, queues);
		}
	}
}
1551

1552 1553 1554
/*
 * Port interrupt handler.  Reads and acknowledges the cause registers,
 * then dispatches: link/PHY changes update PSCR and carrier state, Rx
 * interrupts either schedule NAPI (with the port interrupt masked) or
 * drain the receive queue inline, and Tx-done interrupts reclaim
 * completed descriptors.  Returns IRQ_NONE when neither cause register
 * had a bit set (the line is shared and coalescing can fire spuriously).
 */
static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 int_cause, int_cause_ext = 0;
	unsigned int port_num = mp->port_num;

	/* Read interrupt cause registers */
	int_cause = rdl(mp, INT_CAUSE(port_num)) & (INT_RX | INT_EXT);
	if (int_cause & INT_EXT) {
		int_cause_ext = rdl(mp, INT_CAUSE_EXT(port_num))
				& (INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX);
		/* ack only the bits we are about to handle */
		wrl(mp, INT_CAUSE_EXT(port_num), ~int_cause_ext);
	}

	/* PHY status changed */
	if (int_cause_ext & (INT_EXT_LINK | INT_EXT_PHY)) {
		struct ethtool_cmd cmd;

		if (mii_link_ok(&mp->mii)) {
			/* link came up: resync PSCR with negotiated settings */
			mii_ethtool_gset(&mp->mii, &cmd);
			mv643xx_eth_update_pscr(dev, &cmd);
			mv643xx_eth_port_enable_tx(mp, 1);
			if (!netif_carrier_ok(dev)) {
				netif_carrier_on(dev);
				if (mp->tx_ring_size - mp->tx_desc_count >=
							MAX_DESCS_PER_SKB)
					netif_wake_queue(dev);
			}
		} else if (netif_carrier_ok(dev)) {
			netif_stop_queue(dev);
			netif_carrier_off(dev);
		}
	}

#ifdef MV643XX_ETH_NAPI
	if (int_cause & INT_RX) {
		/* schedule the NAPI poll routine to maintain port */
		wrl(mp, INT_MASK(port_num), 0x00000000);

		/* wait for previous write to complete */
		rdl(mp, INT_MASK(port_num));

		netif_rx_schedule(dev, &mp->napi);
	}
#else
	if (int_cause & INT_RX)
		mv643xx_eth_receive_queue(dev, INT_MAX);
#endif
	if (int_cause_ext & INT_EXT_TX)
		mv643xx_eth_free_completed_tx_descs(dev);

	/*
	 * If no real interrupt occured, exit.
	 * This can happen when using gigE interrupt coalescing mechanism.
	 */
	if ((int_cause == 0x0) && (int_cause_ext == 0x0))
		return IRQ_NONE;

	return IRQ_HANDLED;
}

1614
/*
 * Reset the PHY by setting the reset bit in MII register 0 (BMCR) and
 * busy-waiting until the PHY clears it again.
 *
 * NOTE(review): this loop has no timeout — it spins forever if the PHY
 * never clears bit 15.  Presumably a PHY is always present on probed
 * ports; confirm before relying on this in error paths.
 */
static void phy_reset(struct mv643xx_eth_private *mp)
{
	unsigned int phy_reg_data;

	/* Reset the PHY */
	read_smi_reg(mp, 0, &phy_reg_data);
	phy_reg_data |= 0x8000;	/* Set bit 15 to reset the PHY */
	write_smi_reg(mp, 0, phy_reg_data);

	/* wait for PHY to come out of reset */
	do {
		udelay(1);
		read_smi_reg(mp, 0, &phy_reg_data);
	} while (phy_reg_data & 0x8000);
}

1630
/*
 * Bring the port hardware up: point the hardware at the current Tx/Rx
 * descriptors, program the MAC address, set default port config, walk
 * the PSCR enable sequence, configure SDMA, enable Rx, clear the
 * bandwidth limit, and finally reset the PHY while preserving the
 * user's link settings across the reset.
 */
static void port_start(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	unsigned int port_num = mp->port_num;
	int tx_curr_desc, rx_curr_desc;
	u32 pscr;
	struct ethtool_cmd ethtool_cmd;

	/* Assignment of Tx CTRP of given queue */
	tx_curr_desc = mp->tx_curr_desc;
	wrl(mp, TXQ_CURRENT_DESC_PTR(port_num),
		(u32)((struct tx_desc *)mp->tx_desc_dma + tx_curr_desc));

	/* Assignment of Rx CRDP of given queue */
	rx_curr_desc = mp->rx_curr_desc;
	wrl(mp, RXQ_CURRENT_DESC_PTR(port_num),
		(u32)((struct rx_desc *)mp->rx_desc_dma + rx_curr_desc));

	/* Add the assigned Ethernet address to the port's address table */
	uc_addr_set(mp, dev->dev_addr);

	/*
	 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
	 * frames to RX queue #0.
	 */
	wrl(mp, PORT_CONFIG(port_num), 0x00000000);

	/*
	 * Treat BPDUs as normal multicasts, and disable partition mode.
	 */
	wrl(mp, PORT_CONFIG_EXT(port_num), 0x00000000);

	pscr = rdl(mp, PORT_SERIAL_CONTROL(port_num));

	/* disable the port before reprogramming serial control */
	pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS);
	wrl(mp, PORT_SERIAL_CONTROL(port_num), pscr);

	pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
		DISABLE_AUTO_NEG_SPEED_GMII    |
		DISABLE_AUTO_NEG_FOR_DUPLEX    |
		DO_NOT_FORCE_LINK_FAIL	   |
		SERIAL_PORT_CONTROL_RESERVED;

	wrl(mp, PORT_SERIAL_CONTROL(port_num), pscr);

	pscr |= SERIAL_PORT_ENABLE;
	wrl(mp, PORT_SERIAL_CONTROL(port_num), pscr);

	/* Assign port SDMA configuration */
	wrl(mp, SDMA_CONFIG(port_num), PORT_SDMA_CONFIG_DEFAULT_VALUE);

	/* Enable port Rx. */
	mv643xx_eth_port_enable_rx(mp, 1);

	/* Disable port bandwidth limits by clearing MTU register */
	wrl(mp, TX_BW_MTU(port_num), 0);

	/* save phy settings across reset */
	mv643xx_eth_get_settings(dev, &ethtool_cmd);
	phy_reset(mp);
	mv643xx_eth_set_settings(dev, &ethtool_cmd);
}

1693 1694
#ifdef MV643XX_ETH_COAL
static unsigned int set_rx_coal(struct mv643xx_eth_private *mp,
1695
					unsigned int delay)
L
Linus Torvalds 已提交
1696
{
1697
	unsigned int port_num = mp->port_num;
1698
	unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;
L
Linus Torvalds 已提交
1699

1700
	/* Set RX Coalescing mechanism */
1701
	wrl(mp, SDMA_CONFIG(port_num),
1702
		((coal & 0x3fff) << 8) |
1703
		(rdl(mp, SDMA_CONFIG(port_num))
1704
			& 0xffc000ff));
L
Linus Torvalds 已提交
1705

1706
	return coal;
L
Linus Torvalds 已提交
1707
}
1708
#endif
L
Linus Torvalds 已提交
1709

1710
/*
 * Program the Tx interrupt coalescing delay (in usec), converted to
 * 64-t_clk units, into the Tx FIFO urgent threshold register.  Returns
 * the value programmed.
 */
static unsigned int set_tx_coal(struct mv643xx_eth_private *mp,
					unsigned int delay)
{
	unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;

	/* Set TX Coalescing mechanism */
	wrl(mp, TX_FIFO_URGENT_THRESHOLD(mp->port_num), coal << 4);

	return coal;
}

1721
/*
 * One-time port initialisation: reset the port hardware and clear all
 * MAC filter tables.
 */
static void port_init(struct mv643xx_eth_private *mp)
{
	port_reset(mp);

	init_mac_tables(mp);
}

1728
static int mv643xx_eth_open(struct net_device *dev)
1729
{
1730
	struct mv643xx_eth_private *mp = netdev_priv(dev);
1731
	unsigned int port_num = mp->port_num;
1732 1733
	unsigned int size;
	int err;
1734

1735
	/* Clear any pending ethernet port interrupts */
1736 1737
	wrl(mp, INT_CAUSE(port_num), 0);
	wrl(mp, INT_CAUSE_EXT(port_num), 0);
1738
	/* wait for previous write to complete */
1739
	rdl(mp, INT_CAUSE_EXT(port_num));
1740 1741 1742 1743 1744 1745

	err = request_irq(dev->irq, mv643xx_eth_int_handler,
			IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
	if (err) {
		printk(KERN_ERR "%s: Can not assign IRQ\n", dev->name);
		return -EAGAIN;
1746 1747
	}

1748
	port_init(mp);
1749

1750 1751 1752
	memset(&mp->timeout, 0, sizeof(struct timer_list));
	mp->timeout.function = mv643xx_eth_rx_refill_descs_timer_wrapper;
	mp->timeout.data = (unsigned long)dev;
1753

1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768
	/* Allocate RX and TX skb rings */
	mp->rx_skb = kmalloc(sizeof(*mp->rx_skb) * mp->rx_ring_size,
								GFP_KERNEL);
	if (!mp->rx_skb) {
		printk(KERN_ERR "%s: Cannot allocate Rx skb ring\n", dev->name);
		err = -ENOMEM;
		goto out_free_irq;
	}
	mp->tx_skb = kmalloc(sizeof(*mp->tx_skb) * mp->tx_ring_size,
								GFP_KERNEL);
	if (!mp->tx_skb) {
		printk(KERN_ERR "%s: Cannot allocate Tx skb ring\n", dev->name);
		err = -ENOMEM;
		goto out_free_rx_skb;
	}
1769

1770 1771
	/* Allocate TX ring */
	mp->tx_desc_count = 0;
1772
	size = mp->tx_ring_size * sizeof(struct tx_desc);
1773
	mp->tx_desc_area_size = size;
1774

1775
	if (mp->tx_sram_size) {
1776
		mp->tx_desc_area = ioremap(mp->tx_sram_addr,
1777 1778 1779
							mp->tx_sram_size);
		mp->tx_desc_dma = mp->tx_sram_addr;
	} else
1780
		mp->tx_desc_area = dma_alloc_coherent(NULL, size,
1781 1782
							&mp->tx_desc_dma,
							GFP_KERNEL);
1783

1784
	if (!mp->tx_desc_area) {
1785 1786 1787 1788 1789
		printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
							dev->name, size);
		err = -ENOMEM;
		goto out_free_tx_skb;
	}
1790 1791
	BUG_ON((u32) mp->tx_desc_area & 0xf);	/* check 16-byte alignment */
	memset((void *)mp->tx_desc_area, 0, mp->tx_desc_area_size);
1792

1793
	ether_init_tx_desc_ring(mp);
1794

1795 1796
	/* Allocate RX ring */
	mp->rx_desc_count = 0;
1797
	size = mp->rx_ring_size * sizeof(struct rx_desc);
1798
	mp->rx_desc_area_size = size;
1799

1800
	if (mp->rx_sram_size) {
1801
		mp->rx_desc_area = ioremap(mp->rx_sram_addr,
1802 1803 1804
							mp->rx_sram_size);
		mp->rx_desc_dma = mp->rx_sram_addr;
	} else
1805
		mp->rx_desc_area = dma_alloc_coherent(NULL, size,
1806 1807
							&mp->rx_desc_dma,
							GFP_KERNEL);
1808

1809
	if (!mp->rx_desc_area) {
1810 1811 1812 1813 1814
		printk(KERN_ERR "%s: Cannot allocate Rx ring (size %d bytes)\n",
							dev->name, size);
		printk(KERN_ERR "%s: Freeing previously allocated TX queues...",
							dev->name);
		if (mp->rx_sram_size)
1815
			iounmap(mp->tx_desc_area);
1816 1817
		else
			dma_free_coherent(NULL, mp->tx_desc_area_size,
1818
					mp->tx_desc_area, mp->tx_desc_dma);
1819 1820 1821
		err = -ENOMEM;
		goto out_free_tx_skb;
	}
1822
	memset((void *)mp->rx_desc_area, 0, size);
1823

1824
	ether_init_rx_desc_ring(mp);
1825

1826
	mv643xx_eth_rx_refill_descs(dev);	/* Fill RX ring with skb's */
1827

1828
#ifdef MV643XX_ETH_NAPI
1829 1830
	napi_enable(&mp->napi);
#endif
1831

1832
	port_start(dev);
1833

1834
	/* Interrupt Coalescing */
1835

1836 1837
#ifdef MV643XX_ETH_COAL
	mp->rx_int_coal = set_rx_coal(mp, MV643XX_ETH_RX_COAL);
1838 1839
#endif

1840
	mp->tx_int_coal = set_tx_coal(mp, MV643XX_ETH_TX_COAL);
1841

1842
	/* Unmask phy and link status changes interrupts */
1843
	wrl(mp, INT_MASK_EXT(port_num), INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX);
1844

1845
	/* Unmask RX buffer and TX end interrupt */
1846
	wrl(mp, INT_MASK(port_num), INT_RX | INT_EXT);
1847

1848 1849 1850 1851 1852 1853 1854 1855 1856 1857
	return 0;

out_free_tx_skb:
	kfree(mp->tx_skb);
out_free_rx_skb:
	kfree(mp->rx_skb);
out_free_irq:
	free_irq(dev->irq, dev);

	return err;
1858 1859
}

1860
static void port_reset(struct mv643xx_eth_private *mp)
L
Linus Torvalds 已提交
1861
{
1862
	unsigned int port_num = mp->port_num;
1863
	unsigned int reg_data;
L
Linus Torvalds 已提交
1864

1865 1866
	mv643xx_eth_port_disable_tx(mp);
	mv643xx_eth_port_disable_rx(mp);
L
Linus Torvalds 已提交
1867

1868
	/* Clear all MIB counters */
1869
	clear_mib_counters(mp);
1870 1871

	/* Reset the Enable bit in the Configuration Register */
1872
	reg_data = rdl(mp, PORT_SERIAL_CONTROL(port_num));
1873 1874 1875
	reg_data &= ~(SERIAL_PORT_ENABLE		|
			DO_NOT_FORCE_LINK_FAIL	|
			FORCE_LINK_PASS);
1876
	wrl(mp, PORT_SERIAL_CONTROL(port_num), reg_data);
L
Linus Torvalds 已提交
1877 1878
}

1879
/*
 * Close the network interface: mask interrupts, stop NAPI and the TX
 * queue, reset the port hardware, free both descriptor rings and
 * release the IRQ.  The ordering matters: interrupts are masked
 * before the rings are torn down.  Always returns 0.
 */
static int mv643xx_eth_stop(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	unsigned int port_num = mp->port_num;

	/* Mask all interrupts on ethernet port */
	wrl(mp, INT_MASK(port_num), 0x00000000);
	/* wait for previous write to complete */
	rdl(mp, INT_MASK(port_num));

#ifdef MV643XX_ETH_NAPI
	napi_disable(&mp->napi);
#endif
	netif_carrier_off(dev);
	netif_stop_queue(dev);

	port_reset(mp);

	mv643xx_eth_free_tx_rings(dev);
	mv643xx_eth_free_rx_rings(dev);

	free_irq(dev->irq, dev);

	return 0;
}

1905
/*
 * Handle SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG ioctls by delegating to
 * the generic MII layer, which uses the mdio_read/mdio_write hooks
 * registered in mp->mii.
 */
static int mv643xx_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	return generic_mii_ioctl(&mp->mii, if_mii(ifr), cmd, NULL);
}

1912
static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
L
Linus Torvalds 已提交
1913
{
1914 1915
	if ((new_mtu > 9500) || (new_mtu < 64))
		return -EINVAL;
L
Linus Torvalds 已提交
1916

1917 1918 1919
	dev->mtu = new_mtu;
	if (!netif_running(dev))
		return 0;
L
Linus Torvalds 已提交
1920

1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933
	/*
	 * Stop and then re-open the interface. This will allocate RX
	 * skbs of the new MTU.
	 * There is a possible danger that the open will not succeed,
	 * due to memory being full, which might fail the open function.
	 */
	mv643xx_eth_stop(dev);
	if (mv643xx_eth_open(dev)) {
		printk(KERN_ERR "%s: Fatal error on opening device\n",
			dev->name);
	}

	return 0;
L
Linus Torvalds 已提交
1934 1935
}

1936
static void mv643xx_eth_tx_timeout_task(struct work_struct *ugly)
L
Linus Torvalds 已提交
1937
{
1938
	struct mv643xx_eth_private *mp = container_of(ugly, struct mv643xx_eth_private,
1939 1940
						  tx_timeout_task);
	struct net_device *dev = mp->dev;
L
Linus Torvalds 已提交
1941

1942 1943
	if (!netif_running(dev))
		return;
L
Linus Torvalds 已提交
1944

1945 1946
	netif_stop_queue(dev);

1947 1948
	port_reset(mp);
	port_start(dev);
1949 1950 1951 1952 1953 1954

	if (mp->tx_ring_size - mp->tx_desc_count >= MAX_DESCS_PER_SKB)
		netif_wake_queue(dev);
}

/*
 * netdev tx_timeout hook.  Called in softirq context, so the actual
 * port reset is deferred to the tx_timeout_task workqueue item.
 */
static void mv643xx_eth_tx_timeout(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	printk(KERN_INFO "%s: TX timeout  ", dev->name);

	/* Do the reset outside of interrupt context */
	schedule_work(&mp->tx_timeout_task);
}

1964
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Netpoll hook: mask port interrupts, invoke the interrupt handler
 * directly, then restore the interrupt mask.
 *
 * Fix: the mask was restored as INT_RX | INT_CAUSE_EXT, but
 * INT_CAUSE_EXT names a cause-register offset elsewhere in this
 * driver, not a mask bit; mv643xx_eth_open unmasks with
 * INT_RX | INT_EXT, so the same mask is restored here.
 */
static void mv643xx_eth_netpoll(struct net_device *netdev)
{
	struct mv643xx_eth_private *mp = netdev_priv(netdev);
	int port_num = mp->port_num;

	wrl(mp, INT_MASK(port_num), 0x00000000);
	/* wait for previous write to complete */
	rdl(mp, INT_MASK(port_num));

	mv643xx_eth_int_handler(netdev->irq, netdev);

	wrl(mp, INT_MASK(port_num), INT_RX | INT_EXT);
}
#endif
1979

1980
/*
 * MII read hook for the generic MII layer.
 *
 * NOTE(review): phy_id is ignored — read_smi_reg presumably addresses
 * the PHY configured for this port; confirm against its definition.
 */
static int mv643xx_eth_mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int val;

	read_smi_reg(mp, location, &val);
	return val;
}

1989
/*
 * MII write hook for the generic MII layer.
 *
 * NOTE(review): phy_id is ignored, mirroring mv643xx_eth_mdio_read —
 * the SMI helper presumably targets this port's configured PHY.
 */
static void mv643xx_eth_mdio_write(struct net_device *dev, int phy_id, int location, int val)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	write_smi_reg(mp, location, val);
}
1994 1995


1996
/* platform glue ************************************************************/
1997 1998 1999
/*
 * Program the controller's MBUS address-decode windows from the DRAM
 * target info supplied by the platform: first clear all six windows,
 * then map one window per DRAM chip-select.  The enable mask is
 * active-low (a cleared bit enables the window); the accumulated
 * protection bits are stashed in msp->win_protect for the per-port
 * WINDOW_PROTECT registers written at probe time.
 */
static void
mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
			      struct mbus_dram_target_info *dram)
{
	void __iomem *base = msp->base;
	u32 win_enable;
	u32 win_protect;
	int i;

	/* Disable and clear all six windows first. */
	for (i = 0; i < 6; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	win_enable = 0x3f;
	win_protect = 0;

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
			(cs->mbus_attr << 8) |
			dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		/* Clearing the bit enables window i; 2 bits of protection per window. */
		win_enable &= ~(1 << i);
		win_protect |= 3 << (2 * i);
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE);
	msp->win_protect = win_protect;
}

2032
static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2033
{
2034
	static int mv643xx_eth_version_printed = 0;
2035
	struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
2036
	struct mv643xx_eth_shared_private *msp;
2037 2038
	struct resource *res;
	int ret;
2039

2040
	if (!mv643xx_eth_version_printed++)
2041
		printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n");
2042

2043 2044 2045 2046
	ret = -EINVAL;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		goto out;
2047

2048 2049 2050 2051 2052 2053
	ret = -ENOMEM;
	msp = kmalloc(sizeof(*msp), GFP_KERNEL);
	if (msp == NULL)
		goto out;
	memset(msp, 0, sizeof(*msp));

2054 2055
	msp->base = ioremap(res->start, res->end - res->start + 1);
	if (msp->base == NULL)
2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077 2078
		goto out_free;

	spin_lock_init(&msp->phy_lock);
	msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;

	platform_set_drvdata(pdev, msp);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (pd != NULL && pd->dram != NULL)
		mv643xx_eth_conf_mbus_windows(msp, pd->dram);

	return 0;

out_free:
	kfree(msp);
out:
	return ret;
}

/*
 * Tear down the shared platform device: unmap the register window
 * and free the private state allocated in mv643xx_eth_shared_probe.
 */
static int mv643xx_eth_shared_remove(struct platform_device *pdev)
{
	struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);

	iounmap(msp->base);
	kfree(msp);

	return 0;
}

2087 2088 2089 2090 2091 2092 2093 2094 2095
/* Platform driver for the shared (per-controller) register block. */
static struct platform_driver mv643xx_eth_shared_driver = {
	.probe = mv643xx_eth_shared_probe,
	.remove = mv643xx_eth_shared_remove,
	.driver = {
		.name = MV643XX_ETH_SHARED_NAME,
		.owner	= THIS_MODULE,
	},
};

2096
static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
L
Linus Torvalds 已提交
2097
{
2098 2099
	u32 reg_data;
	int addr_shift = 5 * mp->port_num;
L
Linus Torvalds 已提交
2100

2101
	reg_data = rdl(mp, PHY_ADDR);
2102 2103
	reg_data &= ~(0x1f << addr_shift);
	reg_data |= (phy_addr & 0x1f) << addr_shift;
2104
	wrl(mp, PHY_ADDR, reg_data);
L
Linus Torvalds 已提交
2105 2106
}

2107
static int phy_addr_get(struct mv643xx_eth_private *mp)
L
Linus Torvalds 已提交
2108
{
2109
	unsigned int reg_data;
L
Linus Torvalds 已提交
2110

2111
	reg_data = rdl(mp, PHY_ADDR);
L
Linus Torvalds 已提交
2112

2113
	return ((reg_data >> (5 * mp->port_num)) & 0x1f);
L
Linus Torvalds 已提交
2114 2115
}

2116
/*
 * Probe for a PHY at the currently configured address by toggling
 * the auto-negotiation enable bit (0x1000) in MII register 0 and
 * checking that the change sticks.  The original bit value is
 * restored before returning.
 *
 * Returns 0 if a PHY responded, -ENODEV otherwise.
 */
static int phy_detect(struct mv643xx_eth_private *mp)
{
	unsigned int phy_reg_data0;
	int auto_neg;

	read_smi_reg(mp, 0, &phy_reg_data0);
	auto_neg = phy_reg_data0 & 0x1000;
	phy_reg_data0 ^= 0x1000;	/* invert auto_neg */
	write_smi_reg(mp, 0, phy_reg_data0);

	read_smi_reg(mp, 0, &phy_reg_data0);
	if ((phy_reg_data0 & 0x1000) == auto_neg)
		return -ENODEV;				/* change didn't take */

	/* Restore the original auto-negotiation setting. */
	phy_reg_data0 ^= 0x1000;
	write_smi_reg(mp, 0, phy_reg_data0);
	return 0;
}

2135 2136 2137
/*
 * Fill in an ethtool_cmd describing the requested link settings.
 *
 * @speed:  0 selects auto-negotiation (advertising 10/100, plus
 *          1000baseT-Full when the PHY supports GMII); any other
 *          value forces that speed/duplex with auto-negotiation off.
 */
static void mv643xx_init_ethtool_cmd(struct net_device *dev, int phy_address,
				     int speed, int duplex,
				     struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	memset(cmd, 0, sizeof(*cmd));

	cmd->port = PORT_MII;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = phy_address;

	if (speed == 0) {
		cmd->autoneg = AUTONEG_ENABLE;
		/* mii lib checks, but doesn't use speed on AUTONEG_ENABLE */
		cmd->speed = SPEED_100;
		cmd->advertising = ADVERTISED_10baseT_Half  |
				   ADVERTISED_10baseT_Full  |
				   ADVERTISED_100baseT_Half |
				   ADVERTISED_100baseT_Full;
		if (mp->mii.supports_gmii)
			cmd->advertising |= ADVERTISED_1000baseT_Full;
	} else {
		cmd->autoneg = AUTONEG_DISABLE;
		cmd->speed = speed;
		cmd->duplex = duplex;
	}
}

2164
/*
 * Probe one ethernet port: allocate the netdev, wire up the netdev
 * operations, pull configuration (MAC address, ring sizes, SRAM
 * areas, speed/duplex) from the platform data, detect and configure
 * the PHY, and register the device.  The init order matters: the
 * shared block's drvdata must already be set (shared probe runs
 * first), and PHY detection precedes register_netdev.
 */
static int mv643xx_eth_probe(struct platform_device *pdev)
{
	struct mv643xx_eth_platform_data *pd;
	int port_num;
	struct mv643xx_eth_private *mp;
	struct net_device *dev;
	u8 *p;
	struct resource *res;
	int err;
	struct ethtool_cmd cmd;
	int duplex = DUPLEX_HALF;
	int speed = 0;			/* default to auto-negotiation */
	DECLARE_MAC_BUF(mac);

	pd = pdev->dev.platform_data;
	if (pd == NULL) {
		printk(KERN_ERR "No mv643xx_eth_platform_data\n");
		return -ENODEV;
	}

	if (pd->shared == NULL) {
		printk(KERN_ERR "No mv643xx_eth_platform_data->shared\n");
		return -ENODEV;
	}

	dev = alloc_etherdev(sizeof(struct mv643xx_eth_private));
	if (!dev)
		return -ENOMEM;

	platform_set_drvdata(pdev, dev);

	mp = netdev_priv(dev);
	mp->dev = dev;
#ifdef MV643XX_ETH_NAPI
	netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 64);
#endif

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	BUG_ON(!res);
	dev->irq = res->start;

	/* netdev operations */
	dev->open = mv643xx_eth_open;
	dev->stop = mv643xx_eth_stop;
	dev->hard_start_xmit = mv643xx_eth_start_xmit;
	dev->set_mac_address = mv643xx_eth_set_mac_address;
	dev->set_multicast_list = mv643xx_eth_set_rx_mode;

	/* No need to Tx Timeout */
	dev->tx_timeout = mv643xx_eth_tx_timeout;

#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = mv643xx_eth_netpoll;
#endif

	dev->watchdog_timeo = 2 * HZ;
	dev->base_addr = 0;
	dev->change_mtu = mv643xx_eth_change_mtu;
	dev->do_ioctl = mv643xx_eth_do_ioctl;
	SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);

#ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX
#ifdef MAX_SKB_FRAGS
	/*
	 * Zero copy can only work if we use Discovery II memory. Else, we will
	 * have to map the buffers to ISA memory which is only 16 MB
	 */
	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
#endif
#endif

	/* Configure the timeout task */
	INIT_WORK(&mp->tx_timeout_task, mv643xx_eth_tx_timeout_task);

	spin_lock_init(&mp->lock);

	/* Shared (per-controller) state set up by mv643xx_eth_shared_probe. */
	mp->shared = platform_get_drvdata(pd->shared);
	port_num = mp->port_num = pd->port_number;

	if (mp->shared->win_protect)
		wrl(mp, WINDOW_PROTECT(port_num), mp->shared->win_protect);

	/* SMI accesses may go through a different controller's registers. */
	mp->shared_smi = mp->shared;
	if (pd->shared_smi != NULL)
		mp->shared_smi = platform_get_drvdata(pd->shared_smi);

	/* set default config values */
	uc_addr_get(mp, dev->dev_addr);
	mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
	mp->tx_ring_size = DEFAULT_TX_QUEUE_SIZE;

	/* Platform data overrides the defaults where provided. */
	if (is_valid_ether_addr(pd->mac_addr))
		memcpy(dev->dev_addr, pd->mac_addr, 6);

	if (pd->phy_addr || pd->force_phy_addr)
		phy_addr_set(mp, pd->phy_addr);

	if (pd->rx_queue_size)
		mp->rx_ring_size = pd->rx_queue_size;

	if (pd->tx_queue_size)
		mp->tx_ring_size = pd->tx_queue_size;

	if (pd->tx_sram_size) {
		mp->tx_sram_size = pd->tx_sram_size;
		mp->tx_sram_addr = pd->tx_sram_addr;
	}

	if (pd->rx_sram_size) {
		mp->rx_sram_size = pd->rx_sram_size;
		mp->rx_sram_addr = pd->rx_sram_addr;
	}

	duplex = pd->duplex;
	speed = pd->speed;

	/* Hook up MII support for ethtool */
	mp->mii.dev = dev;
	mp->mii.mdio_read = mv643xx_eth_mdio_read;
	mp->mii.mdio_write = mv643xx_eth_mdio_write;
	mp->mii.phy_id = phy_addr_get(mp);
	mp->mii.phy_id_mask = 0x3f;
	mp->mii.reg_num_mask = 0x1f;

	err = phy_detect(mp);
	if (err) {
		pr_debug("%s: No PHY detected at addr %d\n",
				dev->name, phy_addr_get(mp));
		goto out;
	}

	phy_reset(mp);
	mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii);
	mv643xx_init_ethtool_cmd(dev, mp->mii.phy_id, speed, duplex, &cmd);
	mv643xx_eth_update_pscr(dev, &cmd);
	mv643xx_eth_set_settings(dev, &cmd);

	SET_NETDEV_DEV(dev, &pdev->dev);
	err = register_netdev(dev);
	if (err)
		goto out;

	p = dev->dev_addr;
	printk(KERN_NOTICE
		"%s: port %d with MAC address %s\n",
		dev->name, port_num, print_mac(mac, p));

	if (dev->features & NETIF_F_SG)
		printk(KERN_NOTICE "%s: Scatter Gather Enabled\n", dev->name);

	if (dev->features & NETIF_F_IP_CSUM)
		printk(KERN_NOTICE "%s: TX TCP/IP Checksumming Supported\n",
								dev->name);

#ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX
	printk(KERN_NOTICE "%s: RX TCP/UDP Checksum Offload ON \n", dev->name);
#endif

#ifdef MV643XX_ETH_COAL
	printk(KERN_NOTICE "%s: TX and RX Interrupt Coalescing ON \n",
								dev->name);
#endif

#ifdef MV643XX_ETH_NAPI
	printk(KERN_NOTICE "%s: RX NAPI Enabled \n", dev->name);
#endif

	if (mp->tx_sram_size > 0)
		printk(KERN_NOTICE "%s: Using SRAM\n", dev->name);

	return 0;

out:
	free_netdev(dev);

	return err;
}

2341
/*
 * Remove one ethernet port: unregister the netdev, wait for any
 * pending tx_timeout work to finish, then free the device.
 */
static int mv643xx_eth_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);

	unregister_netdev(dev);
	flush_scheduled_work();

	free_netdev(dev);
	platform_set_drvdata(pdev, NULL);
	return 0;
}

2353
/*
 * Platform shutdown hook: mask the port's interrupts and reset the
 * hardware so DMA is quiesced before the system goes down.
 */
static void mv643xx_eth_shutdown(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	unsigned int port_num = mp->port_num;

	/* Mask all interrupts on ethernet port */
	wrl(mp, INT_MASK(port_num), 0);
	/* read back to ensure the write has reached the hardware */
	rdl(mp, INT_MASK(port_num));

	port_reset(mp);
}

2366 2367 2368 2369 2370 2371 2372 2373 2374 2375
/* Platform driver for the individual ethernet ports. */
static struct platform_driver mv643xx_eth_driver = {
	.probe = mv643xx_eth_probe,
	.remove = mv643xx_eth_remove,
	.shutdown = mv643xx_eth_shutdown,
	.driver = {
		.name = MV643XX_ETH_NAME,
		.owner	= THIS_MODULE,
	},
};

2376
/*
 * Module init: register the shared-block driver first (port probes
 * depend on its drvdata), then the per-port driver.  If the per-port
 * registration fails, the shared driver is unregistered again.
 */
static int __init mv643xx_eth_init_module(void)
{
	int rc;

	rc = platform_driver_register(&mv643xx_eth_shared_driver);
	if (rc)
		return rc;

	rc = platform_driver_register(&mv643xx_eth_driver);
	if (rc)
		platform_driver_unregister(&mv643xx_eth_shared_driver);

	return rc;
}

2389
/* Module exit: unregister drivers in reverse registration order. */
static void __exit mv643xx_eth_cleanup_module(void)
{
	platform_driver_unregister(&mv643xx_eth_driver);
	platform_driver_unregister(&mv643xx_eth_shared_driver);
}

2395 2396
module_init(mv643xx_eth_init_module);
module_exit(mv643xx_eth_cleanup_module);

/* Module metadata; the aliases enable autoloading on platform-device match. */
MODULE_LICENSE("GPL");
MODULE_AUTHOR(	"Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, Manish Lachwani"
		" and Dale Farnsworth");
MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
MODULE_ALIAS("platform:" MV643XX_ETH_NAME);
MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);