mv643xx_eth.c 64.6 KB
Newer Older
L
Linus Torvalds 已提交
1
/*
2
 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
L
Linus Torvalds 已提交
3 4 5
 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
 *
 * Based on the 64360 driver from:
6 7
 * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
 *		      Rabeeh Khoury <rabeeh@marvell.com>
L
Linus Torvalds 已提交
8 9
 *
 * Copyright (C) 2003 PMC-Sierra, Inc.,
10
 *	written by Manish Lachwani
L
Linus Torvalds 已提交
11 12 13
 *
 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
 *
14
 * Copyright (C) 2004-2006 MontaVista Software, Inc.
L
Linus Torvalds 已提交
15 16 17 18 19
 *			   Dale Farnsworth <dale@farnsworth.org>
 *
 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
 *				     <sjhill@realitydiluted.com>
 *
20 21 22
 * Copyright (C) 2007-2008 Marvell Semiconductor
 *			   Lennert Buytenhek <buytenh@marvell.com>
 *
L
Linus Torvalds 已提交
23 24 25 26 27 28 29 30 31 32 33 34 35 36
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
37

L
Linus Torvalds 已提交
38 39
#include <linux/init.h>
#include <linux/dma-mapping.h>
40
#include <linux/in.h>
L
Linus Torvalds 已提交
41 42 43 44 45
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
46
#include <linux/platform_device.h>
47 48 49 50 51 52
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
#include <linux/mv643xx_eth.h>
L
Linus Torvalds 已提交
53 54 55
#include <asm/io.h>
#include <asm/types.h>
#include <asm/system.h>
56

57 58
static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.0";
59

60 61 62 63
#define MV643XX_ETH_CHECKSUM_OFFLOAD_TX
#define MV643XX_ETH_NAPI
#define MV643XX_ETH_TX_FAST_REFILL
#undef	MV643XX_ETH_COAL
64

65 66 67
#define MV643XX_ETH_TX_COAL 100
#ifdef MV643XX_ETH_COAL
#define MV643XX_ETH_RX_COAL 100
68 69
#endif

70
#ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX
71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86
#define MAX_DESCS_PER_SKB	(MAX_SKB_FRAGS + 1)
#else
#define MAX_DESCS_PER_SKB	1
#endif

#define ETH_VLAN_HLEN		4
#define ETH_FCS_LEN		4
#define ETH_HW_IP_ALIGN		2		/* hw aligns IP header */
#define ETH_WRAPPER_LEN		(ETH_HW_IP_ALIGN + ETH_HLEN + \
					ETH_VLAN_HLEN + ETH_FCS_LEN)
#define ETH_RX_SKB_SIZE		(dev->mtu + ETH_WRAPPER_LEN + \
					dma_get_cache_alignment())

/*
 * Registers shared between all ports.
 */
87 88 89 90 91 92 93
#define PHY_ADDR			0x0000
#define SMI_REG				0x0004
#define WINDOW_BASE(w)			(0x0200 + ((w) << 3))
#define WINDOW_SIZE(w)			(0x0204 + ((w) << 3))
#define WINDOW_REMAP_HIGH(w)		(0x0280 + ((w) << 2))
#define WINDOW_BAR_ENABLE		0x0290
#define WINDOW_PROTECT(w)		(0x0294 + ((w) << 4))
94 95 96 97

/*
 * Per-port registers.
 */
98
#define PORT_CONFIG(p)			(0x0400 + ((p) << 10))
99
#define  UNICAST_PROMISCUOUS_MODE	0x00000001
100 101 102 103 104 105
#define PORT_CONFIG_EXT(p)		(0x0404 + ((p) << 10))
#define MAC_ADDR_LOW(p)			(0x0414 + ((p) << 10))
#define MAC_ADDR_HIGH(p)		(0x0418 + ((p) << 10))
#define SDMA_CONFIG(p)			(0x041c + ((p) << 10))
#define PORT_SERIAL_CONTROL(p)		(0x043c + ((p) << 10))
#define PORT_STATUS(p)			(0x0444 + ((p) << 10))
106
#define  TX_FIFO_EMPTY			0x00000400
107 108 109
#define TXQ_COMMAND(p)			(0x0448 + ((p) << 10))
#define TX_BW_MTU(p)			(0x0458 + ((p) << 10))
#define INT_CAUSE(p)			(0x0460 + ((p) << 10))
110 111
#define  INT_RX				0x00000804
#define  INT_EXT			0x00000002
112
#define INT_CAUSE_EXT(p)		(0x0464 + ((p) << 10))
113 114 115 116 117
#define  INT_EXT_LINK			0x00100000
#define  INT_EXT_PHY			0x00010000
#define  INT_EXT_TX_ERROR_0		0x00000100
#define  INT_EXT_TX_0			0x00000001
#define  INT_EXT_TX			0x00000101
118 119 120 121 122 123 124 125 126 127
#define INT_MASK(p)			(0x0468 + ((p) << 10))
#define INT_MASK_EXT(p)			(0x046c + ((p) << 10))
#define TX_FIFO_URGENT_THRESHOLD(p)	(0x0474 + ((p) << 10))
#define RXQ_CURRENT_DESC_PTR(p)		(0x060c + ((p) << 10))
#define RXQ_COMMAND(p)			(0x0680 + ((p) << 10))
#define TXQ_CURRENT_DESC_PTR(p)		(0x06c0 + ((p) << 10))
#define MIB_COUNTERS(p)			(0x1000 + ((p) << 7))
#define SPECIAL_MCAST_TABLE(p)		(0x1400 + ((p) << 10))
#define OTHER_MCAST_TABLE(p)		(0x1500 + ((p) << 10))
#define UNICAST_TABLE(p)		(0x1600 + ((p) << 10))
128

129 130 131 132

/*
 * SDMA configuration register.
 */
133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151
#define RX_BURST_SIZE_4_64BIT		(2 << 1)
#define BLM_RX_NO_SWAP			(1 << 4)
#define BLM_TX_NO_SWAP			(1 << 5)
#define TX_BURST_SIZE_4_64BIT		(2 << 22)

#if defined(__BIG_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		RX_BURST_SIZE_4_64BIT	|	\
		TX_BURST_SIZE_4_64BIT
#elif defined(__LITTLE_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		RX_BURST_SIZE_4_64BIT	|	\
		BLM_RX_NO_SWAP		|	\
		BLM_TX_NO_SWAP		|	\
		TX_BURST_SIZE_4_64BIT
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif

152 153 154 155 156 157 158

/*
 * Port serial control register.
 */
#define SET_MII_SPEED_TO_100			(1 << 24)
#define SET_GMII_SPEED_TO_1000			(1 << 23)
#define SET_FULL_DUPLEX_MODE			(1 << 21)
159 160 161
#define MAX_RX_PACKET_1522BYTE			(1 << 17)
#define MAX_RX_PACKET_9700BYTE			(5 << 17)
#define MAX_RX_PACKET_MASK			(7 << 17)
162 163 164 165 166 167 168
#define DISABLE_AUTO_NEG_SPEED_GMII		(1 << 13)
#define DO_NOT_FORCE_LINK_FAIL			(1 << 10)
#define SERIAL_PORT_CONTROL_RESERVED		(1 << 9)
#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL		(1 << 3)
#define DISABLE_AUTO_NEG_FOR_DUPLEX		(1 << 2)
#define FORCE_LINK_PASS				(1 << 1)
#define SERIAL_PORT_ENABLE			(1 << 0)
169

170 171
#define DEFAULT_RX_QUEUE_SIZE		400
#define DEFAULT_TX_QUEUE_SIZE		800
172 173

/* SMI reg */
#define SMI_BUSY		0x10000000	/* set while an SMI op is in progress */
#define SMI_READ_VALID		0x08000000	/* read data ready in low 16 bits */
#define SMI_OPCODE_WRITE	0		/* opcode bit clear: register write */
#define SMI_OPCODE_READ		0x04000000	/* opcode bit set: register read */
178 179


180 181
/*
 * RX/TX descriptors.
 *
 * Field order differs between big- and little-endian builds; presumably
 * so the device sees the same byte layout either way (the SDMA byte-swap
 * bits are only set for little-endian) — TODO confirm against datasheet.
 */
#if defined(__BIG_ENDIAN)
struct rx_desc {
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u16 buf_size;		/* Buffer size				*/
	u32 cmd_sts;		/* Descriptor command status		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
};

struct tx_desc {
	u16 byte_cnt;		/* buffer byte count			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u32 cmd_sts;		/* Command/status field			*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
};
#elif defined(__LITTLE_ENDIAN)
struct rx_desc {
	u32 cmd_sts;		/* Descriptor command status		*/
	u16 buf_size;		/* Buffer size				*/
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
};

struct tx_desc {
	u32 cmd_sts;		/* Command/status field			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u16 byte_cnt;		/* buffer byte count			*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
};
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif

219
/* RX & TX descriptor command */
220
#define BUFFER_OWNED_BY_DMA		0x80000000
221 222

/* RX & TX descriptor status */
223
#define ERROR_SUMMARY			0x00000001
224 225

/* RX descriptor status */
226 227 228 229
#define LAYER_4_CHECKSUM_OK		0x40000000
#define RX_ENABLE_INTERRUPT		0x20000000
#define RX_FIRST_DESC			0x08000000
#define RX_LAST_DESC			0x04000000
230 231

/* TX descriptor command */
232 233 234 235 236 237 238 239
#define TX_ENABLE_INTERRUPT		0x00800000
#define GEN_CRC				0x00400000
#define TX_FIRST_DESC			0x00200000
#define TX_LAST_DESC			0x00100000
#define ZERO_PADDING			0x00080000
#define GEN_IP_V4_CHECKSUM		0x00040000
#define GEN_TCP_UDP_CHECKSUM		0x00020000
#define UDP_FRAME			0x00010000
240

241
#define TX_IHL_SHIFT			11
242 243


244
/* global *******************************************************************/
/*
 * State shared by all ports on one MV643xx silicon instance: the mapped
 * register block, the SMI (PHY management) lock, window protection bits
 * and the core clock rate.
 */
struct mv643xx_eth_shared_private {
	void __iomem *base;	/* mapped base of the shared register block */

	/* used to protect SMI_REG, which is shared across ports */
	spinlock_t phy_lock;

	u32 win_protect;	/* access-protection bits for address windows */

	unsigned int t_clk;	/* core clock frequency, used for coalescing math */
};


/* per-port *****************************************************************/
/*
 * Software image of the hardware MIB counter block; updated by
 * update_mib_counters().  The two octet counters are 64-bit because the
 * hardware exposes them as a low/high register pair.
 */
struct mib_counters {
	u64 good_octets_received;
	u32 bad_octets_received;
	u32 internal_mac_transmit_err;
	u32 good_frames_received;
	u32 bad_frames_received;
	u32 broadcast_frames_received;
	u32 multicast_frames_received;
	u32 frames_64_octets;
	u32 frames_65_to_127_octets;
	u32 frames_128_to_255_octets;
	u32 frames_256_to_511_octets;
	u32 frames_512_to_1023_octets;
	u32 frames_1024_to_max_octets;
	u64 good_octets_sent;
	u32 good_frames_sent;
	u32 excessive_collision;
	u32 multicast_frames_sent;
	u32 broadcast_frames_sent;
	u32 unrec_mac_control_received;
	u32 fc_sent;
	u32 good_fc_received;
	u32 bad_fc_received;
	u32 undersize_received;
	u32 fragments_received;
	u32 oversize_received;
	u32 jabber_received;
	u32 mac_receive_error;
	u32 bad_crc_event;
	u32 collision;
	u32 late_collision;
};

/*
 * Per-port driver state: descriptor rings, ring indexes, statistics and
 * the lock that protects ring manipulation.
 */
struct mv643xx_eth_private {
	struct mv643xx_eth_shared_private *shared;
	int port_num;			/* User Ethernet port number	*/

	struct mv643xx_eth_shared_private *shared_smi;

	u32 rx_sram_addr;		/* Base address of rx sram area */
	u32 rx_sram_size;		/* Size of rx sram area		*/
	u32 tx_sram_addr;		/* Base address of tx sram area */
	u32 tx_sram_size;		/* Size of tx sram area		*/

	/* Tx/Rx rings management indexes fields. For driver use */

	/* Next available and first returning Rx resource */
	int rx_curr_desc, rx_used_desc;

	/* Next available and first returning Tx resource */
	int tx_curr_desc, tx_used_desc;

#ifdef MV643XX_ETH_TX_FAST_REFILL
	/* poll counter used to rate-limit tx descriptor reclaim */
	u32 tx_clean_threshold;
#endif

	struct rx_desc *rx_desc_area;	/* rx descriptor ring (DMA coherent) */
	dma_addr_t rx_desc_dma;
	int rx_desc_area_size;
	struct sk_buff **rx_skb;	/* skb attached to each rx descriptor */

	struct tx_desc *tx_desc_area;	/* tx descriptor ring (DMA coherent) */
	dma_addr_t tx_desc_dma;
	int tx_desc_area_size;
	struct sk_buff **tx_skb;	/* skb to free when a tx descriptor completes */

	struct work_struct tx_timeout_task;

	struct net_device *dev;
	struct napi_struct napi;
	struct net_device_stats stats;
	struct mib_counters mib_counters;
	spinlock_t lock;		/* protects ring indexes and counts */
	/* Size of Tx Ring per queue */
	int tx_ring_size;
	/* Number of tx descriptors in use */
	int tx_desc_count;
	/* Size of Rx Ring per queue */
	int rx_ring_size;
	/* Number of rx descriptors in use */
	int rx_desc_count;

	/*
	 * Used in case RX Ring is empty, which can be caused when
	 * system does not have resources (skb's)
	 */
	struct timer_list timeout;

	u32 rx_int_coal;		/* rx interrupt coalescing setting */
	u32 tx_int_coal;		/* tx interrupt coalescing setting */
	struct mii_if_info mii;
};
L
Linus Torvalds 已提交
350

351

352
/* port register accessors **************************************************/
353
static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
354
{
355
	return readl(mp->shared->base + offset);
356
}
357

358
static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
359
{
360
	writel(data, mp->shared->base + offset);
361
}
362 363


364
/* rxq/txq helper functions *************************************************/
365
static void mv643xx_eth_port_enable_rx(struct mv643xx_eth_private *mp,
366 367
					unsigned int queues)
{
368
	wrl(mp, RXQ_COMMAND(mp->port_num), queues);
369
}
370

371
static unsigned int mv643xx_eth_port_disable_rx(struct mv643xx_eth_private *mp)
372 373 374
{
	unsigned int port_num = mp->port_num;
	u32 queues;
375

376
	/* Stop Rx port activity. Check port Rx activity. */
377
	queues = rdl(mp, RXQ_COMMAND(port_num)) & 0xFF;
378 379
	if (queues) {
		/* Issue stop command for active queues only */
380
		wrl(mp, RXQ_COMMAND(port_num), (queues << 8));
L
Linus Torvalds 已提交
381

382 383
		/* Wait for all Rx activity to terminate. */
		/* Check port cause register that all Rx queues are stopped */
384
		while (rdl(mp, RXQ_COMMAND(port_num)) & 0xFF)
385
			udelay(10);
386
	}
L
Linus Torvalds 已提交
387

388 389 390
	return queues;
}

391
static void mv643xx_eth_port_enable_tx(struct mv643xx_eth_private *mp,
392
					unsigned int queues)
L
Linus Torvalds 已提交
393
{
394
	wrl(mp, TXQ_COMMAND(mp->port_num), queues);
L
Linus Torvalds 已提交
395 396
}

397
static unsigned int mv643xx_eth_port_disable_tx(struct mv643xx_eth_private *mp)
L
Linus Torvalds 已提交
398
{
399 400 401 402
	unsigned int port_num = mp->port_num;
	u32 queues;

	/* Stop Tx port activity. Check port Tx activity. */
403
	queues = rdl(mp, TXQ_COMMAND(port_num)) & 0xFF;
404 405
	if (queues) {
		/* Issue stop command for active queues only */
406
		wrl(mp, TXQ_COMMAND(port_num), (queues << 8));
407 408 409

		/* Wait for all Tx activity to terminate. */
		/* Check port cause register that all Tx queues are stopped */
410
		while (rdl(mp, TXQ_COMMAND(port_num)) & 0xFF)
411
			udelay(10);
412 413

		/* Wait for Tx FIFO to empty */
414
		while (rdl(mp, PORT_STATUS(port_num)) & TX_FIFO_EMPTY)
415
			udelay(10);
416 417 418
	}

	return queues;
L
Linus Torvalds 已提交
419 420
}

421 422 423 424

/* rx ***********************************************************************/
static void mv643xx_eth_free_completed_tx_descs(struct net_device *dev);

/*
 * Refill the rx ring with freshly allocated, cache-aligned skbs until it
 * is full or allocation fails.  Each filled descriptor is handed to the
 * DMA engine.  If the ring could not be populated at all, a timer is
 * armed to retry later.  Takes mp->lock.
 */
static void mv643xx_eth_rx_refill_descs(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&mp->lock, flags);

	while (mp->rx_desc_count < mp->rx_ring_size) {
		struct sk_buff *skb;
		int unaligned;
		int rx;

		/* Over-allocate so the data pointer can be cache-aligned. */
		skb = dev_alloc_skb(ETH_RX_SKB_SIZE + dma_get_cache_alignment());
		if (skb == NULL)
			break;

		/* Align skb->data to a cache line for DMA. */
		unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
		if (unaligned)
			skb_reserve(skb, dma_get_cache_alignment() - unaligned);

		mp->rx_desc_count++;
		rx = mp->rx_used_desc;
		mp->rx_used_desc = (rx + 1) % mp->rx_ring_size;

		mp->rx_desc_area[rx].buf_ptr = dma_map_single(NULL,
							skb->data,
							ETH_RX_SKB_SIZE,
							DMA_FROM_DEVICE);
		mp->rx_desc_area[rx].buf_size = ETH_RX_SKB_SIZE;
		mp->rx_skb[rx] = skb;
		/* Buffer fields must be visible before ownership transfers. */
		wmb();
		mp->rx_desc_area[rx].cmd_sts = BUFFER_OWNED_BY_DMA |
						RX_ENABLE_INTERRUPT;
		wmb();

		/* Reserve headroom so the IP header ends up aligned. */
		skb_reserve(skb, ETH_HW_IP_ALIGN);
	}

	/* Ring still empty (no memory): retry from a timer in 100ms. */
	if (mp->rx_desc_count == 0) {
		mp->timeout.expires = jiffies + (HZ / 10);
		add_timer(&mp->timeout);
	}

	spin_unlock_irqrestore(&mp->lock, flags);
}

/* Timer callback: retry refilling the rx ring after an allocation failure. */
static inline void mv643xx_eth_rx_refill_descs_timer_wrapper(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;

	mv643xx_eth_rx_refill_descs(dev);
}

/*
 * Process up to 'budget' completed rx descriptors: unmap each buffer,
 * update statistics, drop errored/fragmented packets and hand good ones
 * to the network stack.  Finally tops the ring back up with fresh skbs.
 * Returns the number of descriptors processed.
 */
static int mv643xx_eth_receive_queue(struct net_device *dev, int budget)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	unsigned int received_packets = 0;

	while (budget-- > 0) {
		struct sk_buff *skb;
		volatile struct rx_desc *rx_desc;
		unsigned int cmd_sts;
		unsigned long flags;

		spin_lock_irqsave(&mp->lock, flags);

		rx_desc = &mp->rx_desc_area[mp->rx_curr_desc];

		cmd_sts = rx_desc->cmd_sts;
		/* Descriptor still owned by the DMA engine: nothing more to do. */
		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
			spin_unlock_irqrestore(&mp->lock, flags);
			break;
		}
		/* Read descriptor contents only after seeing ownership clear. */
		rmb();

		skb = mp->rx_skb[mp->rx_curr_desc];
		mp->rx_skb[mp->rx_curr_desc] = NULL;

		mp->rx_curr_desc = (mp->rx_curr_desc + 1) % mp->rx_ring_size;

		spin_unlock_irqrestore(&mp->lock, flags);

		dma_unmap_single(NULL, rx_desc->buf_ptr + ETH_HW_IP_ALIGN,
					ETH_RX_SKB_SIZE, DMA_FROM_DEVICE);
		mp->rx_desc_count--;
		received_packets++;

		/*
		 * Update statistics.
		 * Note byte count includes 4 byte CRC count
		 */
		stats->rx_packets++;
		stats->rx_bytes += rx_desc->byte_cnt - ETH_HW_IP_ALIGN;

		/*
		 * In case received a packet without first / last bits on OR
		 * the error summary bit is on, the packet needs to be dropped.
		 */
		if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
					(RX_FIRST_DESC | RX_LAST_DESC))
				|| (cmd_sts & ERROR_SUMMARY)) {
			stats->rx_dropped++;
			if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
				(RX_FIRST_DESC | RX_LAST_DESC)) {
				if (net_ratelimit())
					printk(KERN_ERR
						"%s: Received packet spread "
						"on multiple descriptors\n",
						dev->name);
			}
			if (cmd_sts & ERROR_SUMMARY)
				stats->rx_errors++;

			dev_kfree_skb_irq(skb);
		} else {
			/*
			 * The -4 is for the CRC in the trailer of the
			 * received packet
			 */
			skb_put(skb, rx_desc->byte_cnt - ETH_HW_IP_ALIGN - 4);

			if (cmd_sts & LAYER_4_CHECKSUM_OK) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				/* hardware-computed L4 checksum lives in cmd_sts bits 3-18 */
				skb->csum = htons(
					(cmd_sts & 0x0007fff8) >> 3);
			}
			skb->protocol = eth_type_trans(skb, dev);
#ifdef MV643XX_ETH_NAPI
			netif_receive_skb(skb);
#else
			netif_rx(skb);
#endif
		}
		dev->last_rx = jiffies;
	}
	mv643xx_eth_rx_refill_descs(dev);	/* Fill RX ring with skb's */

	return received_packets;
}

564 565
#ifdef MV643XX_ETH_NAPI
static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
566
{
567
	struct mv643xx_eth_private *mp = container_of(napi, struct mv643xx_eth_private, napi);
568 569 570
	struct net_device *dev = mp->dev;
	unsigned int port_num = mp->port_num;
	int work_done;
571

572
#ifdef MV643XX_ETH_TX_FAST_REFILL
573 574 575
	if (++mp->tx_clean_threshold > 5) {
		mv643xx_eth_free_completed_tx_descs(dev);
		mp->tx_clean_threshold = 0;
576
	}
577
#endif
578

579
	work_done = 0;
580
	if ((rdl(mp, RXQ_CURRENT_DESC_PTR(port_num)))
581
	    != (u32) mp->rx_used_desc)
582
		work_done = mv643xx_eth_receive_queue(dev, budget);
583

584 585
	if (work_done < budget) {
		netif_rx_complete(dev, napi);
586 587
		wrl(mp, INT_CAUSE(port_num), 0);
		wrl(mp, INT_CAUSE_EXT(port_num), 0);
588
		wrl(mp, INT_MASK(port_num), INT_RX | INT_EXT);
589
	}
590 591

	return work_done;
592
}
593
#endif
594

595 596 597

/* tx ***********************************************************************/
static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
L
Linus Torvalds 已提交
598
{
599 600
	unsigned int frag;
	skb_frag_t *fragp;
L
Linus Torvalds 已提交
601

602 603 604 605
	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
		fragp = &skb_shinfo(skb)->frags[frag];
		if (fragp->size <= 8 && fragp->page_offset & 0x7)
			return 1;
L
Linus Torvalds 已提交
606
	}
607 608
	return 0;
}
609

610
static int alloc_tx_desc_index(struct mv643xx_eth_private *mp)
611 612
{
	int tx_desc_curr;
613

614
	BUG_ON(mp->tx_desc_count >= mp->tx_ring_size);
L
Linus Torvalds 已提交
615

616 617
	tx_desc_curr = mp->tx_curr_desc;
	mp->tx_curr_desc = (tx_desc_curr + 1) % mp->tx_ring_size;
618

619
	BUG_ON(mp->tx_curr_desc == mp->tx_used_desc);
620

621 622
	return tx_desc_curr;
}
623

624
static void tx_fill_frag_descs(struct mv643xx_eth_private *mp,
625 626 627 628
				   struct sk_buff *skb)
{
	int frag;
	int tx_index;
629
	struct tx_desc *desc;
L
Linus Torvalds 已提交
630

631 632 633
	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
		skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];

634
		tx_index = alloc_tx_desc_index(mp);
635
		desc = &mp->tx_desc_area[tx_index];
636

637
		desc->cmd_sts = BUFFER_OWNED_BY_DMA;
638 639
		/* Last Frag enables interrupt and frees the skb */
		if (frag == (skb_shinfo(skb)->nr_frags - 1)) {
640 641 642
			desc->cmd_sts |= ZERO_PADDING |
					 TX_LAST_DESC |
					 TX_ENABLE_INTERRUPT;
643 644 645 646
			mp->tx_skb[tx_index] = skb;
		} else
			mp->tx_skb[tx_index] = NULL;

647
		desc = &mp->tx_desc_area[tx_index];
648 649 650 651 652 653 654
		desc->l4i_chk = 0;
		desc->byte_cnt = this_frag->size;
		desc->buf_ptr = dma_map_page(NULL, this_frag->page,
						this_frag->page_offset,
						this_frag->size,
						DMA_TO_DEVICE);
	}
L
Linus Torvalds 已提交
655 656
}

657 658 659 660
/*
 * Reinterpret a 16-bit checksum (__sum16) as big-endian without byte
 * swapping; both carry network byte order, so this is a pure type
 * conversion (the __force cast silences sparse).
 */
static inline __be16 sum16_as_be(__sum16 sum)
{
	return (__force __be16)sum;
}
L
Linus Torvalds 已提交
661

/*
 * Build the tx descriptor chain for one skb (head descriptor plus one
 * per page fragment), set up hardware checksum offload when requested,
 * hand ownership to the DMA engine and kick the tx queue.  Caller holds
 * mp->lock and has verified there is ring space for MAX_DESCS_PER_SKB.
 */
static void tx_submit_descs_for_skb(struct mv643xx_eth_private *mp,
					struct sk_buff *skb)
{
	int tx_index;
	struct tx_desc *desc;
	u32 cmd_sts;
	int length;
	int nr_frags = skb_shinfo(skb)->nr_frags;

	cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;

	tx_index = alloc_tx_desc_index(mp);
	desc = &mp->tx_desc_area[tx_index];

	if (nr_frags) {
		/* Fragments get their own descriptors; head covers the linear part. */
		tx_fill_frag_descs(mp, skb);

		length = skb_headlen(skb);
		mp->tx_skb[tx_index] = NULL;
	} else {
		/* Single descriptor: it is also the last one and frees the skb. */
		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
		length = skb->len;
		mp->tx_skb[tx_index] = skb;
	}

	desc->byte_cnt = length;
	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* Hardware checksum offload only supports IPv4 here. */
		BUG_ON(skb->protocol != htons(ETH_P_IP));

		cmd_sts |= GEN_TCP_UDP_CHECKSUM |
			   GEN_IP_V4_CHECKSUM   |
			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;

		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_UDP:
			cmd_sts |= UDP_FRAME;
			desc->l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
			break;
		case IPPROTO_TCP:
			desc->l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
			break;
		default:
			BUG();
		}
	} else {
		/* Errata BTS #50, IHL must be 5 if no HW checksum */
		cmd_sts |= 5 << TX_IHL_SHIFT;
		desc->l4i_chk = 0;
	}

	/* ensure all other descriptors are written before first cmd_sts */
	wmb();
	desc->cmd_sts = cmd_sts;

	/* ensure all descriptors are written before poking hardware */
	wmb();
	mv643xx_eth_port_enable_tx(mp, 1);

	mp->tx_desc_count += nr_frags + 1;
}

/*
 * netdev hard_start_xmit: linearize skbs the hardware cannot handle,
 * queue the packet's descriptors under mp->lock, and stop the tx queue
 * when there is no longer room for a maximally-fragmented skb.
 * Returns NETDEV_TX_OK on success, NETDEV_TX_BUSY on failure.
 */
static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	unsigned long flags;

	BUG_ON(netif_queue_stopped(dev));

	/* Hardware cannot DMA tiny unaligned fragments; flatten the skb. */
	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
		stats->tx_dropped++;
		printk(KERN_DEBUG "%s: failed to linearize tiny "
				"unaligned fragment\n", dev->name);
		return NETDEV_TX_BUSY;
	}

	spin_lock_irqsave(&mp->lock, flags);

	/* Should not happen: queue is stopped before the ring gets this full. */
	if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB) {
		printk(KERN_ERR "%s: transmit with queue full\n", dev->name);
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&mp->lock, flags);
		return NETDEV_TX_BUSY;
	}

	tx_submit_descs_for_skb(mp, skb);
	stats->tx_bytes += skb->len;
	stats->tx_packets++;
	dev->trans_start = jiffies;

	/* Stop the queue if another worst-case skb would not fit. */
	if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&mp->lock, flags);

	return NETDEV_TX_OK;
}

762 763

/* mii management interface *************************************************/
static int phy_addr_get(struct mv643xx_eth_private *mp);

/*
 * Read PHY register 'phy_reg' over the SMI bus into *value.  Serialized
 * by the shared phy_lock since all ports share one SMI register.  On
 * timeout *value is left untouched and a message is logged.
 */
static void read_smi_reg(struct mv643xx_eth_private *mp,
				unsigned int phy_reg, unsigned int *value)
{
	void __iomem *smi_reg = mp->shared_smi->base + SMI_REG;
	int phy_addr = phy_addr_get(mp);
	unsigned long flags;
	int i;

	/* the SMI register is a shared resource */
	spin_lock_irqsave(&mp->shared_smi->phy_lock, flags);

	/* wait for the SMI register to become available */
	for (i = 0; readl(smi_reg) & SMI_BUSY; i++) {
		if (i == 1000) {
			printk("%s: PHY busy timeout\n", mp->dev->name);
			goto out;
		}
		udelay(10);
	}

	/* bits 21-25: register address; bits 16-20: PHY address */
	writel((phy_addr << 16) | (phy_reg << 21) | SMI_OPCODE_READ, smi_reg);

	/* now wait for the data to be valid */
	for (i = 0; !(readl(smi_reg) & SMI_READ_VALID); i++) {
		if (i == 1000) {
			printk("%s: PHY read timeout\n", mp->dev->name);
			goto out;
		}
		udelay(10);
	}

	*value = readl(smi_reg) & 0xffff;
out:
	spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags);
}

/*
 * Write 'value' to PHY register 'phy_reg' over the SMI bus.  Serialized
 * by the shared phy_lock; on timeout the write is silently skipped
 * after logging a message.
 */
static void write_smi_reg(struct mv643xx_eth_private *mp,
				   unsigned int phy_reg, unsigned int value)
{
	void __iomem *smi_reg = mp->shared_smi->base + SMI_REG;
	int phy_addr = phy_addr_get(mp);
	unsigned long flags;
	int i;

	/* the SMI register is a shared resource */
	spin_lock_irqsave(&mp->shared_smi->phy_lock, flags);

	/* wait for the SMI register to become available */
	for (i = 0; readl(smi_reg) & SMI_BUSY; i++) {
		if (i == 1000) {
			printk("%s: PHY busy timeout\n", mp->dev->name);
			goto out;
		}
		udelay(10);
	}

	/* bits 21-25: register address; bits 16-20: PHY address */
	writel((phy_addr << 16) | (phy_reg << 21) |
		SMI_OPCODE_WRITE | (value & 0xffff), smi_reg);
out:
	spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags);
}
L
Linus Torvalds 已提交
827

828 829

/* mib counters *************************************************************/
830
static void clear_mib_counters(struct mv643xx_eth_private *mp)
831 832 833 834 835
{
	unsigned int port_num = mp->port_num;
	int i;

	/* Perform dummy reads from MIB counters */
836
	for (i = 0; i < 0x80; i += 4)
837
		rdl(mp, MIB_COUNTERS(port_num) + i);
L
Linus Torvalds 已提交
838 839
}

840
static inline u32 read_mib(struct mv643xx_eth_private *mp, int offset)
841
{
842
	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
843
}
844

/*
 * Accumulate the hardware MIB counters (clear-on-read) into the
 * software mib_counters image.  The two 64-bit octet counters are read
 * low word first, then high word, matching the register layout.
 */
static void update_mib_counters(struct mv643xx_eth_private *mp)
{
	struct mib_counters *p = &mp->mib_counters;

	p->good_octets_received += read_mib(mp, 0x00);
	p->good_octets_received += (u64)read_mib(mp, 0x04) << 32;
	p->bad_octets_received += read_mib(mp, 0x08);
	p->internal_mac_transmit_err += read_mib(mp, 0x0c);
	p->good_frames_received += read_mib(mp, 0x10);
	p->bad_frames_received += read_mib(mp, 0x14);
	p->broadcast_frames_received += read_mib(mp, 0x18);
	p->multicast_frames_received += read_mib(mp, 0x1c);
	p->frames_64_octets += read_mib(mp, 0x20);
	p->frames_65_to_127_octets += read_mib(mp, 0x24);
	p->frames_128_to_255_octets += read_mib(mp, 0x28);
	p->frames_256_to_511_octets += read_mib(mp, 0x2c);
	p->frames_512_to_1023_octets += read_mib(mp, 0x30);
	p->frames_1024_to_max_octets += read_mib(mp, 0x34);
	p->good_octets_sent += read_mib(mp, 0x38);
	p->good_octets_sent += (u64)read_mib(mp, 0x3c) << 32;
	p->good_frames_sent += read_mib(mp, 0x40);
	p->excessive_collision += read_mib(mp, 0x44);
	p->multicast_frames_sent += read_mib(mp, 0x48);
	p->broadcast_frames_sent += read_mib(mp, 0x4c);
	p->unrec_mac_control_received += read_mib(mp, 0x50);
	p->fc_sent += read_mib(mp, 0x54);
	p->good_fc_received += read_mib(mp, 0x58);
	p->bad_fc_received += read_mib(mp, 0x5c);
	p->undersize_received += read_mib(mp, 0x60);
	p->fragments_received += read_mib(mp, 0x64);
	p->oversize_received += read_mib(mp, 0x68);
	p->jabber_received += read_mib(mp, 0x6c);
	p->mac_receive_error += read_mib(mp, 0x70);
	p->bad_crc_event += read_mib(mp, 0x74);
	p->collision += read_mib(mp, 0x78);
	p->late_collision += read_mib(mp, 0x7c);
}

883 884

/* ethtool ******************************************************************/
/* Describes one statistic exported via ethtool -S. */
struct mv643xx_eth_stats {
	char stat_string[ETH_GSTRING_LEN];	/* name reported to userspace */
	int sizeof_stat;			/* size of the counter field */
	int stat_offset;			/* offset within mv643xx_eth_private */
};

/* Expands to the size and offset of member 'm' of mv643xx_eth_private. */
#define MV643XX_ETH_STAT(m) FIELD_SIZEOF(struct mv643xx_eth_private, m), \
					offsetof(struct mv643xx_eth_private, m)

/* Software netdev stats followed by the hardware MIB counters. */
static const struct mv643xx_eth_stats mv643xx_eth_gstrings_stats[] = {
	{ "rx_packets", MV643XX_ETH_STAT(stats.rx_packets) },
	{ "tx_packets", MV643XX_ETH_STAT(stats.tx_packets) },
	{ "rx_bytes", MV643XX_ETH_STAT(stats.rx_bytes) },
	{ "tx_bytes", MV643XX_ETH_STAT(stats.tx_bytes) },
	{ "rx_errors", MV643XX_ETH_STAT(stats.rx_errors) },
	{ "tx_errors", MV643XX_ETH_STAT(stats.tx_errors) },
	{ "rx_dropped", MV643XX_ETH_STAT(stats.rx_dropped) },
	{ "tx_dropped", MV643XX_ETH_STAT(stats.tx_dropped) },
	{ "good_octets_received", MV643XX_ETH_STAT(mib_counters.good_octets_received) },
	{ "bad_octets_received", MV643XX_ETH_STAT(mib_counters.bad_octets_received) },
	{ "internal_mac_transmit_err", MV643XX_ETH_STAT(mib_counters.internal_mac_transmit_err) },
	{ "good_frames_received", MV643XX_ETH_STAT(mib_counters.good_frames_received) },
	{ "bad_frames_received", MV643XX_ETH_STAT(mib_counters.bad_frames_received) },
	{ "broadcast_frames_received", MV643XX_ETH_STAT(mib_counters.broadcast_frames_received) },
	{ "multicast_frames_received", MV643XX_ETH_STAT(mib_counters.multicast_frames_received) },
	{ "frames_64_octets", MV643XX_ETH_STAT(mib_counters.frames_64_octets) },
	{ "frames_65_to_127_octets", MV643XX_ETH_STAT(mib_counters.frames_65_to_127_octets) },
	{ "frames_128_to_255_octets", MV643XX_ETH_STAT(mib_counters.frames_128_to_255_octets) },
	{ "frames_256_to_511_octets", MV643XX_ETH_STAT(mib_counters.frames_256_to_511_octets) },
	{ "frames_512_to_1023_octets", MV643XX_ETH_STAT(mib_counters.frames_512_to_1023_octets) },
	{ "frames_1024_to_max_octets", MV643XX_ETH_STAT(mib_counters.frames_1024_to_max_octets) },
	{ "good_octets_sent", MV643XX_ETH_STAT(mib_counters.good_octets_sent) },
	{ "good_frames_sent", MV643XX_ETH_STAT(mib_counters.good_frames_sent) },
	{ "excessive_collision", MV643XX_ETH_STAT(mib_counters.excessive_collision) },
	{ "multicast_frames_sent", MV643XX_ETH_STAT(mib_counters.multicast_frames_sent) },
	{ "broadcast_frames_sent", MV643XX_ETH_STAT(mib_counters.broadcast_frames_sent) },
	{ "unrec_mac_control_received", MV643XX_ETH_STAT(mib_counters.unrec_mac_control_received) },
	{ "fc_sent", MV643XX_ETH_STAT(mib_counters.fc_sent) },
	{ "good_fc_received", MV643XX_ETH_STAT(mib_counters.good_fc_received) },
	{ "bad_fc_received", MV643XX_ETH_STAT(mib_counters.bad_fc_received) },
	{ "undersize_received", MV643XX_ETH_STAT(mib_counters.undersize_received) },
	{ "fragments_received", MV643XX_ETH_STAT(mib_counters.fragments_received) },
	{ "oversize_received", MV643XX_ETH_STAT(mib_counters.oversize_received) },
	{ "jabber_received", MV643XX_ETH_STAT(mib_counters.jabber_received) },
	{ "mac_receive_error", MV643XX_ETH_STAT(mib_counters.mac_receive_error) },
	{ "bad_crc_event", MV643XX_ETH_STAT(mib_counters.bad_crc_event) },
	{ "collision", MV643XX_ETH_STAT(mib_counters.collision) },
	{ "late_collision", MV643XX_ETH_STAT(mib_counters.late_collision) },
};

#define MV643XX_ETH_STATS_LEN	ARRAY_SIZE(mv643xx_eth_gstrings_stats)
936

937
static int mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
938
{
939
	struct mv643xx_eth_private *mp = netdev_priv(dev);
940 941 942 943 944 945 946 947 948 949 950 951 952
	int err;

	spin_lock_irq(&mp->lock);
	err = mii_ethtool_gset(&mp->mii, cmd);
	spin_unlock_irq(&mp->lock);

	/* The PHY may support 1000baseT_Half, but the mv643xx does not */
	cmd->supported &= ~SUPPORTED_1000baseT_Half;
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	return err;
}

953
static int mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
L
Linus Torvalds 已提交
954
{
955
	struct mv643xx_eth_private *mp = netdev_priv(dev);
956 957
	int err;

958 959 960
	spin_lock_irq(&mp->lock);
	err = mii_ethtool_sset(&mp->mii, cmd);
	spin_unlock_irq(&mp->lock);
961

962 963
	return err;
}
L
Linus Torvalds 已提交
964

965
static void mv643xx_eth_get_drvinfo(struct net_device *netdev,
966 967
				struct ethtool_drvinfo *drvinfo)
{
968 969
	strncpy(drvinfo->driver,  mv643xx_eth_driver_name, 32);
	strncpy(drvinfo->version, mv643xx_eth_driver_version, 32);
970 971
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, "mv643xx", 32);
972
	drvinfo->n_stats = MV643XX_ETH_STATS_LEN;
973
}
L
Linus Torvalds 已提交
974

975 976
static int mv643xx_eth_nway_restart(struct net_device *dev)
{
977
	struct mv643xx_eth_private *mp = netdev_priv(dev);
L
Linus Torvalds 已提交
978

979 980
	return mii_nway_restart(&mp->mii);
}
L
Linus Torvalds 已提交
981

982 983
static u32 mv643xx_eth_get_link(struct net_device *dev)
{
984
	struct mv643xx_eth_private *mp = netdev_priv(dev);
L
Linus Torvalds 已提交
985

986 987
	return mii_link_ok(&mp->mii);
}
L
Linus Torvalds 已提交
988

989
static void mv643xx_eth_get_strings(struct net_device *netdev, uint32_t stringset,
990 991 992
				uint8_t *data)
{
	int i;
L
Linus Torvalds 已提交
993

994 995
	switch(stringset) {
	case ETH_SS_STATS:
996
		for (i=0; i < MV643XX_ETH_STATS_LEN; i++) {
997
			memcpy(data + i * ETH_GSTRING_LEN,
998 999
				mv643xx_eth_gstrings_stats[i].stat_string,
				ETH_GSTRING_LEN);
1000 1001 1002 1003
		}
		break;
	}
}
L
Linus Torvalds 已提交
1004

1005
/*
 * ethtool get_ethtool_stats: refresh the MIB counters from hardware
 * and copy each statistic (32- or 64-bit, per the table's sizeof_stat)
 * into the caller's u64 array.
 *
 * Fix: use the netdev_priv() accessor instead of poking netdev->priv
 * directly — every other function in this file uses netdev_priv(),
 * and direct ->priv access breaks when private data is allocated
 * inline with the net_device.
 */
static void mv643xx_eth_get_ethtool_stats(struct net_device *netdev,
				struct ethtool_stats *stats, uint64_t *data)
{
	struct mv643xx_eth_private *mp = netdev_priv(netdev);
	int i;

	update_mib_counters(mp);

	for (i = 0; i < MV643XX_ETH_STATS_LEN; i++) {
		/* stat_offset is a byte offset into the private struct */
		char *p = (char *)mp+mv643xx_eth_gstrings_stats[i].stat_offset;
		data[i] = (mv643xx_eth_gstrings_stats[i].sizeof_stat ==
			sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
	}
}

static int mv643xx_eth_get_sset_count(struct net_device *netdev, int sset)
1021 1022 1023
{
	switch (sset) {
	case ETH_SS_STATS:
1024
		return MV643XX_ETH_STATS_LEN;
1025 1026 1027 1028
	default:
		return -EOPNOTSUPP;
	}
}
L
Linus Torvalds 已提交
1029

1030 1031 1032 1033
/* ethtool entry points exported through net_device->ethtool_ops. */
static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
	.get_settings           = mv643xx_eth_get_settings,
	.set_settings           = mv643xx_eth_set_settings,
	.get_drvinfo            = mv643xx_eth_get_drvinfo,
	.get_link               = mv643xx_eth_get_link,
	.set_sg			= ethtool_op_set_sg,
	.get_sset_count		= mv643xx_eth_get_sset_count,
	.get_ethtool_stats      = mv643xx_eth_get_ethtool_stats,
	.get_strings            = mv643xx_eth_get_strings,
	.nway_reset		= mv643xx_eth_nway_restart,
};


/* address handling *********************************************************/
static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
1045 1046 1047 1048
{
	unsigned int port_num = mp->port_num;
	unsigned int mac_h;
	unsigned int mac_l;
L
Linus Torvalds 已提交
1049

1050 1051
	mac_h = rdl(mp, MAC_ADDR_HIGH(port_num));
	mac_l = rdl(mp, MAC_ADDR_LOW(port_num));
L
Linus Torvalds 已提交
1052

1053 1054 1055 1056 1057 1058
	addr[0] = (mac_h >> 24) & 0xff;
	addr[1] = (mac_h >> 16) & 0xff;
	addr[2] = (mac_h >> 8) & 0xff;
	addr[3] = mac_h & 0xff;
	addr[4] = (mac_l >> 8) & 0xff;
	addr[5] = mac_l & 0xff;
1059
}
L
Linus Torvalds 已提交
1060

1061
static void init_mac_tables(struct mv643xx_eth_private *mp)
1062 1063 1064
{
	unsigned int port_num = mp->port_num;
	int table_index;
L
Linus Torvalds 已提交
1065

1066 1067
	/* Clear DA filter unicast table (Ex_dFUT) */
	for (table_index = 0; table_index <= 0xC; table_index += 4)
1068
		wrl(mp, UNICAST_TABLE(port_num) + table_index, 0);
L
Linus Torvalds 已提交
1069

1070 1071
	for (table_index = 0; table_index <= 0xFC; table_index += 4) {
		/* Clear DA filter special multicast table (Ex_dFSMT) */
1072
		wrl(mp, SPECIAL_MCAST_TABLE(port_num) + table_index, 0);
1073
		/* Clear DA filter other multicast table (Ex_dFOMT) */
1074
		wrl(mp, OTHER_MCAST_TABLE(port_num) + table_index, 0);
1075 1076
	}
}
1077

1078
/*
 * Set the "accept frame" bit for one entry of an address filter
 * table.  Each 32-bit register holds four one-byte entries; bit 0
 * of an entry's byte is the accept bit (queue bits left at 0).
 */
static void set_filter_table_entry(struct mv643xx_eth_private *mp,
					    int table, unsigned char entry)
{
	unsigned int reg_addr = table + (entry / 4) * 4;	/* register holding the entry */
	unsigned int shift = 8 * (entry % 4);			/* entry's byte within it */
	unsigned int val;

	/* Read-modify-write: set the accept bit, preserve the rest */
	val = rdl(mp, reg_addr);
	val |= 0x01 << shift;
	wrl(mp, reg_addr, val);
}

static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
L
Linus Torvalds 已提交
1095
{
1096 1097 1098 1099
	unsigned int port_num = mp->port_num;
	unsigned int mac_h;
	unsigned int mac_l;
	int table;
L
Linus Torvalds 已提交
1100

1101 1102 1103
	mac_l = (addr[4] << 8) | (addr[5]);
	mac_h = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
							(addr[3] << 0);
1104

1105 1106
	wrl(mp, MAC_ADDR_LOW(port_num), mac_l);
	wrl(mp, MAC_ADDR_HIGH(port_num), mac_h);
L
Linus Torvalds 已提交
1107

1108
	/* Accept frames with this address */
1109
	table = UNICAST_TABLE(port_num);
1110
	set_filter_table_entry(mp, table, addr[5] & 0x0f);
L
Linus Torvalds 已提交
1111 1112
}

1113
static void mv643xx_eth_update_mac_address(struct net_device *dev)
L
Linus Torvalds 已提交
1114
{
1115
	struct mv643xx_eth_private *mp = netdev_priv(dev);
L
Linus Torvalds 已提交
1116

1117 1118
	init_mac_tables(mp);
	uc_addr_set(mp, dev->dev_addr);
1119
}
L
Linus Torvalds 已提交
1120

1121
static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
L
Linus Torvalds 已提交
1122
{
1123
	int i;
L
Linus Torvalds 已提交
1124

1125 1126 1127 1128
	for (i = 0; i < 6; i++)
		/* +2 is for the offset of the HW addr type */
		dev->dev_addr[i] = ((unsigned char *)addr)[i + 2];
	mv643xx_eth_update_mac_address(dev);
L
Linus Torvalds 已提交
1129 1130 1131
	return 0;
}

1132
/*
 * Enable reception of one multicast address.
 *
 * Addresses of the form 01:00:5E:00:00:XX (IPv4 local-network
 * control block) are indexed directly by their last byte in the
 * "special" multicast table.  All other multicast addresses are
 * hashed with a bit-serial CRC-8 (unrolled below, one expression
 * per output bit) and the result indexes the "other" multicast
 * table.
 */
static void mc_addr(struct mv643xx_eth_private *mp, unsigned char *addr)
{
	unsigned int port_num = mp->port_num;
	unsigned int mac_h;
	unsigned int mac_l;
	unsigned char crc_result = 0;
	int table;
	int mac_array[48];	/* address expanded to individual bits */
	int crc[8];		/* the eight CRC output bits */
	int i;

	/* Fast path: special multicast block, indexed by last byte */
	if ((addr[0] == 0x01) && (addr[1] == 0x00) &&
	    (addr[2] == 0x5E) && (addr[3] == 0x00) && (addr[4] == 0x00)) {
		table = SPECIAL_MCAST_TABLE(port_num);
		set_filter_table_entry(mp, table, addr[5]);
		return;
	}

	/* Calculate CRC-8 out of the given address */
	mac_h = (addr[0] << 8) | (addr[1]);
	mac_l = (addr[2] << 24) | (addr[3] << 16) |
			(addr[4] << 8) | (addr[5] << 0);

	/* Spread the 48 address bits into mac_array[], LSB first */
	for (i = 0; i < 32; i++)
		mac_array[i] = (mac_l >> i) & 0x1;
	for (i = 32; i < 48; i++)
		mac_array[i] = (mac_h >> (i - 32)) & 0x1;

	/* Each crc[k] is the XOR of a fixed tap set of address bits */
	crc[0] = mac_array[45] ^ mac_array[43] ^ mac_array[40] ^ mac_array[39] ^
		 mac_array[35] ^ mac_array[34] ^ mac_array[31] ^ mac_array[30] ^
		 mac_array[28] ^ mac_array[23] ^ mac_array[21] ^ mac_array[19] ^
		 mac_array[18] ^ mac_array[16] ^ mac_array[14] ^ mac_array[12] ^
		 mac_array[8]  ^ mac_array[7]  ^ mac_array[6]  ^ mac_array[0];

	crc[1] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^ mac_array[43] ^
		 mac_array[41] ^ mac_array[39] ^ mac_array[36] ^ mac_array[34] ^
		 mac_array[32] ^ mac_array[30] ^ mac_array[29] ^ mac_array[28] ^
		 mac_array[24] ^ mac_array[23] ^ mac_array[22] ^ mac_array[21] ^
		 mac_array[20] ^ mac_array[18] ^ mac_array[17] ^ mac_array[16] ^
		 mac_array[15] ^ mac_array[14] ^ mac_array[13] ^ mac_array[12] ^
		 mac_array[9]  ^ mac_array[6]  ^ mac_array[1]  ^ mac_array[0];

	crc[2] = mac_array[47] ^ mac_array[46] ^ mac_array[44] ^ mac_array[43] ^
		 mac_array[42] ^ mac_array[39] ^ mac_array[37] ^ mac_array[34] ^
		 mac_array[33] ^ mac_array[29] ^ mac_array[28] ^ mac_array[25] ^
		 mac_array[24] ^ mac_array[22] ^ mac_array[17] ^ mac_array[15] ^
		 mac_array[13] ^ mac_array[12] ^ mac_array[10] ^ mac_array[8]  ^
		 mac_array[6]  ^ mac_array[2]  ^ mac_array[1]  ^ mac_array[0];

	crc[3] = mac_array[47] ^ mac_array[45] ^ mac_array[44] ^ mac_array[43] ^
		 mac_array[40] ^ mac_array[38] ^ mac_array[35] ^ mac_array[34] ^
		 mac_array[30] ^ mac_array[29] ^ mac_array[26] ^ mac_array[25] ^
		 mac_array[23] ^ mac_array[18] ^ mac_array[16] ^ mac_array[14] ^
		 mac_array[13] ^ mac_array[11] ^ mac_array[9]  ^ mac_array[7]  ^
		 mac_array[3]  ^ mac_array[2]  ^ mac_array[1];

	crc[4] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^ mac_array[41] ^
		 mac_array[39] ^ mac_array[36] ^ mac_array[35] ^ mac_array[31] ^
		 mac_array[30] ^ mac_array[27] ^ mac_array[26] ^ mac_array[24] ^
		 mac_array[19] ^ mac_array[17] ^ mac_array[15] ^ mac_array[14] ^
		 mac_array[12] ^ mac_array[10] ^ mac_array[8]  ^ mac_array[4]  ^
		 mac_array[3]  ^ mac_array[2];

	crc[5] = mac_array[47] ^ mac_array[46] ^ mac_array[45] ^ mac_array[42] ^
		 mac_array[40] ^ mac_array[37] ^ mac_array[36] ^ mac_array[32] ^
		 mac_array[31] ^ mac_array[28] ^ mac_array[27] ^ mac_array[25] ^
		 mac_array[20] ^ mac_array[18] ^ mac_array[16] ^ mac_array[15] ^
		 mac_array[13] ^ mac_array[11] ^ mac_array[9]  ^ mac_array[5]  ^
		 mac_array[4]  ^ mac_array[3];

	crc[6] = mac_array[47] ^ mac_array[46] ^ mac_array[43] ^ mac_array[41] ^
		 mac_array[38] ^ mac_array[37] ^ mac_array[33] ^ mac_array[32] ^
		 mac_array[29] ^ mac_array[28] ^ mac_array[26] ^ mac_array[21] ^
		 mac_array[19] ^ mac_array[17] ^ mac_array[16] ^ mac_array[14] ^
		 mac_array[12] ^ mac_array[10] ^ mac_array[6]  ^ mac_array[5]  ^
		 mac_array[4];

	crc[7] = mac_array[47] ^ mac_array[44] ^ mac_array[42] ^ mac_array[39] ^
		 mac_array[38] ^ mac_array[34] ^ mac_array[33] ^ mac_array[30] ^
		 mac_array[29] ^ mac_array[27] ^ mac_array[22] ^ mac_array[20] ^
		 mac_array[18] ^ mac_array[17] ^ mac_array[15] ^ mac_array[13] ^
		 mac_array[11] ^ mac_array[7]  ^ mac_array[6]  ^ mac_array[5];

	/* Pack the eight bits into the table index */
	for (i = 0; i < 8; i++)
		crc_result = crc_result | (crc[i] << i);

	table = OTHER_MCAST_TABLE(port_num);
	set_filter_table_entry(mp, table, crc_result);
}

/*
 * Rebuild the port's multicast filter state from dev->flags and
 * dev->mc_list.  In promiscuous/allmulti mode both multicast tables
 * are fully populated with "accept" entries; otherwise both tables
 * are cleared and each address from the device's multicast list is
 * added individually via mc_addr().
 */
static void set_multicast_list(struct net_device *dev)
{

	struct dev_mc_list	*mc_list;
	int			i;
	int			table_index;
	struct mv643xx_eth_private	*mp = netdev_priv(dev);
	unsigned int		port_num = mp->port_num;

	/* If the device is in promiscuous mode or in all multicast mode,
	 * we will fully populate both multicast tables with accept.
	 * This is guaranteed to yield a match on all multicast addresses...
	 */
	if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI)) {
		for (table_index = 0; table_index <= 0xFC; table_index += 4) {
			/* Set all entries in DA filter special multicast
			 * table (Ex_dFSMT)
			 * Set for ETH_Q0 for now
			 * Bits
			 * 0	  Accept=1, Drop=0
			 * 3-1  Queue	 ETH_Q0=0
			 * 7-4  Reserved = 0;
			 */
			wrl(mp, SPECIAL_MCAST_TABLE(port_num) + table_index, 0x01010101);

			/* Set all entries in DA filter other multicast
			 * table (Ex_dFOMT)
			 * Set for ETH_Q0 for now
			 * Bits
			 * 0	  Accept=1, Drop=0
			 * 3-1  Queue	 ETH_Q0=0
			 * 7-4  Reserved = 0;
			 */
			wrl(mp, OTHER_MCAST_TABLE(port_num) + table_index, 0x01010101);
		}
		return;
	}

	/* We will clear out multicast tables every time we get the list.
	 * Then add the entire new list...
	 */
	for (table_index = 0; table_index <= 0xFC; table_index += 4) {
		/* Clear DA filter special multicast table (Ex_dFSMT) */
		wrl(mp, SPECIAL_MCAST_TABLE(port_num) + table_index, 0);

		/* Clear DA filter other multicast table (Ex_dFOMT) */
		wrl(mp, OTHER_MCAST_TABLE(port_num) + table_index, 0);
	}

	/* Get pointer to net_device multicast list and add each one...
	 * (capped at 256 entries; only 6-byte hardware addresses qualify) */
	for (i = 0, mc_list = dev->mc_list;
			(i < 256) && (mc_list != NULL) && (i < dev->mc_count);
			i++, mc_list = mc_list->next)
		if (mc_list->dmi_addrlen == 6)
			mc_addr(mp, mc_list->dmi_addr);
}

static void mv643xx_eth_set_rx_mode(struct net_device *dev)
1280
{
1281
	struct mv643xx_eth_private *mp = netdev_priv(dev);
1282
	u32 config_reg;
L
Linus Torvalds 已提交
1283

1284
	config_reg = rdl(mp, PORT_CONFIG(mp->port_num));
1285
	if (dev->flags & IFF_PROMISC)
1286
		config_reg |= UNICAST_PROMISCUOUS_MODE;
1287
	else
1288
		config_reg &= ~UNICAST_PROMISCUOUS_MODE;
1289
	wrl(mp, PORT_CONFIG(mp->port_num), config_reg);
L
Linus Torvalds 已提交
1290

1291
	set_multicast_list(dev);
1292
}
1293 1294


1295
/* rx/tx queue initialisation ***********************************************/
1296
static void ether_init_rx_desc_ring(struct mv643xx_eth_private *mp)
1297
{
1298
	volatile struct rx_desc *p_rx_desc;
1299 1300 1301 1302
	int rx_desc_num = mp->rx_ring_size;
	int i;

	/* initialize the next_desc_ptr links in the Rx descriptors ring */
1303
	p_rx_desc = (struct rx_desc *)mp->rx_desc_area;
1304 1305
	for (i = 0; i < rx_desc_num; i++) {
		p_rx_desc[i].next_desc_ptr = mp->rx_desc_dma +
1306
			((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
1307 1308
	}

1309
	/* Save Rx desc pointer to driver struct. */
1310 1311
	mp->rx_curr_desc = 0;
	mp->rx_used_desc = 0;
L
Linus Torvalds 已提交
1312

1313
	mp->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc);
1314
}
1315

1316 1317
/*
 * Tear down the RX side: stop the RX queues, free every skb still
 * attached to the ring, then release the descriptor area (iounmap
 * for SRAM-backed rings, dma_free_coherent otherwise).
 */
static void mv643xx_eth_free_rx_rings(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int curr;

	/* Stop RX Queues */
	mv643xx_eth_port_disable_rx(mp);

	/* Free preallocated skb's on RX rings */
	for (curr = 0; mp->rx_desc_count && curr < mp->rx_ring_size; curr++) {
		if (mp->rx_skb[curr]) {
			dev_kfree_skb(mp->rx_skb[curr]);
			mp->rx_desc_count--;
		}
	}

	/* Any remaining count means the ring state is inconsistent;
	 * report it but continue freeing the descriptor memory. */
	if (mp->rx_desc_count)
		printk(KERN_ERR
			"%s: Error in freeing Rx Ring. %d skb's still"
			" stuck in RX Ring - ignoring them\n", dev->name,
			mp->rx_desc_count);
	/* Free RX ring */
	if (mp->rx_sram_size)
		iounmap(mp->rx_desc_area);
	else
		dma_free_coherent(NULL, mp->rx_desc_area_size,
				mp->rx_desc_area, mp->rx_desc_dma);
}

static void ether_init_tx_desc_ring(struct mv643xx_eth_private *mp)
1346 1347
{
	int tx_desc_num = mp->tx_ring_size;
1348
	struct tx_desc *p_tx_desc;
1349
	int i;
L
Linus Torvalds 已提交
1350

1351
	/* Initialize the next_desc_ptr links in the Tx descriptors ring */
1352
	p_tx_desc = (struct tx_desc *)mp->tx_desc_area;
1353 1354
	for (i = 0; i < tx_desc_num; i++) {
		p_tx_desc[i].next_desc_ptr = mp->tx_desc_dma +
1355
			((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
1356 1357
	}

1358 1359
	mp->tx_curr_desc = 0;
	mp->tx_used_desc = 0;
1360

1361
	mp->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc);
1362
}
L
Linus Torvalds 已提交
1363

1364
/*
 * Reclaim completed TX descriptors.
 *
 * Walks the ring from tx_used_desc, releasing each descriptor whose
 * DMA ownership bit has cleared (or every descriptor when 'force' is
 * set, e.g. on teardown).  mp->lock is taken per descriptor and
 * dropped before the DMA unmap / skb free so those run unlocked.
 * Returns 1 if at least one descriptor was released, 0 otherwise.
 */
static int mv643xx_eth_free_tx_descs(struct net_device *dev, int force)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	struct tx_desc *desc;
	u32 cmd_sts;
	struct sk_buff *skb;
	unsigned long flags;
	int tx_index;
	dma_addr_t addr;
	int count;
	int released = 0;

	while (mp->tx_desc_count > 0) {
		spin_lock_irqsave(&mp->lock, flags);

		/* tx_desc_count might have changed before acquiring the lock */
		if (mp->tx_desc_count <= 0) {
			spin_unlock_irqrestore(&mp->lock, flags);
			return released;
		}

		tx_index = mp->tx_used_desc;
		desc = &mp->tx_desc_area[tx_index];
		cmd_sts = desc->cmd_sts;

		/* Stop at the first descriptor still owned by the DMA engine */
		if (!force && (cmd_sts & BUFFER_OWNED_BY_DMA)) {
			spin_unlock_irqrestore(&mp->lock, flags);
			return released;
		}

		mp->tx_used_desc = (tx_index + 1) % mp->tx_ring_size;
		mp->tx_desc_count--;

		/* Snapshot everything needed after the lock is dropped */
		addr = desc->buf_ptr;
		count = desc->byte_cnt;
		skb = mp->tx_skb[tx_index];
		if (skb)
			mp->tx_skb[tx_index] = NULL;

		if (cmd_sts & ERROR_SUMMARY) {
			printk("%s: Error in TX\n", dev->name);
			dev->stats.tx_errors++;
		}

		spin_unlock_irqrestore(&mp->lock, flags);

		/* First descriptor of an skb was mapped with map_single,
		 * fragment descriptors with map_page */
		if (cmd_sts & TX_FIRST_DESC)
			dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
		else
			dma_unmap_page(NULL, addr, count, DMA_TO_DEVICE);

		if (skb)
			dev_kfree_skb_irq(skb);

		released = 1;
	}

	return released;
}

static void mv643xx_eth_free_completed_tx_descs(struct net_device *dev)
1425
{
1426
	struct mv643xx_eth_private *mp = netdev_priv(dev);
1427

1428 1429 1430
	if (mv643xx_eth_free_tx_descs(dev, 0) &&
	    mp->tx_ring_size - mp->tx_desc_count >= MAX_DESCS_PER_SKB)
		netif_wake_queue(dev);
1431 1432
}

1433
/* Forcibly reclaim every TX descriptor, completed or not (teardown). */
static void mv643xx_eth_free_all_tx_descs(struct net_device *dev)
{
	(void)mv643xx_eth_free_tx_descs(dev, 1);
}

/*
 * Tear down the TX side: stop the TX queues, force-release all
 * outstanding skbs, verify the ring is fully drained, then free the
 * descriptor area (iounmap for SRAM-backed rings, dma_free_coherent
 * otherwise).
 */
static void mv643xx_eth_free_tx_rings(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	/* Stop Tx Queues */
	mv643xx_eth_port_disable_tx(mp);

	/* Free outstanding skb's on TX ring */
	mv643xx_eth_free_all_tx_descs(dev);

	/* After a forced reclaim the consumer must have caught up */
	BUG_ON(mp->tx_used_desc != mp->tx_curr_desc);

	/* Free TX ring */
	if (mp->tx_sram_size)
		iounmap(mp->tx_desc_area);
	else
		dma_free_coherent(NULL, mp->tx_desc_area_size,
				mp->tx_desc_area, mp->tx_desc_dma);
}

/* netdev ops and related ***************************************************/

/* Forward declaration: defined below, used by port_init()/port_start(). */
static void port_reset(struct mv643xx_eth_private *mp);

/*
 * Propagate the speed/duplex result of autonegotiation into the
 * Port Serial Control Register (PSCR).  If the port is currently
 * enabled, TX is quiesced and the port is disabled around the
 * update, then re-enabled with the new settings.
 */
static void mv643xx_eth_update_pscr(struct net_device *dev,
				    struct ethtool_cmd *ecmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int port_num = mp->port_num;
	u32 o_pscr, n_pscr;
	unsigned int queues;

	o_pscr = rdl(mp, PORT_SERIAL_CONTROL(port_num));
	n_pscr = o_pscr;

	/* clear speed, duplex and rx buffer size fields */
	n_pscr &= ~(SET_MII_SPEED_TO_100  |
		   SET_GMII_SPEED_TO_1000 |
		   SET_FULL_DUPLEX_MODE   |
		   MAX_RX_PACKET_MASK);

	if (ecmd->duplex == DUPLEX_FULL)
		n_pscr |= SET_FULL_DUPLEX_MODE;

	/* 1000 Mb/s uses the large RX packet limit; 10/100 the small one */
	if (ecmd->speed == SPEED_1000)
		n_pscr |= SET_GMII_SPEED_TO_1000 |
			  MAX_RX_PACKET_9700BYTE;
	else {
		if (ecmd->speed == SPEED_100)
			n_pscr |= SET_MII_SPEED_TO_100;
		n_pscr |= MAX_RX_PACKET_1522BYTE;
	}

	if (n_pscr != o_pscr) {
		if ((o_pscr & SERIAL_PORT_ENABLE) == 0)
			wrl(mp, PORT_SERIAL_CONTROL(port_num), n_pscr);
		else {
			queues = mv643xx_eth_port_disable_tx(mp);

			/* Disable the port before changing speed/duplex */
			o_pscr &= ~SERIAL_PORT_ENABLE;
			wrl(mp, PORT_SERIAL_CONTROL(port_num), o_pscr);
			/* NOTE(review): n_pscr is deliberately written twice
			 * here in the original code — presumably a hardware
			 * requirement; do not collapse without confirming. */
			wrl(mp, PORT_SERIAL_CONTROL(port_num), n_pscr);
			wrl(mp, PORT_SERIAL_CONTROL(port_num), n_pscr);
			if (queues)
				mv643xx_eth_port_enable_tx(mp, queues);
		}
	}
}

/*
 * Port interrupt handler.  Reads and acknowledges the cause
 * registers, then dispatches:
 *  - link/PHY change: re-sync PSCR with the PHY and update carrier
 *    and queue state;
 *  - RX: either schedule NAPI (with RX interrupts masked) or
 *    process the queue directly, depending on MV643XX_ETH_NAPI;
 *  - TX done: reclaim completed descriptors.
 * Returns IRQ_NONE when neither cause register had a bit set
 * (possible with interrupt coalescing on a shared IRQ line).
 */
static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 int_cause, int_cause_ext = 0;
	unsigned int port_num = mp->port_num;

	/* Read interrupt cause registers */
	int_cause = rdl(mp, INT_CAUSE(port_num)) & (INT_RX | INT_EXT);
	if (int_cause & INT_EXT) {
		int_cause_ext = rdl(mp, INT_CAUSE_EXT(port_num))
				& (INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX);
		/* Acknowledge only the bits we are about to handle */
		wrl(mp, INT_CAUSE_EXT(port_num), ~int_cause_ext);
	}

	/* PHY status changed */
	if (int_cause_ext & (INT_EXT_LINK | INT_EXT_PHY)) {
		struct ethtool_cmd cmd;

		if (mii_link_ok(&mp->mii)) {
			mii_ethtool_gset(&mp->mii, &cmd);
			mv643xx_eth_update_pscr(dev, &cmd);
			mv643xx_eth_port_enable_tx(mp, 1);
			if (!netif_carrier_ok(dev)) {
				netif_carrier_on(dev);
				/* Only wake TX if there is room for a
				 * maximally-fragmented skb */
				if (mp->tx_ring_size - mp->tx_desc_count >=
							MAX_DESCS_PER_SKB)
					netif_wake_queue(dev);
			}
		} else if (netif_carrier_ok(dev)) {
			netif_stop_queue(dev);
			netif_carrier_off(dev);
		}
	}

#ifdef MV643XX_ETH_NAPI
	if (int_cause & INT_RX) {
		/* schedule the NAPI poll routine to maintain port */
		wrl(mp, INT_MASK(port_num), 0x00000000);

		/* wait for previous write to complete */
		rdl(mp, INT_MASK(port_num));

		netif_rx_schedule(dev, &mp->napi);
	}
#else
	if (int_cause & INT_RX)
		mv643xx_eth_receive_queue(dev, INT_MAX);
#endif
	if (int_cause_ext & INT_EXT_TX)
		mv643xx_eth_free_completed_tx_descs(dev);

	/*
	 * If no real interrupt occurred, exit.
	 * This can happen when using gigE interrupt coalescing mechanism.
	 */
	if ((int_cause == 0x0) && (int_cause_ext == 0x0))
		return IRQ_NONE;

	return IRQ_HANDLED;
}

/*
 * Reset the PHY by setting the reset bit in its control register
 * (MII register 0, bit 15) and busy-wait until the PHY clears it.
 */
static void phy_reset(struct mv643xx_eth_private *mp)
{
	unsigned int ctrl;

	/* Assert the PHY reset bit */
	read_smi_reg(mp, 0, &ctrl);
	ctrl |= 0x8000;		/* bit 15: PHY reset */
	write_smi_reg(mp, 0, ctrl);

	/* Poll until the PHY deasserts the reset bit */
	do {
		udelay(1);
		read_smi_reg(mp, 0, &ctrl);
	} while (ctrl & 0x8000);
}

/*
 * Bring the port to an operational state: program the current TX/RX
 * descriptor pointers, install the MAC address, configure port and
 * SDMA registers, enable the port and RX, and finally reset the PHY
 * while preserving its negotiated settings.
 */
static void port_start(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	unsigned int port_num = mp->port_num;
	int tx_curr_desc, rx_curr_desc;
	u32 pscr;
	struct ethtool_cmd ethtool_cmd;

	/* Assignment of Tx CTRP of given queue */
	tx_curr_desc = mp->tx_curr_desc;
	wrl(mp, TXQ_CURRENT_DESC_PTR(port_num),
		(u32)((struct tx_desc *)mp->tx_desc_dma + tx_curr_desc));

	/* Assignment of Rx CRDP of given queue */
	rx_curr_desc = mp->rx_curr_desc;
	wrl(mp, RXQ_CURRENT_DESC_PTR(port_num),
		(u32)((struct rx_desc *)mp->rx_desc_dma + rx_curr_desc));

	/* Add the assigned Ethernet address to the port's address table */
	uc_addr_set(mp, dev->dev_addr);

	/*
	 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
	 * frames to RX queue #0.
	 */
	wrl(mp, PORT_CONFIG(port_num), 0x00000000);

	/*
	 * Treat BPDUs as normal multicasts, and disable partition mode.
	 */
	wrl(mp, PORT_CONFIG_EXT(port_num), 0x00000000);

	pscr = rdl(mp, PORT_SERIAL_CONTROL(port_num));

	/* Disable the port while reprogramming serial control */
	pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS);
	wrl(mp, PORT_SERIAL_CONTROL(port_num), pscr);

	pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
		DISABLE_AUTO_NEG_SPEED_GMII    |
		DISABLE_AUTO_NEG_FOR_DUPLEX    |
		DO_NOT_FORCE_LINK_FAIL	   |
		SERIAL_PORT_CONTROL_RESERVED;

	wrl(mp, PORT_SERIAL_CONTROL(port_num), pscr);

	pscr |= SERIAL_PORT_ENABLE;
	wrl(mp, PORT_SERIAL_CONTROL(port_num), pscr);

	/* Assign port SDMA configuration */
	wrl(mp, SDMA_CONFIG(port_num), PORT_SDMA_CONFIG_DEFAULT_VALUE);

	/* Enable port Rx. */
	mv643xx_eth_port_enable_rx(mp, 1);

	/* Disable port bandwidth limits by clearing MTU register */
	wrl(mp, TX_BW_MTU(port_num), 0);

	/* save phy settings across reset */
	mv643xx_eth_get_settings(dev, &ethtool_cmd);
	phy_reset(mp);
	mv643xx_eth_set_settings(dev, &ethtool_cmd);
}

#ifdef MV643XX_ETH_COAL
/*
 * Program the RX interrupt coalescing delay (in usec) into bits
 * 21:8 of the SDMA config register.  The register value counts in
 * units of 64 t_clk cycles.  Returns the programmed count.
 */
static unsigned int set_rx_coal(struct mv643xx_eth_private *mp,
					unsigned int delay)
{
	unsigned int port = mp->port_num;
	unsigned int ticks = ((mp->shared->t_clk / 1000000) * delay) / 64;
	u32 sdma;

	/* Merge the coalescing field into the existing SDMA config */
	sdma = rdl(mp, SDMA_CONFIG(port)) & 0xffc000ff;
	sdma |= (ticks & 0x3fff) << 8;
	wrl(mp, SDMA_CONFIG(port), sdma);

	return ticks;
}
#endif

static unsigned int set_tx_coal(struct mv643xx_eth_private *mp,
1666
					unsigned int delay)
L
Linus Torvalds 已提交
1667
{
1668
	unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;
L
Linus Torvalds 已提交
1669

1670
	/* Set TX Coalescing mechanism */
1671
	wrl(mp, TX_FIFO_URGENT_THRESHOLD(mp->port_num), coal << 4);
L
Linus Torvalds 已提交
1672

1673
	return coal;
L
Linus Torvalds 已提交
1674 1675
}

1676
/*
 * One-time port initialisation: reset the port hardware, then clear
 * all address filter tables.
 */
static void port_init(struct mv643xx_eth_private *mp)
{
	port_reset(mp);

	init_mac_tables(mp);
}

static int mv643xx_eth_open(struct net_device *dev)
1684
{
1685
	struct mv643xx_eth_private *mp = netdev_priv(dev);
1686
	unsigned int port_num = mp->port_num;
1687 1688
	unsigned int size;
	int err;
1689

1690
	/* Clear any pending ethernet port interrupts */
1691 1692
	wrl(mp, INT_CAUSE(port_num), 0);
	wrl(mp, INT_CAUSE_EXT(port_num), 0);
1693
	/* wait for previous write to complete */
1694
	rdl(mp, INT_CAUSE_EXT(port_num));
1695 1696 1697 1698 1699 1700

	err = request_irq(dev->irq, mv643xx_eth_int_handler,
			IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
	if (err) {
		printk(KERN_ERR "%s: Can not assign IRQ\n", dev->name);
		return -EAGAIN;
1701 1702
	}

1703
	port_init(mp);
1704

1705 1706 1707
	memset(&mp->timeout, 0, sizeof(struct timer_list));
	mp->timeout.function = mv643xx_eth_rx_refill_descs_timer_wrapper;
	mp->timeout.data = (unsigned long)dev;
1708

1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723
	/* Allocate RX and TX skb rings */
	mp->rx_skb = kmalloc(sizeof(*mp->rx_skb) * mp->rx_ring_size,
								GFP_KERNEL);
	if (!mp->rx_skb) {
		printk(KERN_ERR "%s: Cannot allocate Rx skb ring\n", dev->name);
		err = -ENOMEM;
		goto out_free_irq;
	}
	mp->tx_skb = kmalloc(sizeof(*mp->tx_skb) * mp->tx_ring_size,
								GFP_KERNEL);
	if (!mp->tx_skb) {
		printk(KERN_ERR "%s: Cannot allocate Tx skb ring\n", dev->name);
		err = -ENOMEM;
		goto out_free_rx_skb;
	}
1724

1725 1726
	/* Allocate TX ring */
	mp->tx_desc_count = 0;
1727
	size = mp->tx_ring_size * sizeof(struct tx_desc);
1728
	mp->tx_desc_area_size = size;
1729

1730
	if (mp->tx_sram_size) {
1731
		mp->tx_desc_area = ioremap(mp->tx_sram_addr,
1732 1733 1734
							mp->tx_sram_size);
		mp->tx_desc_dma = mp->tx_sram_addr;
	} else
1735
		mp->tx_desc_area = dma_alloc_coherent(NULL, size,
1736 1737
							&mp->tx_desc_dma,
							GFP_KERNEL);
1738

1739
	if (!mp->tx_desc_area) {
1740 1741 1742 1743 1744
		printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
							dev->name, size);
		err = -ENOMEM;
		goto out_free_tx_skb;
	}
1745 1746
	BUG_ON((u32) mp->tx_desc_area & 0xf);	/* check 16-byte alignment */
	memset((void *)mp->tx_desc_area, 0, mp->tx_desc_area_size);
1747

1748
	ether_init_tx_desc_ring(mp);
1749

1750 1751
	/* Allocate RX ring */
	mp->rx_desc_count = 0;
1752
	size = mp->rx_ring_size * sizeof(struct rx_desc);
1753
	mp->rx_desc_area_size = size;
1754

1755
	if (mp->rx_sram_size) {
1756
		mp->rx_desc_area = ioremap(mp->rx_sram_addr,
1757 1758 1759
							mp->rx_sram_size);
		mp->rx_desc_dma = mp->rx_sram_addr;
	} else
1760
		mp->rx_desc_area = dma_alloc_coherent(NULL, size,
1761 1762
							&mp->rx_desc_dma,
							GFP_KERNEL);
1763

1764
	if (!mp->rx_desc_area) {
1765 1766 1767 1768 1769
		printk(KERN_ERR "%s: Cannot allocate Rx ring (size %d bytes)\n",
							dev->name, size);
		printk(KERN_ERR "%s: Freeing previously allocated TX queues...",
							dev->name);
		if (mp->rx_sram_size)
1770
			iounmap(mp->tx_desc_area);
1771 1772
		else
			dma_free_coherent(NULL, mp->tx_desc_area_size,
1773
					mp->tx_desc_area, mp->tx_desc_dma);
1774 1775 1776
		err = -ENOMEM;
		goto out_free_tx_skb;
	}
1777
	memset((void *)mp->rx_desc_area, 0, size);
1778

1779
	ether_init_rx_desc_ring(mp);
1780

1781
	mv643xx_eth_rx_refill_descs(dev);	/* Fill RX ring with skb's */
1782

1783
#ifdef MV643XX_ETH_NAPI
1784 1785
	napi_enable(&mp->napi);
#endif
1786

1787
	port_start(dev);
1788

1789
	/* Interrupt Coalescing */
1790

1791 1792
#ifdef MV643XX_ETH_COAL
	mp->rx_int_coal = set_rx_coal(mp, MV643XX_ETH_RX_COAL);
1793 1794
#endif

1795
	mp->tx_int_coal = set_tx_coal(mp, MV643XX_ETH_TX_COAL);
1796

1797
	/* Unmask phy and link status changes interrupts */
1798
	wrl(mp, INT_MASK_EXT(port_num), INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX);
1799

1800
	/* Unmask RX buffer and TX end interrupt */
1801
	wrl(mp, INT_MASK(port_num), INT_RX | INT_EXT);
1802

1803 1804 1805 1806 1807 1808 1809 1810 1811 1812
	return 0;

out_free_tx_skb:
	kfree(mp->tx_skb);
out_free_rx_skb:
	kfree(mp->rx_skb);
out_free_irq:
	free_irq(dev->irq, dev);

	return err;
1813 1814
}

1815
/*
 * Quiesce the port: stop TX and RX queues, clear the MIB counters,
 * and clear the enable/force-link bits in the serial control
 * register so the port is fully disabled.
 */
static void port_reset(struct mv643xx_eth_private *mp)
{
	unsigned int port_num = mp->port_num;
	unsigned int reg_data;

	mv643xx_eth_port_disable_tx(mp);
	mv643xx_eth_port_disable_rx(mp);

	/* Clear all MIB counters */
	clear_mib_counters(mp);

	/* Reset the Enable bit in the Configuration Register */
	reg_data = rdl(mp, PORT_SERIAL_CONTROL(port_num));
	reg_data &= ~(SERIAL_PORT_ENABLE		|
			DO_NOT_FORCE_LINK_FAIL	|
			FORCE_LINK_PASS);
	wrl(mp, PORT_SERIAL_CONTROL(port_num), reg_data);
}

static int mv643xx_eth_stop(struct net_device *dev)
L
Linus Torvalds 已提交
1835
{
1836
	struct mv643xx_eth_private *mp = netdev_priv(dev);
1837
	unsigned int port_num = mp->port_num;
L
Linus Torvalds 已提交
1838

1839
	/* Mask all interrupts on ethernet port */
1840
	wrl(mp, INT_MASK(port_num), 0x00000000);
1841
	/* wait for previous write to complete */
1842
	rdl(mp, INT_MASK(port_num));
L
Linus Torvalds 已提交
1843

1844
#ifdef MV643XX_ETH_NAPI
1845 1846 1847 1848
	napi_disable(&mp->napi);
#endif
	netif_carrier_off(dev);
	netif_stop_queue(dev);
L
Linus Torvalds 已提交
1849

1850
	port_reset(mp);
L
Linus Torvalds 已提交
1851

1852 1853
	mv643xx_eth_free_tx_rings(dev);
	mv643xx_eth_free_rx_rings(dev);
L
Linus Torvalds 已提交
1854

1855
	free_irq(dev->irq, dev);
L
Linus Torvalds 已提交
1856

1857
	return 0;
L
Linus Torvalds 已提交
1858 1859
}

1860
static int mv643xx_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
L
Linus Torvalds 已提交
1861
{
1862
	struct mv643xx_eth_private *mp = netdev_priv(dev);
L
Linus Torvalds 已提交
1863

1864
	return generic_mii_ioctl(&mp->mii, if_mii(ifr), cmd, NULL);
L
Linus Torvalds 已提交
1865 1866
}

1867
static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
L
Linus Torvalds 已提交
1868
{
1869 1870
	if ((new_mtu > 9500) || (new_mtu < 64))
		return -EINVAL;
L
Linus Torvalds 已提交
1871

1872 1873 1874
	dev->mtu = new_mtu;
	if (!netif_running(dev))
		return 0;
L
Linus Torvalds 已提交
1875

1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888
	/*
	 * Stop and then re-open the interface. This will allocate RX
	 * skbs of the new MTU.
	 * There is a possible danger that the open will not succeed,
	 * due to memory being full, which might fail the open function.
	 */
	mv643xx_eth_stop(dev);
	if (mv643xx_eth_open(dev)) {
		printk(KERN_ERR "%s: Fatal error on opening device\n",
			dev->name);
	}

	return 0;
L
Linus Torvalds 已提交
1889 1890
}

1891
static void mv643xx_eth_tx_timeout_task(struct work_struct *ugly)
L
Linus Torvalds 已提交
1892
{
1893
	struct mv643xx_eth_private *mp = container_of(ugly, struct mv643xx_eth_private,
1894 1895
						  tx_timeout_task);
	struct net_device *dev = mp->dev;
L
Linus Torvalds 已提交
1896

1897 1898
	if (!netif_running(dev))
		return;
L
Linus Torvalds 已提交
1899

1900 1901
	netif_stop_queue(dev);

1902 1903
	port_reset(mp);
	port_start(dev);
1904 1905 1906 1907 1908 1909

	if (mp->tx_ring_size - mp->tx_desc_count >= MAX_DESCS_PER_SKB)
		netif_wake_queue(dev);
}

static void mv643xx_eth_tx_timeout(struct net_device *dev)
L
Linus Torvalds 已提交
1910
{
1911
	struct mv643xx_eth_private *mp = netdev_priv(dev);
L
Linus Torvalds 已提交
1912

1913
	printk(KERN_INFO "%s: TX timeout  ", dev->name);
1914

1915 1916
	/* Do the reset outside of interrupt context */
	schedule_work(&mp->tx_timeout_task);
L
Linus Torvalds 已提交
1917 1918
}

1919
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * mv643xx_eth_netpoll - polled "interrupt" for netconsole and friends
 *
 * Masks the port's interrupts, invokes the interrupt handler by hand,
 * then restores the mask.
 *
 * Fix: the restore previously wrote INT_RX | INT_CAUSE_EXT; every other
 * path (open/stop) programs the mask register with INT_RX | INT_EXT, so
 * use the same mask bits here for consistency.
 */
static void mv643xx_eth_netpoll(struct net_device *netdev)
{
	struct mv643xx_eth_private *mp = netdev_priv(netdev);
	int port_num = mp->port_num;

	wrl(mp, INT_MASK(port_num), 0x00000000);
	/* wait for previous write to complete */
	rdl(mp, INT_MASK(port_num));

	mv643xx_eth_int_handler(netdev->irq, netdev);

	wrl(mp, INT_MASK(port_num), INT_RX | INT_EXT);
}
#endif
1934

1935
/* MII register read callback used by the generic mii library. */
static int mv643xx_eth_mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int value;

	read_smi_reg(mp, location, &value);

	return value;
}

1944
/* MII register write callback used by the generic mii library. */
static void mv643xx_eth_mdio_write(struct net_device *dev, int phy_id, int location, int val)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	write_smi_reg(mp, location, val);
}
1949 1950


1951
/* platform glue ************************************************************/
1952 1953 1954
static void
mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
			      struct mbus_dram_target_info *dram)
1955
{
1956
	void __iomem *base = msp->base;
1957 1958 1959
	u32 win_enable;
	u32 win_protect;
	int i;
1960

1961 1962 1963 1964 1965
	for (i = 0; i < 6; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
1966 1967
	}

1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984
	win_enable = 0x3f;
	win_protect = 0;

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
			(cs->mbus_attr << 8) |
			dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable &= ~(1 << i);
		win_protect |= 3 << (2 * i);
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE);
	msp->win_protect = win_protect;
1985 1986
}

1987
static int mv643xx_eth_shared_probe(struct platform_device *pdev)
1988
{
1989
	static int mv643xx_eth_version_printed = 0;
1990
	struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
1991
	struct mv643xx_eth_shared_private *msp;
1992 1993
	struct resource *res;
	int ret;
1994

1995
	if (!mv643xx_eth_version_printed++)
1996
		printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n");
1997

1998 1999 2000 2001
	ret = -EINVAL;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		goto out;
2002

2003 2004 2005 2006 2007 2008
	ret = -ENOMEM;
	msp = kmalloc(sizeof(*msp), GFP_KERNEL);
	if (msp == NULL)
		goto out;
	memset(msp, 0, sizeof(*msp));

2009 2010
	msp->base = ioremap(res->start, res->end - res->start + 1);
	if (msp->base == NULL)
2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030 2031 2032 2033
		goto out_free;

	spin_lock_init(&msp->phy_lock);
	msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;

	platform_set_drvdata(pdev, msp);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (pd != NULL && pd->dram != NULL)
		mv643xx_eth_conf_mbus_windows(msp, pd->dram);

	return 0;

out_free:
	kfree(msp);
out:
	return ret;
}

static int mv643xx_eth_shared_remove(struct platform_device *pdev)
{
2034
	struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
2035

2036
	iounmap(msp->base);
2037 2038 2039
	kfree(msp);

	return 0;
2040 2041
}

2042 2043 2044 2045 2046 2047 2048 2049 2050
/* Platform driver for the shared (non-per-port) register block. */
static struct platform_driver mv643xx_eth_shared_driver = {
	.probe		= mv643xx_eth_shared_probe,
	.remove		= mv643xx_eth_shared_remove,
	.driver = {
		.name	= MV643XX_ETH_SHARED_NAME,
		.owner	= THIS_MODULE,
	},
};

2051
static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
L
Linus Torvalds 已提交
2052
{
2053 2054
	u32 reg_data;
	int addr_shift = 5 * mp->port_num;
L
Linus Torvalds 已提交
2055

2056
	reg_data = rdl(mp, PHY_ADDR);
2057 2058
	reg_data &= ~(0x1f << addr_shift);
	reg_data |= (phy_addr & 0x1f) << addr_shift;
2059
	wrl(mp, PHY_ADDR, reg_data);
L
Linus Torvalds 已提交
2060 2061
}

2062
static int phy_addr_get(struct mv643xx_eth_private *mp)
L
Linus Torvalds 已提交
2063
{
2064
	unsigned int reg_data;
L
Linus Torvalds 已提交
2065

2066
	reg_data = rdl(mp, PHY_ADDR);
L
Linus Torvalds 已提交
2067

2068
	return ((reg_data >> (5 * mp->port_num)) & 0x1f);
L
Linus Torvalds 已提交
2069 2070
}

2071
static int phy_detect(struct mv643xx_eth_private *mp)
L
Linus Torvalds 已提交
2072
{
2073 2074
	unsigned int phy_reg_data0;
	int auto_neg;
L
Linus Torvalds 已提交
2075

2076
	read_smi_reg(mp, 0, &phy_reg_data0);
2077 2078
	auto_neg = phy_reg_data0 & 0x1000;
	phy_reg_data0 ^= 0x1000;	/* invert auto_neg */
2079
	write_smi_reg(mp, 0, phy_reg_data0);
L
Linus Torvalds 已提交
2080

2081
	read_smi_reg(mp, 0, &phy_reg_data0);
2082 2083
	if ((phy_reg_data0 & 0x1000) == auto_neg)
		return -ENODEV;				/* change didn't take */
L
Linus Torvalds 已提交
2084

2085
	phy_reg_data0 ^= 0x1000;
2086
	write_smi_reg(mp, 0, phy_reg_data0);
2087
	return 0;
L
Linus Torvalds 已提交
2088 2089
}

2090 2091 2092
static void mv643xx_init_ethtool_cmd(struct net_device *dev, int phy_address,
				     int speed, int duplex,
				     struct ethtool_cmd *cmd)
2093
{
2094
	struct mv643xx_eth_private *mp = netdev_priv(dev);
2095

2096
	memset(cmd, 0, sizeof(*cmd));
2097

2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110 2111 2112 2113 2114 2115 2116
	cmd->port = PORT_MII;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = phy_address;

	if (speed == 0) {
		cmd->autoneg = AUTONEG_ENABLE;
		/* mii lib checks, but doesn't use speed on AUTONEG_ENABLE */
		cmd->speed = SPEED_100;
		cmd->advertising = ADVERTISED_10baseT_Half  |
				   ADVERTISED_10baseT_Full  |
				   ADVERTISED_100baseT_Half |
				   ADVERTISED_100baseT_Full;
		if (mp->mii.supports_gmii)
			cmd->advertising |= ADVERTISED_1000baseT_Full;
	} else {
		cmd->autoneg = AUTONEG_DISABLE;
		cmd->speed = speed;
		cmd->duplex = duplex;
	}
2117 2118
}

2119
static int mv643xx_eth_probe(struct platform_device *pdev)
L
Linus Torvalds 已提交
2120
{
2121 2122
	struct mv643xx_eth_platform_data *pd;
	int port_num;
2123
	struct mv643xx_eth_private *mp;
2124 2125 2126 2127 2128 2129 2130 2131
	struct net_device *dev;
	u8 *p;
	struct resource *res;
	int err;
	struct ethtool_cmd cmd;
	int duplex = DUPLEX_HALF;
	int speed = 0;			/* default to auto-negotiation */
	DECLARE_MAC_BUF(mac);
L
Linus Torvalds 已提交
2132

2133 2134 2135 2136 2137
	pd = pdev->dev.platform_data;
	if (pd == NULL) {
		printk(KERN_ERR "No mv643xx_eth_platform_data\n");
		return -ENODEV;
	}
L
Linus Torvalds 已提交
2138

2139 2140 2141 2142
	if (pd->shared == NULL) {
		printk(KERN_ERR "No mv643xx_eth_platform_data->shared\n");
		return -ENODEV;
	}
2143

2144
	dev = alloc_etherdev(sizeof(struct mv643xx_eth_private));
2145 2146
	if (!dev)
		return -ENOMEM;
L
Linus Torvalds 已提交
2147

2148
	platform_set_drvdata(pdev, dev);
L
Linus Torvalds 已提交
2149

2150 2151
	mp = netdev_priv(dev);
	mp->dev = dev;
2152 2153
#ifdef MV643XX_ETH_NAPI
	netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 64);
2154
#endif
L
Linus Torvalds 已提交
2155

2156 2157 2158
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	BUG_ON(!res);
	dev->irq = res->start;
L
Linus Torvalds 已提交
2159

2160 2161 2162 2163 2164 2165 2166 2167 2168 2169
	dev->open = mv643xx_eth_open;
	dev->stop = mv643xx_eth_stop;
	dev->hard_start_xmit = mv643xx_eth_start_xmit;
	dev->set_mac_address = mv643xx_eth_set_mac_address;
	dev->set_multicast_list = mv643xx_eth_set_rx_mode;

	/* No need to Tx Timeout */
	dev->tx_timeout = mv643xx_eth_tx_timeout;

#ifdef CONFIG_NET_POLL_CONTROLLER
2170
	dev->poll_controller = mv643xx_eth_netpoll;
2171 2172 2173 2174 2175 2176
#endif

	dev->watchdog_timeo = 2 * HZ;
	dev->base_addr = 0;
	dev->change_mtu = mv643xx_eth_change_mtu;
	dev->do_ioctl = mv643xx_eth_do_ioctl;
2177
	SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);
L
Linus Torvalds 已提交
2178

2179
#ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX
2180
#ifdef MAX_SKB_FRAGS
2181
	/*
2182 2183
	 * Zero copy can only work if we use Discovery II memory. Else, we will
	 * have to map the buffers to ISA memory which is only 16 MB
2184
	 */
2185 2186 2187
	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
#endif
#endif
L
Linus Torvalds 已提交
2188

2189 2190
	/* Configure the timeout task */
	INIT_WORK(&mp->tx_timeout_task, mv643xx_eth_tx_timeout_task);
L
Linus Torvalds 已提交
2191

2192
	spin_lock_init(&mp->lock);
L
Linus Torvalds 已提交
2193

2194 2195
	mp->shared = platform_get_drvdata(pd->shared);
	port_num = mp->port_num = pd->port_number;
2196

2197 2198
	if (mp->shared->win_protect)
		wrl(mp, WINDOW_PROTECT(port_num), mp->shared->win_protect);
L
Linus Torvalds 已提交
2199

2200 2201 2202 2203 2204
	mp->shared_smi = mp->shared;
	if (pd->shared_smi != NULL)
		mp->shared_smi = platform_get_drvdata(pd->shared_smi);

	/* set default config values */
2205 2206 2207
	uc_addr_get(mp, dev->dev_addr);
	mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
	mp->tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
2208 2209 2210 2211 2212

	if (is_valid_ether_addr(pd->mac_addr))
		memcpy(dev->dev_addr, pd->mac_addr, 6);

	if (pd->phy_addr || pd->force_phy_addr)
2213
		phy_addr_set(mp, pd->phy_addr);
2214

2215 2216
	if (pd->rx_queue_size)
		mp->rx_ring_size = pd->rx_queue_size;
L
Linus Torvalds 已提交
2217

2218 2219
	if (pd->tx_queue_size)
		mp->tx_ring_size = pd->tx_queue_size;
L
Linus Torvalds 已提交
2220

2221 2222 2223 2224
	if (pd->tx_sram_size) {
		mp->tx_sram_size = pd->tx_sram_size;
		mp->tx_sram_addr = pd->tx_sram_addr;
	}
L
Linus Torvalds 已提交
2225

2226 2227 2228 2229
	if (pd->rx_sram_size) {
		mp->rx_sram_size = pd->rx_sram_size;
		mp->rx_sram_addr = pd->rx_sram_addr;
	}
L
Linus Torvalds 已提交
2230

2231 2232
	duplex = pd->duplex;
	speed = pd->speed;
L
Linus Torvalds 已提交
2233

2234 2235
	/* Hook up MII support for ethtool */
	mp->mii.dev = dev;
2236 2237
	mp->mii.mdio_read = mv643xx_eth_mdio_read;
	mp->mii.mdio_write = mv643xx_eth_mdio_write;
2238
	mp->mii.phy_id = phy_addr_get(mp);
2239 2240
	mp->mii.phy_id_mask = 0x3f;
	mp->mii.reg_num_mask = 0x1f;
L
Linus Torvalds 已提交
2241

2242
	err = phy_detect(mp);
2243 2244
	if (err) {
		pr_debug("%s: No PHY detected at addr %d\n",
2245
				dev->name, phy_addr_get(mp));
2246 2247
		goto out;
	}
L
Linus Torvalds 已提交
2248

2249
	phy_reset(mp);
2250 2251 2252
	mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii);
	mv643xx_init_ethtool_cmd(dev, mp->mii.phy_id, speed, duplex, &cmd);
	mv643xx_eth_update_pscr(dev, &cmd);
2253
	mv643xx_eth_set_settings(dev, &cmd);
2254

2255 2256 2257 2258
	SET_NETDEV_DEV(dev, &pdev->dev);
	err = register_netdev(dev);
	if (err)
		goto out;
L
Linus Torvalds 已提交
2259

2260 2261 2262 2263
	p = dev->dev_addr;
	printk(KERN_NOTICE
		"%s: port %d with MAC address %s\n",
		dev->name, port_num, print_mac(mac, p));
L
Linus Torvalds 已提交
2264

2265 2266
	if (dev->features & NETIF_F_SG)
		printk(KERN_NOTICE "%s: Scatter Gather Enabled\n", dev->name);
L
Linus Torvalds 已提交
2267

2268 2269 2270
	if (dev->features & NETIF_F_IP_CSUM)
		printk(KERN_NOTICE "%s: TX TCP/IP Checksumming Supported\n",
								dev->name);
L
Linus Torvalds 已提交
2271

2272
#ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX
2273 2274
	printk(KERN_NOTICE "%s: RX TCP/UDP Checksum Offload ON \n", dev->name);
#endif
L
Linus Torvalds 已提交
2275

2276
#ifdef MV643XX_ETH_COAL
2277 2278 2279
	printk(KERN_NOTICE "%s: TX and RX Interrupt Coalescing ON \n",
								dev->name);
#endif
L
Linus Torvalds 已提交
2280

2281
#ifdef MV643XX_ETH_NAPI
2282 2283
	printk(KERN_NOTICE "%s: RX NAPI Enabled \n", dev->name);
#endif
L
Linus Torvalds 已提交
2284

2285 2286
	if (mp->tx_sram_size > 0)
		printk(KERN_NOTICE "%s: Using SRAM\n", dev->name);
L
Linus Torvalds 已提交
2287

2288
	return 0;
L
Linus Torvalds 已提交
2289

2290 2291
out:
	free_netdev(dev);
L
Linus Torvalds 已提交
2292

2293
	return err;
L
Linus Torvalds 已提交
2294 2295
}

2296
static int mv643xx_eth_remove(struct platform_device *pdev)
L
Linus Torvalds 已提交
2297
{
2298
	struct net_device *dev = platform_get_drvdata(pdev);
L
Linus Torvalds 已提交
2299

2300 2301 2302 2303 2304 2305
	unregister_netdev(dev);
	flush_scheduled_work();

	free_netdev(dev);
	platform_set_drvdata(pdev, NULL);
	return 0;
L
Linus Torvalds 已提交
2306 2307
}

2308
static void mv643xx_eth_shutdown(struct platform_device *pdev)
2309
{
2310
	struct net_device *dev = platform_get_drvdata(pdev);
2311
	struct mv643xx_eth_private *mp = netdev_priv(dev);
2312
	unsigned int port_num = mp->port_num;
2313

2314
	/* Mask all interrupts on ethernet port */
2315 2316
	wrl(mp, INT_MASK(port_num), 0);
	rdl(mp, INT_MASK(port_num));
2317

2318
	port_reset(mp);
2319 2320
}

2321 2322 2323 2324 2325 2326 2327 2328 2329 2330
/* Platform driver for the per-port ethernet devices. */
static struct platform_driver mv643xx_eth_driver = {
	.probe		= mv643xx_eth_probe,
	.remove		= mv643xx_eth_remove,
	.shutdown	= mv643xx_eth_shutdown,
	.driver = {
		.name	= MV643XX_ETH_NAME,
		.owner	= THIS_MODULE,
	},
};

2331
/*
 * Module entry point: the shared-block driver must be registered before
 * the per-port driver, since port probes look up the shared drvdata.
 */
static int __init mv643xx_eth_init_module(void)
{
	int rc;

	rc = platform_driver_register(&mv643xx_eth_shared_driver);
	if (rc)
		return rc;

	rc = platform_driver_register(&mv643xx_eth_driver);
	if (rc)
		platform_driver_unregister(&mv643xx_eth_shared_driver);

	return rc;
}

2344
/* Module exit: unregister the drivers in reverse order of registration. */
static void __exit mv643xx_eth_cleanup_module(void)
{
	platform_driver_unregister(&mv643xx_eth_driver);
	platform_driver_unregister(&mv643xx_eth_shared_driver);
}

2350 2351
module_init(mv643xx_eth_init_module);
module_exit(mv643xx_eth_cleanup_module);

/* Module metadata; the platform aliases enable autoloading by device name. */
MODULE_LICENSE("GPL");
MODULE_AUTHOR(	"Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, Manish Lachwani"
		" and Dale Farnsworth");
MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
MODULE_ALIAS("platform:" MV643XX_ETH_NAME);
MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);