/*
 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
 *
 * Based on the 64360 driver from:
 * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
 *		      Rabeeh Khoury <rabeeh@marvell.com>
 *
 * Copyright (C) 2003 PMC-Sierra, Inc.,
 *	written by Manish Lachwani
 *
 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
 *
 * Copyright (C) 2004-2006 MontaVista Software, Inc.
 *			   Dale Farnsworth <dale@farnsworth.org>
 *
 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
 *				     <sjhill@realitydiluted.com>
 *
 * Copyright (C) 2007-2008 Marvell Semiconductor
 *			   Lennert Buytenhek <buytenh@marvell.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */

#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
#include <linux/mv643xx_eth.h>
#include <asm/io.h>
#include <asm/types.h>
#include <asm/system.h>

static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.0";

#define MV643XX_ETH_CHECKSUM_OFFLOAD_TX
#define MV643XX_ETH_NAPI
#define MV643XX_ETH_TX_FAST_REFILL

#ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX
#define MAX_DESCS_PER_SKB	(MAX_SKB_FRAGS + 1)
#else
#define MAX_DESCS_PER_SKB	1
#endif

/*
 * Registers shared between all ports.
 */
#define PHY_ADDR			0x0000
#define SMI_REG				0x0004
#define WINDOW_BASE(w)			(0x0200 + ((w) << 3))
#define WINDOW_SIZE(w)			(0x0204 + ((w) << 3))
#define WINDOW_REMAP_HIGH(w)		(0x0280 + ((w) << 2))
#define WINDOW_BAR_ENABLE		0x0290
#define WINDOW_PROTECT(w)		(0x0294 + ((w) << 4))

/*
 * Per-port registers.
 */
#define PORT_CONFIG(p)			(0x0400 + ((p) << 10))
#define  UNICAST_PROMISCUOUS_MODE	0x00000001
#define PORT_CONFIG_EXT(p)		(0x0404 + ((p) << 10))
#define MAC_ADDR_LOW(p)			(0x0414 + ((p) << 10))
#define MAC_ADDR_HIGH(p)		(0x0418 + ((p) << 10))
#define SDMA_CONFIG(p)			(0x041c + ((p) << 10))
#define PORT_SERIAL_CONTROL(p)		(0x043c + ((p) << 10))
#define PORT_STATUS(p)			(0x0444 + ((p) << 10))
#define  TX_FIFO_EMPTY			0x00000400
#define TXQ_COMMAND(p)			(0x0448 + ((p) << 10))
#define TXQ_FIX_PRIO_CONF(p)		(0x044c + ((p) << 10))
#define TX_BW_RATE(p)			(0x0450 + ((p) << 10))
#define TX_BW_MTU(p)			(0x0458 + ((p) << 10))
#define TX_BW_BURST(p)			(0x045c + ((p) << 10))
#define INT_CAUSE(p)			(0x0460 + ((p) << 10))
#define  INT_RX				0x00000804
#define  INT_EXT			0x00000002
#define INT_CAUSE_EXT(p)		(0x0464 + ((p) << 10))
#define  INT_EXT_LINK			0x00100000
#define  INT_EXT_PHY			0x00010000
#define  INT_EXT_TX_ERROR_0		0x00000100
#define  INT_EXT_TX_0			0x00000001
#define  INT_EXT_TX			0x00000101
#define INT_MASK(p)			(0x0468 + ((p) << 10))
#define INT_MASK_EXT(p)			(0x046c + ((p) << 10))
#define TX_FIFO_URGENT_THRESHOLD(p)	(0x0474 + ((p) << 10))
#define RXQ_CURRENT_DESC_PTR(p)		(0x060c + ((p) << 10))
#define RXQ_COMMAND(p)			(0x0680 + ((p) << 10))
#define TXQ_CURRENT_DESC_PTR(p)		(0x06c0 + ((p) << 10))
#define TXQ_BW_TOKENS(p)		(0x0700 + ((p) << 10))
#define TXQ_BW_CONF(p)			(0x0704 + ((p) << 10))
#define TXQ_BW_WRR_CONF(p)		(0x0708 + ((p) << 10))
#define MIB_COUNTERS(p)			(0x1000 + ((p) << 7))
#define SPECIAL_MCAST_TABLE(p)		(0x1400 + ((p) << 10))
#define OTHER_MCAST_TABLE(p)		(0x1500 + ((p) << 10))
#define UNICAST_TABLE(p)		(0x1600 + ((p) << 10))


/*
 * SDMA configuration register.
 */
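/*
 * The default value selected below enables 4x 64-bit bursts for both
 * the receive and transmit SDMA engines; on little-endian hosts the
 * BLM_*_NO_SWAP bits are also set to disable the controller's byte
 * swapping, so that packet buffers are accessed in host byte order.
 */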
#define RX_BURST_SIZE_4_64BIT		(2 << 1)
#define BLM_RX_NO_SWAP			(1 << 4)
#define BLM_TX_NO_SWAP			(1 << 5)
#define TX_BURST_SIZE_4_64BIT		(2 << 22)

#if defined(__BIG_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		RX_BURST_SIZE_4_64BIT	|	\
		TX_BURST_SIZE_4_64BIT
#elif defined(__LITTLE_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		RX_BURST_SIZE_4_64BIT	|	\
		BLM_RX_NO_SWAP		|	\
		BLM_TX_NO_SWAP		|	\
		TX_BURST_SIZE_4_64BIT
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif


/*
 * Port serial control register.
 */
#define SET_MII_SPEED_TO_100			(1 << 24)
#define SET_GMII_SPEED_TO_1000			(1 << 23)
#define SET_FULL_DUPLEX_MODE			(1 << 21)
#define MAX_RX_PACKET_1522BYTE			(1 << 17)
#define MAX_RX_PACKET_9700BYTE			(5 << 17)
#define MAX_RX_PACKET_MASK			(7 << 17)
#define DISABLE_AUTO_NEG_SPEED_GMII		(1 << 13)
#define DO_NOT_FORCE_LINK_FAIL			(1 << 10)
#define SERIAL_PORT_CONTROL_RESERVED		(1 << 9)
#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL		(1 << 3)
#define DISABLE_AUTO_NEG_FOR_DUPLEX		(1 << 2)
#define FORCE_LINK_PASS				(1 << 1)
#define SERIAL_PORT_ENABLE			(1 << 0)

#define DEFAULT_RX_QUEUE_SIZE		400
#define DEFAULT_TX_QUEUE_SIZE		800


/*
 * RX/TX descriptors.
 */
#if defined(__BIG_ENDIAN)
struct rx_desc {
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u16 buf_size;		/* Buffer size				*/
	u32 cmd_sts;		/* Descriptor command status		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
};

struct tx_desc {
	u16 byte_cnt;		/* buffer byte count			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u32 cmd_sts;		/* Command/status field			*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
};
#elif defined(__LITTLE_ENDIAN)
struct rx_desc {
	u32 cmd_sts;		/* Descriptor command status		*/
	u16 buf_size;		/* Buffer size				*/
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
};

struct tx_desc {
	u32 cmd_sts;		/* Command/status field			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u16 byte_cnt;		/* buffer byte count			*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
};
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif

/* RX & TX descriptor command */
#define BUFFER_OWNED_BY_DMA		0x80000000

/* RX & TX descriptor status */
#define ERROR_SUMMARY			0x00000001

/* RX descriptor status */
#define LAYER_4_CHECKSUM_OK		0x40000000
#define RX_ENABLE_INTERRUPT		0x20000000
#define RX_FIRST_DESC			0x08000000
#define RX_LAST_DESC			0x04000000

/* TX descriptor command */
#define TX_ENABLE_INTERRUPT		0x00800000
#define GEN_CRC				0x00400000
#define TX_FIRST_DESC			0x00200000
#define TX_LAST_DESC			0x00100000
#define ZERO_PADDING			0x00080000
#define GEN_IP_V4_CHECKSUM		0x00040000
#define GEN_TCP_UDP_CHECKSUM		0x00020000
#define UDP_FRAME			0x00010000

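/*
 * Bit offset of the IP header length (IHL, in 32-bit words) within the
 * TX descriptor command/status word; txq_submit_skb() fills it in when
 * hardware TCP/UDP checksum generation is requested.
 */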
#define TX_IHL_SHIFT			11


/* global *******************************************************************/
struct mv643xx_eth_shared_private {
	/*
	 * Ethernet controller base address.
	 */
	void __iomem *base;

	/*
	 * Protects access to SMI_REG, which is shared between ports.
	 */
	spinlock_t phy_lock;

	/*
	 * Per-port MBUS window access register value.
	 */
	u32 win_protect;

	/*
	 * Hardware-specific parameters.
	 */
	unsigned int t_clk;
};


/* per-port *****************************************************************/
struct mib_counters {
	u64 good_octets_received;
	u32 bad_octets_received;
	u32 internal_mac_transmit_err;
	u32 good_frames_received;
	u32 bad_frames_received;
	u32 broadcast_frames_received;
	u32 multicast_frames_received;
	u32 frames_64_octets;
	u32 frames_65_to_127_octets;
	u32 frames_128_to_255_octets;
	u32 frames_256_to_511_octets;
	u32 frames_512_to_1023_octets;
	u32 frames_1024_to_max_octets;
	u64 good_octets_sent;
	u32 good_frames_sent;
	u32 excessive_collision;
	u32 multicast_frames_sent;
	u32 broadcast_frames_sent;
	u32 unrec_mac_control_received;
	u32 fc_sent;
	u32 good_fc_received;
	u32 bad_fc_received;
	u32 undersize_received;
	u32 fragments_received;
	u32 oversize_received;
	u32 jabber_received;
	u32 mac_receive_error;
	u32 bad_crc_event;
	u32 collision;
	u32 late_collision;
};

struct rx_queue {
	int rx_ring_size;

	int rx_desc_count;
	int rx_curr_desc;
	int rx_used_desc;

	struct rx_desc *rx_desc_area;
	dma_addr_t rx_desc_dma;
	int rx_desc_area_size;
	struct sk_buff **rx_skb;

	struct timer_list rx_oom;
};

struct tx_queue {
	int tx_ring_size;

	int tx_desc_count;
	int tx_curr_desc;
	int tx_used_desc;

	struct tx_desc *tx_desc_area;
	dma_addr_t tx_desc_dma;
	int tx_desc_area_size;
	struct sk_buff **tx_skb;
};

struct mv643xx_eth_private {
	struct mv643xx_eth_shared_private *shared;
	int port_num;

	struct net_device *dev;

	struct mv643xx_eth_shared_private *shared_smi;
	int phy_addr;

	spinlock_t lock;

	struct mib_counters mib_counters;
	struct work_struct tx_timeout_task;
	struct mii_if_info mii;

	/*
	 * RX state.
	 */
	int default_rx_ring_size;
	unsigned long rx_desc_sram_addr;
	int rx_desc_sram_size;
	struct napi_struct napi;
	struct rx_queue rxq[1];

	/*
	 * TX state.
	 */
	int default_tx_ring_size;
	unsigned long tx_desc_sram_addr;
	int tx_desc_sram_size;
	struct tx_queue txq[1];
#ifdef MV643XX_ETH_TX_FAST_REFILL
	int tx_clean_threshold;
#endif
};


/* port register accessors **************************************************/
static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->shared->base + offset);
}

static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->shared->base + offset);
}


/* rxq/txq helper functions *************************************************/
static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
{
	return container_of(rxq, struct mv643xx_eth_private, rxq[0]);
}

static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
{
	return container_of(txq, struct mv643xx_eth_private, txq[0]);
}

static void rxq_enable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	wrl(mp, RXQ_COMMAND(mp->port_num), 1);
}

static void rxq_disable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	u8 mask = 1;

	wrl(mp, RXQ_COMMAND(mp->port_num), mask << 8);
	while (rdl(mp, RXQ_COMMAND(mp->port_num)) & mask)
		udelay(10);
}

static void txq_enable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	wrl(mp, TXQ_COMMAND(mp->port_num), 1);
}

static void txq_disable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	u8 mask = 1;

	wrl(mp, TXQ_COMMAND(mp->port_num), mask << 8);
	while (rdl(mp, TXQ_COMMAND(mp->port_num)) & mask)
		udelay(10);
}

static void __txq_maybe_wake(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);

	if (txq->tx_ring_size - txq->tx_desc_count >= MAX_DESCS_PER_SKB)
		netif_wake_queue(mp->dev);
}


/* rx ***********************************************************************/
static void txq_reclaim(struct tx_queue *txq, int force);

static void rxq_refill(struct rx_queue *rxq)
L
Linus Torvalds 已提交
421
{
422
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
423
	unsigned long flags;
L
Linus Torvalds 已提交
424

425
	spin_lock_irqsave(&mp->lock, flags);
426

427 428
	while (rxq->rx_desc_count < rxq->rx_ring_size) {
		int skb_size;
429 430 431 432
		struct sk_buff *skb;
		int unaligned;
		int rx;

433 434 435 436 437 438 439 440 441 442
		/*
		 * Reserve 2+14 bytes for an ethernet header (the
		 * hardware automatically prepends 2 bytes of dummy
		 * data to each received packet), 4 bytes for a VLAN
		 * header, and 4 bytes for the trailing FCS -- 24
		 * bytes total.
		 */
		skb_size = mp->dev->mtu + 24;

		skb = dev_alloc_skb(skb_size + dma_get_cache_alignment() - 1);
443
		if (skb == NULL)
L
Linus Torvalds 已提交
444
			break;
445

R
Ralf Baechle 已提交
446
		unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
447
		if (unaligned)
R
Ralf Baechle 已提交
448
			skb_reserve(skb, dma_get_cache_alignment() - unaligned);
449

450 451 452
		rxq->rx_desc_count++;
		rx = rxq->rx_used_desc;
		rxq->rx_used_desc = (rx + 1) % rxq->rx_ring_size;
453

454 455 456 457
		rxq->rx_desc_area[rx].buf_ptr = dma_map_single(NULL, skb->data,
						skb_size, DMA_FROM_DEVICE);
		rxq->rx_desc_area[rx].buf_size = skb_size;
		rxq->rx_skb[rx] = skb;
458
		wmb();
459
		rxq->rx_desc_area[rx].cmd_sts = BUFFER_OWNED_BY_DMA |
460 461 462
						RX_ENABLE_INTERRUPT;
		wmb();

L
Lennert Buytenhek 已提交
463 464 465 466 467 468
		/*
		 * The hardware automatically prepends 2 bytes of
		 * dummy data to each received packet, so that the
		 * IP header ends up 16-byte aligned.
		 */
		skb_reserve(skb, 2);
L
Linus Torvalds 已提交
469
	}
470

471 472 473
	if (rxq->rx_desc_count == 0) {
		rxq->rx_oom.expires = jiffies + (HZ / 10);
		add_timer(&rxq->rx_oom);
L
Linus Torvalds 已提交
474
	}
475 476

	spin_unlock_irqrestore(&mp->lock, flags);
L
Linus Torvalds 已提交
477 478
}

479
static inline void rxq_refill_timer_wrapper(unsigned long data)
L
Linus Torvalds 已提交
480
{
481
	rxq_refill((struct rx_queue *)data);
L
Linus Torvalds 已提交
482 483
}

484
static int rxq_process(struct rx_queue *rxq, int budget)
L
Linus Torvalds 已提交
485
{
486 487 488
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	struct net_device_stats *stats = &mp->dev->stats;
	int rx;
L
Linus Torvalds 已提交
489

490 491
	rx = 0;
	while (rx < budget) {
L
Lennert Buytenhek 已提交
492
		struct rx_desc *rx_desc;
493
		unsigned int cmd_sts;
L
Lennert Buytenhek 已提交
494
		struct sk_buff *skb;
495
		unsigned long flags;
496

497
		spin_lock_irqsave(&mp->lock, flags);
498

499
		rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];
L
Linus Torvalds 已提交
500

501 502 503 504 505 506
		cmd_sts = rx_desc->cmd_sts;
		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
			spin_unlock_irqrestore(&mp->lock, flags);
			break;
		}
		rmb();
L
Linus Torvalds 已提交
507

508 509
		skb = rxq->rx_skb[rxq->rx_curr_desc];
		rxq->rx_skb[rxq->rx_curr_desc] = NULL;
510

511
		rxq->rx_curr_desc = (rxq->rx_curr_desc + 1) % rxq->rx_ring_size;
512

513
		spin_unlock_irqrestore(&mp->lock, flags);
L
Linus Torvalds 已提交
514

L
Lennert Buytenhek 已提交
515 516
		dma_unmap_single(NULL, rx_desc->buf_ptr + 2,
				 mp->dev->mtu + 24, DMA_FROM_DEVICE);
517 518
		rxq->rx_desc_count--;
		rx++;
519

520 521
		/*
		 * Update statistics.
L
Lennert Buytenhek 已提交
522 523 524 525 526
		 *
		 * Note that the descriptor byte count includes 2 dummy
		 * bytes automatically inserted by the hardware at the
		 * start of the packet (which we don't count), and a 4
		 * byte CRC at the end of the packet (which we do count).
527
		 */
L
Linus Torvalds 已提交
528
		stats->rx_packets++;
L
Lennert Buytenhek 已提交
529
		stats->rx_bytes += rx_desc->byte_cnt - 2;
530

L
Linus Torvalds 已提交
531
		/*
L
Lennert Buytenhek 已提交
532 533 534
		 * In case we received a packet without first / last bits
		 * on, or the error summary bit is set, the packet needs
		 * to be dropped.
L
Linus Torvalds 已提交
535
		 */
536
		if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
537
					(RX_FIRST_DESC | RX_LAST_DESC))
538
				|| (cmd_sts & ERROR_SUMMARY)) {
L
Linus Torvalds 已提交
539
			stats->rx_dropped++;
L
Lennert Buytenhek 已提交
540

541
			if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
542
				(RX_FIRST_DESC | RX_LAST_DESC)) {
L
Linus Torvalds 已提交
543
				if (net_ratelimit())
L
Lennert Buytenhek 已提交
544 545 546
					dev_printk(KERN_ERR, &mp->dev->dev,
						   "received packet spanning "
						   "multiple descriptors\n");
L
Linus Torvalds 已提交
547
			}
L
Lennert Buytenhek 已提交
548

549
			if (cmd_sts & ERROR_SUMMARY)
L
Linus Torvalds 已提交
550 551 552 553 554 555 556 557
				stats->rx_errors++;

			dev_kfree_skb_irq(skb);
		} else {
			/*
			 * The -4 is for the CRC in the trailer of the
			 * received packet
			 */
L
Lennert Buytenhek 已提交
558
			skb_put(skb, rx_desc->byte_cnt - 2 - 4);
L
Linus Torvalds 已提交
559

560
			if (cmd_sts & LAYER_4_CHECKSUM_OK) {
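				/*
				 * Bits 3..18 of the descriptor status carry
				 * the 16-bit checksum computed by the hardware.
				 */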
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				skb->csum = htons(
563
					(cmd_sts & 0x0007fff8) >> 3);
L
Linus Torvalds 已提交
564
			}
565
			skb->protocol = eth_type_trans(skb, mp->dev);
566
#ifdef MV643XX_ETH_NAPI
L
Linus Torvalds 已提交
567 568 569 570 571
			netif_receive_skb(skb);
#else
			netif_rx(skb);
#endif
		}
L
Lennert Buytenhek 已提交
572

573
		mp->dev->last_rx = jiffies;
L
Linus Torvalds 已提交
574
	}
L
Lennert Buytenhek 已提交
575

576
	rxq_refill(rxq);
L
Linus Torvalds 已提交
577

578
	return rx;
L
Linus Torvalds 已提交
579 580
}

581 582
#ifdef MV643XX_ETH_NAPI
static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
583
{
584 585 586 587
	struct mv643xx_eth_private *mp;
	int rx;

	mp = container_of(napi, struct mv643xx_eth_private, napi);
588

589
#ifdef MV643XX_ETH_TX_FAST_REFILL
590
	if (++mp->tx_clean_threshold > 5) {
591
		txq_reclaim(mp->txq, 0);
592
		mp->tx_clean_threshold = 0;
593
	}
594
#endif
595

596
	rx = rxq_process(mp->rxq, budget);
597

598 599 600 601 602
	if (rx < budget) {
		netif_rx_complete(mp->dev, napi);
		wrl(mp, INT_CAUSE(mp->port_num), 0);
		wrl(mp, INT_CAUSE_EXT(mp->port_num), 0);
		wrl(mp, INT_MASK(mp->port_num), INT_RX | INT_EXT);
603
	}
604

605
	return rx;
606
}
607
#endif
608


/* tx ***********************************************************************/
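/*
 * Returns nonzero if the skb contains a paged fragment of at most
 * 8 bytes that is not 8-byte aligned; such skbs are linearised in
 * mv643xx_eth_xmit() before being handed to the hardware.
 */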
static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
{
613
	int frag;
L
Linus Torvalds 已提交
614

615
	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
616 617
		skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
		if (fragp->size <= 8 && fragp->page_offset & 7)
618
			return 1;
L
Linus Torvalds 已提交
619
	}
620

621 622
	return 0;
}
623

624
static int txq_alloc_desc_index(struct tx_queue *txq)
625 626
{
	int tx_desc_curr;
627

628
	BUG_ON(txq->tx_desc_count >= txq->tx_ring_size);
L
Linus Torvalds 已提交
629

630 631
	tx_desc_curr = txq->tx_curr_desc;
	txq->tx_curr_desc = (tx_desc_curr + 1) % txq->tx_ring_size;
632

633
	BUG_ON(txq->tx_curr_desc == txq->tx_used_desc);
634

635 636
	return tx_desc_curr;
}
637

638
static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
639
{
640
	int nr_frags = skb_shinfo(skb)->nr_frags;
641
	int frag;
L
Linus Torvalds 已提交
642

643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665
	for (frag = 0; frag < nr_frags; frag++) {
		skb_frag_t *this_frag;
		int tx_index;
		struct tx_desc *desc;

		this_frag = &skb_shinfo(skb)->frags[frag];
		tx_index = txq_alloc_desc_index(txq);
		desc = &txq->tx_desc_area[tx_index];

		/*
		 * The last fragment will generate an interrupt
		 * which will free the skb on TX completion.
		 */
		if (frag == nr_frags - 1) {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA |
					ZERO_PADDING | TX_LAST_DESC |
					TX_ENABLE_INTERRUPT;
			txq->tx_skb[tx_index] = skb;
		} else {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA;
			txq->tx_skb[tx_index] = NULL;
		}

666 667 668 669 670 671 672
		desc->l4i_chk = 0;
		desc->byte_cnt = this_frag->size;
		desc->buf_ptr = dma_map_page(NULL, this_frag->page,
						this_frag->page_offset,
						this_frag->size,
						DMA_TO_DEVICE);
	}
L
Linus Torvalds 已提交
673 674
}

675 676 677 678
static inline __be16 sum16_as_be(__sum16 sum)
{
	return (__force __be16)sum;
}
L
Linus Torvalds 已提交
679

680
static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
L
Linus Torvalds 已提交
681
{
682
	int nr_frags = skb_shinfo(skb)->nr_frags;
683
	int tx_index;
684
	struct tx_desc *desc;
685 686
	u32 cmd_sts;
	int length;
L
Linus Torvalds 已提交
687

688
	cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
L
Linus Torvalds 已提交
689

690 691
	tx_index = txq_alloc_desc_index(txq);
	desc = &txq->tx_desc_area[tx_index];
692 693

	if (nr_frags) {
694
		txq_submit_frag_skb(txq, skb);
695 696

		length = skb_headlen(skb);
697
		txq->tx_skb[tx_index] = NULL;
698
	} else {
699
		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
700
		length = skb->len;
701
		txq->tx_skb[tx_index] = skb;
702 703 704 705 706 707 708 709
	}

	desc->byte_cnt = length;
	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		BUG_ON(skb->protocol != htons(ETH_P_IP));

710 711 712
		cmd_sts |= GEN_TCP_UDP_CHECKSUM |
			   GEN_IP_V4_CHECKSUM   |
			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;
713 714 715

		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_UDP:
716
			cmd_sts |= UDP_FRAME;
717 718 719 720 721 722 723 724 725 726
			desc->l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
			break;
		case IPPROTO_TCP:
			desc->l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
			break;
		default:
			BUG();
		}
	} else {
		/* Errata BTS #50, IHL must be 5 if no HW checksum */
727
		cmd_sts |= 5 << TX_IHL_SHIFT;
728 729 730 731 732 733 734 735 736
		desc->l4i_chk = 0;
	}

	/* ensure all other descriptors are written before first cmd_sts */
	wmb();
	desc->cmd_sts = cmd_sts;

	/* ensure all descriptors are written before poking hardware */
	wmb();
737
	txq_enable(txq);
738

739
	txq->tx_desc_count += nr_frags + 1;
L
Linus Torvalds 已提交
740 741
}

L
Lennert Buytenhek 已提交
742
static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
L
Linus Torvalds 已提交
743
{
744
	struct mv643xx_eth_private *mp = netdev_priv(dev);
745
	struct net_device_stats *stats = &dev->stats;
746
	struct tx_queue *txq;
747
	unsigned long flags;
748

749
	BUG_ON(netif_queue_stopped(dev));
750

751 752
	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
		stats->tx_dropped++;
L
Lennert Buytenhek 已提交
753 754 755
		dev_printk(KERN_DEBUG, &dev->dev,
			   "failed to linearize skb with tiny "
			   "unaligned fragment\n");
756 757 758 759 760
		return NETDEV_TX_BUSY;
	}

	spin_lock_irqsave(&mp->lock, flags);

761 762 763
	txq = mp->txq;

	if (txq->tx_ring_size - txq->tx_desc_count < MAX_DESCS_PER_SKB) {
764 765 766 767 768 769
		printk(KERN_ERR "%s: transmit with queue full\n", dev->name);
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&mp->lock, flags);
		return NETDEV_TX_BUSY;
	}

770
	txq_submit_skb(txq, skb);
771 772 773 774
	stats->tx_bytes += skb->len;
	stats->tx_packets++;
	dev->trans_start = jiffies;

775
	if (txq->tx_ring_size - txq->tx_desc_count < MAX_DESCS_PER_SKB)
776 777 778 779 780
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&mp->lock, flags);

	return NETDEV_TX_OK;
}


/* tx rate control **********************************************************/
/*
 * Set total maximum TX rate (shared by all TX queues for this port)
 * to 'rate' bits per second, with a maximum burst of 'burst' bytes.
 */
static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
{
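	/*
	 * The token rate programmed below works out to 64 * rate / t_clk,
	 * clamped to a maximum of 1023; 'burst' and the MTU are converted
	 * to 256-byte units for the bucket size and MTU fields.
	 */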
	int token_rate;
	int mtu;
	int bucket_size;

	token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	mtu = (mp->dev->mtu + 255) >> 8;
	if (mtu > 63)
		mtu = 63;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	wrl(mp, TX_BW_RATE(mp->port_num), token_rate);
	wrl(mp, TX_BW_MTU(mp->port_num), mtu);
	wrl(mp, TX_BW_BURST(mp->port_num), bucket_size);
}

static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int token_rate;
	int bucket_size;

	token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	wrl(mp, TXQ_BW_TOKENS(mp->port_num), token_rate << 14);
	wrl(mp, TXQ_BW_CONF(mp->port_num),
			(bucket_size << 10) | token_rate);
}

static void txq_set_fixed_prio_mode(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int off;
	u32 val;

	/*
	 * Turn on fixed priority mode.
	 */
	off = TXQ_FIX_PRIO_CONF(mp->port_num);

	val = rdl(mp, off);
	val |= 1;
	wrl(mp, off, val);
}

static void txq_set_wrr(struct tx_queue *txq, int weight)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int off;
	u32 val;

	/*
	 * Turn off fixed priority mode.
	 */
	off = TXQ_FIX_PRIO_CONF(mp->port_num);

	val = rdl(mp, off);
	val &= ~1;
	wrl(mp, off, val);

	/*
	 * Configure WRR weight for this queue.
	 */
	off = TXQ_BW_WRR_CONF(mp->port_num);

	val = rdl(mp, off);
	val = (val & ~0xff) | (weight & 0xff);
	wrl(mp, off, val);
}


873
/* mii management interface *************************************************/
L
Lennert Buytenhek 已提交
874 875 876 877
#define SMI_BUSY		0x10000000
#define SMI_READ_VALID		0x08000000
#define SMI_OPCODE_READ		0x04000000
#define SMI_OPCODE_WRITE	0x00000000
878

L
Lennert Buytenhek 已提交
879 880
static void smi_reg_read(struct mv643xx_eth_private *mp, unsigned int addr,
			 unsigned int reg, unsigned int *value)
L
Linus Torvalds 已提交
881
{
882
	void __iomem *smi_reg = mp->shared_smi->base + SMI_REG;
883
	unsigned long flags;
L
Linus Torvalds 已提交
884 885
	int i;

886 887 888 889
	/* the SMI register is a shared resource */
	spin_lock_irqsave(&mp->shared_smi->phy_lock, flags);

	/* wait for the SMI register to become available */
890
	for (i = 0; readl(smi_reg) & SMI_BUSY; i++) {
891
		if (i == 1000) {
892 893 894
			printk("%s: PHY busy timeout\n", mp->dev->name);
			goto out;
		}
895
		udelay(10);
L
Linus Torvalds 已提交
896 897
	}

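	/*
	 * SMI read frame: the register number goes in bits 21..25 and the
	 * PHY address in bits 16..20; the result is latched into the low
	 * 16 bits of SMI_REG once SMI_READ_VALID becomes set.
	 */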
	writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);
L
Linus Torvalds 已提交
899

900
	/* now wait for the data to be valid */
901
	for (i = 0; !(readl(smi_reg) & SMI_READ_VALID); i++) {
902
		if (i == 1000) {
903 904 905
			printk("%s: PHY read timeout\n", mp->dev->name);
			goto out;
		}
906
		udelay(10);
907 908 909 910 911
	}

	*value = readl(smi_reg) & 0xffff;
out:
	spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags);
L
Linus Torvalds 已提交
912 913
}

L
Lennert Buytenhek 已提交
914 915 916
static void smi_reg_write(struct mv643xx_eth_private *mp,
			  unsigned int addr,
			  unsigned int reg, unsigned int value)
L
Linus Torvalds 已提交
917
{
918
	void __iomem *smi_reg = mp->shared_smi->base + SMI_REG;
919
	unsigned long flags;
L
Linus Torvalds 已提交
920 921
	int i;

922 923 924 925
	/* the SMI register is a shared resource */
	spin_lock_irqsave(&mp->shared_smi->phy_lock, flags);

	/* wait for the SMI register to become available */
926
	for (i = 0; readl(smi_reg) & SMI_BUSY; i++) {
927
		if (i == 1000) {
928 929 930
			printk("%s: PHY busy timeout\n", mp->dev->name);
			goto out;
		}
931
		udelay(10);
L
Linus Torvalds 已提交
932 933
	}

L
Lennert Buytenhek 已提交
934 935
	writel(SMI_OPCODE_WRITE | (reg << 21) |
		(addr << 16) | (value & 0xffff), smi_reg);
936 937 938
out:
	spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags);
}
L
Linus Torvalds 已提交
939

940 941

/* mib counters *************************************************************/
L
Lennert Buytenhek 已提交
942
static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
943
{
L
Lennert Buytenhek 已提交
944
	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
L
Linus Torvalds 已提交
945 946
}

L
Lennert Buytenhek 已提交
947
static void mib_counters_clear(struct mv643xx_eth_private *mp)
948
{
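	/*
	 * The MIB counters are cleared on read; dummy-reading the whole
	 * 0x80-byte counter bank therefore resets every counter to zero.
	 */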
	int i;

	for (i = 0; i < 0x80; i += 4)
		mib_read(mp, i);
953
}
954

L
Lennert Buytenhek 已提交
955
static void mib_counters_update(struct mv643xx_eth_private *mp)
956
{
957
	struct mib_counters *p = &mp->mib_counters;
958

L
Lennert Buytenhek 已提交
959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990
	p->good_octets_received += mib_read(mp, 0x00);
	p->good_octets_received += (u64)mib_read(mp, 0x04) << 32;
	p->bad_octets_received += mib_read(mp, 0x08);
	p->internal_mac_transmit_err += mib_read(mp, 0x0c);
	p->good_frames_received += mib_read(mp, 0x10);
	p->bad_frames_received += mib_read(mp, 0x14);
	p->broadcast_frames_received += mib_read(mp, 0x18);
	p->multicast_frames_received += mib_read(mp, 0x1c);
	p->frames_64_octets += mib_read(mp, 0x20);
	p->frames_65_to_127_octets += mib_read(mp, 0x24);
	p->frames_128_to_255_octets += mib_read(mp, 0x28);
	p->frames_256_to_511_octets += mib_read(mp, 0x2c);
	p->frames_512_to_1023_octets += mib_read(mp, 0x30);
	p->frames_1024_to_max_octets += mib_read(mp, 0x34);
	p->good_octets_sent += mib_read(mp, 0x38);
	p->good_octets_sent += (u64)mib_read(mp, 0x3c) << 32;
	p->good_frames_sent += mib_read(mp, 0x40);
	p->excessive_collision += mib_read(mp, 0x44);
	p->multicast_frames_sent += mib_read(mp, 0x48);
	p->broadcast_frames_sent += mib_read(mp, 0x4c);
	p->unrec_mac_control_received += mib_read(mp, 0x50);
	p->fc_sent += mib_read(mp, 0x54);
	p->good_fc_received += mib_read(mp, 0x58);
	p->bad_fc_received += mib_read(mp, 0x5c);
	p->undersize_received += mib_read(mp, 0x60);
	p->fragments_received += mib_read(mp, 0x64);
	p->oversize_received += mib_read(mp, 0x68);
	p->jabber_received += mib_read(mp, 0x6c);
	p->mac_receive_error += mib_read(mp, 0x70);
	p->bad_crc_event += mib_read(mp, 0x74);
	p->collision += mib_read(mp, 0x78);
	p->late_collision += mib_read(mp, 0x7c);
991 992
}

993 994

/* ethtool ******************************************************************/
995
struct mv643xx_eth_stats {
996 997
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
998 999
	int netdev_off;
	int mp_off;
1000 1001
};

1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048
#define SSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct net_device_stats, m),		\
	  offsetof(struct net_device, stats.m), -1 }

#define MIBSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct mib_counters, m),		\
	  -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }

static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
	SSTAT(rx_packets),
	SSTAT(tx_packets),
	SSTAT(rx_bytes),
	SSTAT(tx_bytes),
	SSTAT(rx_errors),
	SSTAT(tx_errors),
	SSTAT(rx_dropped),
	SSTAT(tx_dropped),
	MIBSTAT(good_octets_received),
	MIBSTAT(bad_octets_received),
	MIBSTAT(internal_mac_transmit_err),
	MIBSTAT(good_frames_received),
	MIBSTAT(bad_frames_received),
	MIBSTAT(broadcast_frames_received),
	MIBSTAT(multicast_frames_received),
	MIBSTAT(frames_64_octets),
	MIBSTAT(frames_65_to_127_octets),
	MIBSTAT(frames_128_to_255_octets),
	MIBSTAT(frames_256_to_511_octets),
	MIBSTAT(frames_512_to_1023_octets),
	MIBSTAT(frames_1024_to_max_octets),
	MIBSTAT(good_octets_sent),
	MIBSTAT(good_frames_sent),
	MIBSTAT(excessive_collision),
	MIBSTAT(multicast_frames_sent),
	MIBSTAT(broadcast_frames_sent),
	MIBSTAT(unrec_mac_control_received),
	MIBSTAT(fc_sent),
	MIBSTAT(good_fc_received),
	MIBSTAT(bad_fc_received),
	MIBSTAT(undersize_received),
	MIBSTAT(fragments_received),
	MIBSTAT(oversize_received),
	MIBSTAT(jabber_received),
	MIBSTAT(mac_receive_error),
	MIBSTAT(bad_crc_event),
	MIBSTAT(collision),
	MIBSTAT(late_collision),
1049 1050
};

1051
static int mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1052
{
1053
	struct mv643xx_eth_private *mp = netdev_priv(dev);
1054 1055 1056 1057 1058 1059
	int err;

	spin_lock_irq(&mp->lock);
	err = mii_ethtool_gset(&mp->mii, cmd);
	spin_unlock_irq(&mp->lock);

L
Lennert Buytenhek 已提交
1060 1061 1062
	/*
	 * The MAC does not support 1000baseT_Half.
	 */
1063 1064 1065 1066 1067 1068
	cmd->supported &= ~SUPPORTED_1000baseT_Half;
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	return err;
}

1069
static int mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
L
Linus Torvalds 已提交
1070
{
1071
	struct mv643xx_eth_private *mp = netdev_priv(dev);
1072 1073
	int err;

L
Lennert Buytenhek 已提交
1074 1075 1076 1077 1078
	/*
	 * The MAC does not support 1000baseT_Half.
	 */
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

1079 1080 1081
	spin_lock_irq(&mp->lock);
	err = mii_ethtool_sset(&mp->mii, cmd);
	spin_unlock_irq(&mp->lock);
1082

1083 1084
	return err;
}
L
Linus Torvalds 已提交
1085

L
Lennert Buytenhek 已提交
1086 1087
static void mv643xx_eth_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *drvinfo)
1088
{
1089 1090
	strncpy(drvinfo->driver,  mv643xx_eth_driver_name, 32);
	strncpy(drvinfo->version, mv643xx_eth_driver_version, 32);
1091
	strncpy(drvinfo->fw_version, "N/A", 32);
L
Lennert Buytenhek 已提交
1092
	strncpy(drvinfo->bus_info, "platform", 32);
1093
	drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
1094
}
L
Linus Torvalds 已提交
1095

L
Lennert Buytenhek 已提交
1096
static int mv643xx_eth_nway_reset(struct net_device *dev)
1097
{
1098
	struct mv643xx_eth_private *mp = netdev_priv(dev);
L
Linus Torvalds 已提交
1099

1100 1101
	return mii_nway_restart(&mp->mii);
}
L
Linus Torvalds 已提交
1102

1103 1104
static u32 mv643xx_eth_get_link(struct net_device *dev)
{
1105
	struct mv643xx_eth_private *mp = netdev_priv(dev);
L
Linus Torvalds 已提交
1106

1107 1108
	return mii_link_ok(&mp->mii);
}
L
Linus Torvalds 已提交
1109

L
Lennert Buytenhek 已提交
1110 1111
static void mv643xx_eth_get_strings(struct net_device *dev,
				    uint32_t stringset, uint8_t *data)
1112 1113
{
	int i;
L
Linus Torvalds 已提交
1114

L
Lennert Buytenhek 已提交
1115 1116
	if (stringset == ETH_SS_STATS) {
		for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
1117
			memcpy(data + i * ETH_GSTRING_LEN,
1118
				mv643xx_eth_stats[i].stat_string,
1119
				ETH_GSTRING_LEN);
1120 1121 1122
		}
	}
}
L
Linus Torvalds 已提交
1123

L
Lennert Buytenhek 已提交
1124 1125 1126
static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
					  struct ethtool_stats *stats,
					  uint64_t *data)
1127
{
L
Lennert Buytenhek 已提交
1128
	struct mv643xx_eth_private *mp = dev->priv;
1129
	int i;
L
Linus Torvalds 已提交
1130

L
Lennert Buytenhek 已提交
1131
	mib_counters_update(mp);
L
Linus Torvalds 已提交
1132

1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145
	for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
		const struct mv643xx_eth_stats *stat;
		void *p;

		stat = mv643xx_eth_stats + i;

		if (stat->netdev_off >= 0)
			p = ((void *)mp->dev) + stat->netdev_off;
		else
			p = ((void *)mp) + stat->mp_off;

		data[i] = (stat->sizeof_stat == 8) ?
				*(uint64_t *)p : *(uint32_t *)p;
L
Linus Torvalds 已提交
1146
	}
1147
}
L
Linus Torvalds 已提交
1148

L
Lennert Buytenhek 已提交
1149
static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
1150
{
L
Lennert Buytenhek 已提交
1151
	if (sset == ETH_SS_STATS)
1152
		return ARRAY_SIZE(mv643xx_eth_stats);
L
Lennert Buytenhek 已提交
1153 1154

	return -EOPNOTSUPP;
1155
}
L
Linus Torvalds 已提交
1156

1157
static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
L
Lennert Buytenhek 已提交
1158 1159 1160 1161 1162
	.get_settings		= mv643xx_eth_get_settings,
	.set_settings		= mv643xx_eth_set_settings,
	.get_drvinfo		= mv643xx_eth_get_drvinfo,
	.nway_reset		= mv643xx_eth_nway_reset,
	.get_link		= mv643xx_eth_get_link,
1163
	.set_sg			= ethtool_op_set_sg,
L
Lennert Buytenhek 已提交
1164 1165
	.get_strings		= mv643xx_eth_get_strings,
	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
1166
	.get_sset_count		= mv643xx_eth_get_sset_count,
1167
};
L
Linus Torvalds 已提交
1168

1169

1170
/* address handling *********************************************************/
1171
static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
1172 1173 1174
{
	unsigned int mac_h;
	unsigned int mac_l;
L
Linus Torvalds 已提交
1175

L
Lennert Buytenhek 已提交
1176 1177
	mac_h = rdl(mp, MAC_ADDR_HIGH(mp->port_num));
	mac_l = rdl(mp, MAC_ADDR_LOW(mp->port_num));
L
Linus Torvalds 已提交
1178

1179 1180 1181 1182 1183 1184
	addr[0] = (mac_h >> 24) & 0xff;
	addr[1] = (mac_h >> 16) & 0xff;
	addr[2] = (mac_h >> 8) & 0xff;
	addr[3] = mac_h & 0xff;
	addr[4] = (mac_l >> 8) & 0xff;
	addr[5] = mac_l & 0xff;
1185
}
L
Linus Torvalds 已提交
1186

1187
static void init_mac_tables(struct mv643xx_eth_private *mp)
1188
{
L
Lennert Buytenhek 已提交
1189
	int i;
L
Linus Torvalds 已提交
1190

L
Lennert Buytenhek 已提交
1191 1192 1193
	for (i = 0; i < 0x100; i += 4) {
		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, 0);
		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, 0);
1194
	}
L
Lennert Buytenhek 已提交
1195 1196 1197

	for (i = 0; i < 0x10; i += 4)
		wrl(mp, UNICAST_TABLE(mp->port_num) + i, 0);
1198
}
1199

1200
static void set_filter_table_entry(struct mv643xx_eth_private *mp,
L
Lennert Buytenhek 已提交
1201
				   int table, unsigned char entry)
1202 1203
{
	unsigned int table_reg;
1204

1205
	/* Set "accepts frame bit" at specified table entry */
L
Lennert Buytenhek 已提交
1206 1207 1208
	table_reg = rdl(mp, table + (entry & 0xfc));
	table_reg |= 0x01 << (8 * (entry & 3));
	wrl(mp, table + (entry & 0xfc), table_reg);
L
Linus Torvalds 已提交
1209 1210
}

1211
static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
L
Linus Torvalds 已提交
1212
{
1213 1214 1215
	unsigned int mac_h;
	unsigned int mac_l;
	int table;
L
Linus Torvalds 已提交
1216

L
Lennert Buytenhek 已提交
1217 1218
	mac_l = (addr[4] << 8) | addr[5];
	mac_h = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
1219

L
Lennert Buytenhek 已提交
1220 1221
	wrl(mp, MAC_ADDR_LOW(mp->port_num), mac_l);
	wrl(mp, MAC_ADDR_HIGH(mp->port_num), mac_h);
L
Linus Torvalds 已提交
1222

L
Lennert Buytenhek 已提交
1223
	table = UNICAST_TABLE(mp->port_num);
1224
	set_filter_table_entry(mp, table, addr[5] & 0x0f);
L
Linus Torvalds 已提交
1225 1226
}

L
Lennert Buytenhek 已提交
1227
static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
L
Linus Torvalds 已提交
1228
{
1229
	struct mv643xx_eth_private *mp = netdev_priv(dev);
L
Linus Torvalds 已提交
1230

L
Lennert Buytenhek 已提交
1231 1232 1233
	/* +2 is for the offset of the HW addr type */
	memcpy(dev->dev_addr, addr + 2, 6);

1234 1235
	init_mac_tables(mp);
	uc_addr_set(mp, dev->dev_addr);
L
Linus Torvalds 已提交
1236 1237 1238 1239

	return 0;
}

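/*
 * 8-bit CRC (polynomial 0x107) over the 6-byte MAC address, used as the
 * index into the 256-entry "other" multicast hash filter table.
 */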
static int addr_crc(unsigned char *addr)
{
	int crc = 0;
	int i;

	for (i = 0; i < 6; i++) {
		int j;

		crc = (crc ^ addr[i]) << 8;
		for (j = 7; j >= 0; j--) {
			if (crc & (0x100 << j))
				crc ^= 0x107 << j;
		}
	}

	return crc;
}

L
Lennert Buytenhek 已提交
1258
static void mv643xx_eth_set_rx_mode(struct net_device *dev)
L
Linus Torvalds 已提交
1259
{
L
Lennert Buytenhek 已提交
1260 1261 1262 1263
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 port_config;
	struct dev_addr_list *addr;
	int i;
1264

L
Lennert Buytenhek 已提交
1265 1266 1267 1268 1269 1270
	port_config = rdl(mp, PORT_CONFIG(mp->port_num));
	if (dev->flags & IFF_PROMISC)
		port_config |= UNICAST_PROMISCUOUS_MODE;
	else
		port_config &= ~UNICAST_PROMISCUOUS_MODE;
	wrl(mp, PORT_CONFIG(mp->port_num), port_config);
L
Linus Torvalds 已提交
1271

L
Lennert Buytenhek 已提交
1272 1273 1274
	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		int port_num = mp->port_num;
		u32 accept = 0x01010101;
1275

L
Lennert Buytenhek 已提交
1276 1277 1278
		for (i = 0; i < 0x100; i += 4) {
			wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept);
			wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept);
1279 1280 1281
		}
		return;
	}
1282

L
Lennert Buytenhek 已提交
1283 1284 1285
	for (i = 0; i < 0x100; i += 4) {
		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, 0);
		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, 0);
L
Linus Torvalds 已提交
1286 1287
	}

L
Lennert Buytenhek 已提交
1288 1289 1290
	for (addr = dev->mc_list; addr != NULL; addr = addr->next) {
		u8 *a = addr->da_addr;
		int table;
1291

L
Lennert Buytenhek 已提交
1292 1293
		if (addr->da_addrlen != 6)
			continue;
L
Linus Torvalds 已提交
1294

L
Lennert Buytenhek 已提交
1295 1296 1297 1298 1299
		if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
			table = SPECIAL_MCAST_TABLE(mp->port_num);
			set_filter_table_entry(mp, table, a[5]);
		} else {
			int crc = addr_crc(a);
L
Linus Torvalds 已提交
1300

L
Lennert Buytenhek 已提交
1301 1302 1303 1304
			table = OTHER_MCAST_TABLE(mp->port_num);
			set_filter_table_entry(mp, table, crc);
		}
	}
1305
}
1306 1307


1308
/* rx/tx queue initialisation ***********************************************/
1309
static int rxq_init(struct mv643xx_eth_private *mp)
1310
{
1311 1312 1313
	struct rx_queue *rxq = mp->rxq;
	struct rx_desc *rx_desc;
	int size;
1314 1315
	int i;

1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331
	rxq->rx_ring_size = mp->default_rx_ring_size;

	rxq->rx_desc_count = 0;
	rxq->rx_curr_desc = 0;
	rxq->rx_used_desc = 0;

	size = rxq->rx_ring_size * sizeof(struct rx_desc);

	if (size <= mp->rx_desc_sram_size) {
		rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
						mp->rx_desc_sram_size);
		rxq->rx_desc_dma = mp->rx_desc_sram_addr;
	} else {
		rxq->rx_desc_area = dma_alloc_coherent(NULL, size,
							&rxq->rx_desc_dma,
							GFP_KERNEL);
1332 1333
	}

1334 1335 1336 1337 1338 1339
	if (rxq->rx_desc_area == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate rx ring (%d bytes)\n", size);
		goto out;
	}
	memset(rxq->rx_desc_area, 0, size);
L
Linus Torvalds 已提交
1340

	rxq->rx_desc_area_size = size;
	rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb),
								GFP_KERNEL);
	if (rxq->rx_skb == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate rx skb ring\n");
		goto out_free;
	}

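	/*
	 * Link the descriptors into a circular ring: each descriptor's
	 * next_desc_ptr holds the DMA address of the following descriptor,
	 * with the last one pointing back to the first.
	 */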
	rx_desc = (struct rx_desc *)rxq->rx_desc_area;
	for (i = 0; i < rxq->rx_ring_size; i++) {
		int nexti = (i + 1) % rxq->rx_ring_size;
		rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
					nexti * sizeof(struct rx_desc);
	}

	init_timer(&rxq->rx_oom);
	rxq->rx_oom.data = (unsigned long)rxq;
	rxq->rx_oom.function = rxq_refill_timer_wrapper;

	return 0;


out_free:
	if (size <= mp->rx_desc_sram_size)
		iounmap(rxq->rx_desc_area);
	else
		dma_free_coherent(NULL, size,
				  rxq->rx_desc_area,
				  rxq->rx_desc_dma);

out:
	return -ENOMEM;
1374
}
1375

1376
static void rxq_deinit(struct rx_queue *rxq)
1377
{
1378 1379 1380 1381
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	int i;

	rxq_disable(rxq);
1382

1383
	del_timer_sync(&rxq->rx_oom);
1384

1385 1386 1387 1388
	for (i = 0; i < rxq->rx_ring_size; i++) {
		if (rxq->rx_skb[i]) {
			dev_kfree_skb(rxq->rx_skb[i]);
			rxq->rx_desc_count--;
L
Linus Torvalds 已提交
1389
		}
1390
	}
L
Linus Torvalds 已提交
1391

1392 1393 1394 1395 1396 1397 1398 1399
	if (rxq->rx_desc_count) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "error freeing rx ring -- %d skbs stuck\n",
			   rxq->rx_desc_count);
	}

	if (rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
		iounmap(rxq->rx_desc_area);
1400
	else
1401 1402 1403 1404
		dma_free_coherent(NULL, rxq->rx_desc_area_size,
				  rxq->rx_desc_area, rxq->rx_desc_dma);

	kfree(rxq->rx_skb);
1405
}
L
Linus Torvalds 已提交
1406

1407
static int txq_init(struct mv643xx_eth_private *mp)
1408
{
1409 1410 1411
	struct tx_queue *txq = mp->txq;
	struct tx_desc *tx_desc;
	int size;
1412
	int i;
L
Linus Torvalds 已提交
1413

1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435
	txq->tx_ring_size = mp->default_tx_ring_size;

	txq->tx_desc_count = 0;
	txq->tx_curr_desc = 0;
	txq->tx_used_desc = 0;

	size = txq->tx_ring_size * sizeof(struct tx_desc);

	if (size <= mp->tx_desc_sram_size) {
		txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
						mp->tx_desc_sram_size);
		txq->tx_desc_dma = mp->tx_desc_sram_addr;
	} else {
		txq->tx_desc_area = dma_alloc_coherent(NULL, size,
							&txq->tx_desc_dma,
							GFP_KERNEL);
	}

	if (txq->tx_desc_area == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate tx ring (%d bytes)\n", size);
		goto out;
1436
	}
1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456
	memset(txq->tx_desc_area, 0, size);

	txq->tx_desc_area_size = size;
	txq->tx_skb = kmalloc(txq->tx_ring_size * sizeof(*txq->tx_skb),
								GFP_KERNEL);
	if (txq->tx_skb == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate tx skb ring\n");
		goto out_free;
	}

	tx_desc = (struct tx_desc *)txq->tx_desc_area;
	for (i = 0; i < txq->tx_ring_size; i++) {
		int nexti = (i + 1) % txq->tx_ring_size;
		tx_desc[i].next_desc_ptr = txq->tx_desc_dma +
					nexti * sizeof(struct tx_desc);
	}

	return 0;

1457

1458 1459 1460 1461 1462 1463 1464
out_free:
	if (size <= mp->tx_desc_sram_size)
		iounmap(txq->tx_desc_area);
	else
		dma_free_coherent(NULL, size,
				  txq->tx_desc_area,
				  txq->tx_desc_dma);
1465

1466 1467
out:
	return -ENOMEM;
1468
}
L
Linus Torvalds 已提交
1469

1470
static void txq_reclaim(struct tx_queue *txq, int force)
1471
{
1472
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
1473
	unsigned long flags;
L
Linus Torvalds 已提交
1474

1475 1476 1477 1478 1479 1480 1481 1482
	spin_lock_irqsave(&mp->lock, flags);
	while (txq->tx_desc_count > 0) {
		int tx_index;
		struct tx_desc *desc;
		u32 cmd_sts;
		struct sk_buff *skb;
		dma_addr_t addr;
		int count;
1483

1484 1485
		tx_index = txq->tx_used_desc;
		desc = &txq->tx_desc_area[tx_index];
1486
		cmd_sts = desc->cmd_sts;
1487

1488 1489
		if (!force && (cmd_sts & BUFFER_OWNED_BY_DMA))
			break;
L
Linus Torvalds 已提交
1490

1491 1492
		txq->tx_used_desc = (tx_index + 1) % txq->tx_ring_size;
		txq->tx_desc_count--;
L
Linus Torvalds 已提交
1493

1494 1495
		addr = desc->buf_ptr;
		count = desc->byte_cnt;
1496 1497
		skb = txq->tx_skb[tx_index];
		txq->tx_skb[tx_index] = NULL;
1498

1499
		if (cmd_sts & ERROR_SUMMARY) {
1500 1501
			dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n");
			mp->dev->stats.tx_errors++;
1502
		}
L
Linus Torvalds 已提交
1503

1504 1505 1506
		/*
		 * Drop mp->lock while we free the skb.
		 */
1507
		spin_unlock_irqrestore(&mp->lock, flags);
L
Linus Torvalds 已提交
1508

1509
		if (cmd_sts & TX_FIRST_DESC)
1510 1511 1512
			dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
		else
			dma_unmap_page(NULL, addr, count, DMA_TO_DEVICE);
1513

1514 1515
		if (skb)
			dev_kfree_skb_irq(skb);
1516

1517
		spin_lock_irqsave(&mp->lock, flags);
1518
	}
1519
	spin_unlock_irqrestore(&mp->lock, flags);
1520
}
L
Linus Torvalds 已提交
1521

1522
static void txq_deinit(struct tx_queue *txq)
1523
{
1524
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
1525

1526 1527
	txq_disable(txq);
	txq_reclaim(txq, 1);
L
Linus Torvalds 已提交
1528

1529
	BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);
L
Linus Torvalds 已提交
1530

1531 1532
	if (txq->tx_desc_area_size <= mp->tx_desc_sram_size)
		iounmap(txq->tx_desc_area);
1533
	else
1534 1535 1536 1537
		dma_free_coherent(NULL, txq->tx_desc_area_size,
				  txq->tx_desc_area, txq->tx_desc_dma);

	kfree(txq->tx_skb);
1538
}
L
Linus Torvalds 已提交
1539 1540


1541
/* netdev ops and related ***************************************************/
L
Lennert Buytenhek 已提交
1542
static void update_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
1543
{
1544 1545
	u32 pscr_o;
	u32 pscr_n;
L
Linus Torvalds 已提交
1546

1547
	pscr_o = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
1548

1549
	/* clear speed, duplex and rx buffer size fields */
1550 1551 1552 1553
	pscr_n = pscr_o & ~(SET_MII_SPEED_TO_100   |
			    SET_GMII_SPEED_TO_1000 |
			    SET_FULL_DUPLEX_MODE   |
			    MAX_RX_PACKET_MASK);
L
Linus Torvalds 已提交
1554

L
Lennert Buytenhek 已提交
1555
	if (speed == SPEED_1000) {
1556 1557
		pscr_n |= SET_GMII_SPEED_TO_1000 | MAX_RX_PACKET_9700BYTE;
	} else {
L
Lennert Buytenhek 已提交
1558
		if (speed == SPEED_100)
1559 1560
			pscr_n |= SET_MII_SPEED_TO_100;
		pscr_n |= MAX_RX_PACKET_1522BYTE;
1561
	}
L
Linus Torvalds 已提交
1562

L
Lennert Buytenhek 已提交
1563
	if (duplex == DUPLEX_FULL)
1564 1565 1566 1567 1568
		pscr_n |= SET_FULL_DUPLEX_MODE;

	if (pscr_n != pscr_o) {
		if ((pscr_o & SERIAL_PORT_ENABLE) == 0)
			wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n);
1569
		else {
1570 1571 1572 1573 1574 1575
			txq_disable(mp->txq);
			pscr_o &= ~SERIAL_PORT_ENABLE;
			wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_o);
			wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n);
			wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n);
			txq_enable(mp->txq);
1576 1577 1578
		}
	}
}
1579

L
Lennert Buytenhek 已提交
1580
static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
1581 1582
{
	struct net_device *dev = (struct net_device *)dev_id;
1583
	struct mv643xx_eth_private *mp = netdev_priv(dev);
L
Lennert Buytenhek 已提交
1584 1585
	u32 int_cause;
	u32 int_cause_ext;
1586

1587
	int_cause = rdl(mp, INT_CAUSE(mp->port_num)) & (INT_RX | INT_EXT);
L
Lennert Buytenhek 已提交
1588 1589 1590 1591
	if (int_cause == 0)
		return IRQ_NONE;

	int_cause_ext = 0;
1592
	if (int_cause & INT_EXT) {
1593
		int_cause_ext = rdl(mp, INT_CAUSE_EXT(mp->port_num))
1594
				& (INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX);
1595
		wrl(mp, INT_CAUSE_EXT(mp->port_num), ~int_cause_ext);
1596
	}
L
Linus Torvalds 已提交
1597

L
Lennert Buytenhek 已提交
1598
	if (int_cause_ext & (INT_EXT_PHY | INT_EXT_LINK)) {
1599
		if (mii_link_ok(&mp->mii)) {
1600 1601
			struct ethtool_cmd cmd;

1602
			mii_ethtool_gset(&mp->mii, &cmd);
L
Lennert Buytenhek 已提交
1603
			update_pscr(mp, cmd.speed, cmd.duplex);
1604
			txq_enable(mp->txq);
1605 1606
			if (!netif_carrier_ok(dev)) {
				netif_carrier_on(dev);
1607
				__txq_maybe_wake(mp->txq);
1608 1609 1610 1611 1612 1613
			}
		} else if (netif_carrier_ok(dev)) {
			netif_stop_queue(dev);
			netif_carrier_off(dev);
		}
	}
L
Linus Torvalds 已提交
1614

1615
#ifdef MV643XX_ETH_NAPI
1616
	if (int_cause & INT_RX) {
1617 1618
		wrl(mp, INT_MASK(mp->port_num), 0x00000000);
		rdl(mp, INT_MASK(mp->port_num));
L
Linus Torvalds 已提交
1619

1620
		netif_rx_schedule(dev, &mp->napi);
1621
	}
1622
#else
1623
	if (int_cause & INT_RX)
1624
		rxq_process(mp->rxq, INT_MAX);
1625
#endif
L
Lennert Buytenhek 已提交
1626

1627 1628 1629 1630
	if (int_cause_ext & INT_EXT_TX) {
		txq_reclaim(mp->txq, 0);
		__txq_maybe_wake(mp->txq);
	}
L
Linus Torvalds 已提交
1631

1632
	return IRQ_HANDLED;
L
Linus Torvalds 已提交
1633 1634
}

1635
static void phy_reset(struct mv643xx_eth_private *mp)
L
Linus Torvalds 已提交
1636
{
L
Lennert Buytenhek 已提交
1637
	unsigned int data;
L
Linus Torvalds 已提交
1638

L
Lennert Buytenhek 已提交
1639 1640 1641
	smi_reg_read(mp, mp->phy_addr, 0, &data);
	data |= 0x8000;
	smi_reg_write(mp, mp->phy_addr, 0, data);
L
Linus Torvalds 已提交
1642

1643 1644
	do {
		udelay(1);
L
Lennert Buytenhek 已提交
1645 1646
		smi_reg_read(mp, mp->phy_addr, 0, &data);
	} while (data & 0x8000);
L
Linus Torvalds 已提交
1647 1648
}

L
Lennert Buytenhek 已提交
1649
static void port_start(struct mv643xx_eth_private *mp)
L
Linus Torvalds 已提交
1650
{
1651 1652
	u32 pscr;
	struct ethtool_cmd ethtool_cmd;
1653
	int i;
L
Linus Torvalds 已提交
1654

1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668
	/*
	 * Configure basic link parameters.
	 */
	pscr = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
	pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS);
	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
	pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
		DISABLE_AUTO_NEG_SPEED_GMII    |
		DISABLE_AUTO_NEG_FOR_DUPLEX    |
		DO_NOT_FORCE_LINK_FAIL	       |
		SERIAL_PORT_CONTROL_RESERVED;
	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
	pscr |= SERIAL_PORT_ENABLE;
	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
L
Linus Torvalds 已提交
1669

1670 1671
	wrl(mp, SDMA_CONFIG(mp->port_num), PORT_SDMA_CONFIG_DEFAULT_VALUE);

L
Lennert Buytenhek 已提交
1672
	mv643xx_eth_get_settings(mp->dev, &ethtool_cmd);
1673
	phy_reset(mp);
L
Lennert Buytenhek 已提交
1674
	mv643xx_eth_set_settings(mp->dev, &ethtool_cmd);
L
Linus Torvalds 已提交
1675

1676 1677 1678
	/*
	 * Configure TX path and queues.
	 */
1679
	tx_set_rate(mp, 1000000000, 16777216);
1680 1681 1682 1683 1684 1685 1686 1687
	for (i = 0; i < 1; i++) {
		struct tx_queue *txq = mp->txq;
		int off = TXQ_CURRENT_DESC_PTR(mp->port_num);
		u32 addr;

		addr = (u32)txq->tx_desc_dma;
		addr += txq->tx_curr_desc * sizeof(struct tx_desc);
		wrl(mp, off, addr);
1688 1689 1690

		txq_set_rate(txq, 1000000000, 16777216);
		txq_set_fixed_prio_mode(txq);
1691 1692
	}

L
Lennert Buytenhek 已提交
1693 1694 1695 1696
	/*
	 * Add configured unicast address to address filter table.
	 */
	uc_addr_set(mp, mp->dev->dev_addr);
L
Linus Torvalds 已提交
1697

1698 1699 1700 1701
	/*
	 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
	 * frames to RX queue #0.
	 */
1702
	wrl(mp, PORT_CONFIG(mp->port_num), 0x00000000);
1703

1704 1705 1706
	/*
	 * Treat BPDUs as normal multicasts, and disable partition mode.
	 */
1707
	wrl(mp, PORT_CONFIG_EXT(mp->port_num), 0x00000000);
1708

1709 1710 1711 1712 1713 1714 1715
	/*
	 * Enable the receive queue.
	 */
	for (i = 0; i < 1; i++) {
		struct rx_queue *rxq = mp->rxq;
		int off = RXQ_CURRENT_DESC_PTR(mp->port_num);
		u32 addr;
L
Linus Torvalds 已提交
1716

1717 1718 1719
		addr = (u32)rxq->rx_desc_dma;
		addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
		wrl(mp, off, addr);
L
Linus Torvalds 已提交
1720

1721 1722
		rxq_enable(rxq);
	}
L
Linus Torvalds 已提交
1723 1724
}

1725
static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
{
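	/*
	 * 'delay' is taken in microseconds; the coalescing field counts
	 * units of 64 t_clk cycles and is clamped to its 14-bit maximum.
	 */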
	unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;
L
Linus Torvalds 已提交
1728

L
Lennert Buytenhek 已提交
1729 1730 1731 1732
	if (coal > 0x3fff)
		coal = 0x3fff;

	wrl(mp, SDMA_CONFIG(mp->port_num),
1733
		((coal & 0x3fff) << 8) |
L
Lennert Buytenhek 已提交
1734
		(rdl(mp, SDMA_CONFIG(mp->port_num))
1735
			& 0xffc000ff));
L
Linus Torvalds 已提交
1736 1737
}

1738
static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
L
Linus Torvalds 已提交
1739
{
1740
	unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;
L
Linus Torvalds 已提交
1741

L
Lennert Buytenhek 已提交
1742 1743 1744
	if (coal > 0x3fff)
		coal = 0x3fff;
	wrl(mp, TX_FIFO_URGENT_THRESHOLD(mp->port_num), (coal & 0x3fff) << 4);
1745 1746
}

static int mv643xx_eth_open(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int err;

	wrl(mp, INT_CAUSE(mp->port_num), 0);
	wrl(mp, INT_CAUSE_EXT(mp->port_num), 0);
	rdl(mp, INT_CAUSE_EXT(mp->port_num));

	err = request_irq(dev->irq, mv643xx_eth_irq,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM,
			  dev->name, dev);
	if (err) {
		dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
		return -EAGAIN;
	}

	init_mac_tables(mp);

	err = rxq_init(mp);
	if (err)
		goto out;
	rxq_refill(mp->rxq);

	err = txq_init(mp);
	if (err)
		goto out_free;

#ifdef MV643XX_ETH_NAPI
	napi_enable(&mp->napi);
#endif

	port_start(mp);

	set_rx_coal(mp, 0);
	set_tx_coal(mp, 0);

	wrl(mp, INT_MASK_EXT(mp->port_num),
	    INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX);

	wrl(mp, INT_MASK(mp->port_num), INT_RX | INT_EXT);

	return 0;


out_free:
	rxq_deinit(mp->rxq);
out:
	free_irq(dev->irq, dev);

	return err;
}

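/*
 * Quiesce the port: stop the TX and RX DMA queues, wait for the TX
 * FIFO to drain, then drop the port enable and forced-link bits.
 */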
static void port_reset(struct mv643xx_eth_private *mp)
{
	unsigned int data;

	txq_disable(mp->txq);
	rxq_disable(mp->rxq);
	while (!(rdl(mp, PORT_STATUS(mp->port_num)) & TX_FIFO_EMPTY))
		udelay(10);

	/* Reset the Enable bit in the Port Serial Control register */
	data = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
	data &= ~(SERIAL_PORT_ENABLE		|
		  DO_NOT_FORCE_LINK_FAIL	|
		  FORCE_LINK_PASS);
	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), data);
}

static int mv643xx_eth_stop(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	wrl(mp, INT_MASK(mp->port_num), 0x00000000);
	rdl(mp, INT_MASK(mp->port_num));

#ifdef MV643XX_ETH_NAPI
	napi_disable(&mp->napi);
#endif
	netif_carrier_off(dev);
	netif_stop_queue(dev);

	free_irq(dev->irq, dev);

	port_reset(mp);
	mib_counters_update(mp);

	txq_deinit(mp->txq);
	rxq_deinit(mp->rxq);

	return 0;
}

static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	return generic_mii_ioctl(&mp->mii, if_mii(ifr), cmd, NULL);
}

static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (new_mtu < 64 || new_mtu > 9500)
		return -EINVAL;

	dev->mtu = new_mtu;
	tx_set_rate(mp, 1000000000, 16777216);

	if (!netif_running(dev))
		return 0;

	/*
	 * Stop and then re-open the interface.  This will allocate RX
	 * skbs of the new MTU.  Note that the re-open can fail if
	 * memory is tight.
	 */
	mv643xx_eth_stop(dev);
	if (mv643xx_eth_open(dev)) {
		dev_printk(KERN_ERR, &dev->dev,
			   "fatal error on re-opening device after "
			   "MTU change\n");
	}

	return 0;
}

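/*
 * The actual TX timeout recovery (port_reset + port_start) runs from
 * process context via this work item; the netdev tx_timeout hook
 * below only schedules it.
 */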
static void tx_timeout_task(struct work_struct *ugly)
{
	struct mv643xx_eth_private *mp;

	mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
	if (netif_running(mp->dev)) {
		netif_stop_queue(mp->dev);

		port_reset(mp);
		port_start(mp);

		__txq_maybe_wake(mp->txq);
	}
}

static void mv643xx_eth_tx_timeout(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	dev_printk(KERN_INFO, &dev->dev, "tx timeout\n");

	schedule_work(&mp->tx_timeout_task);
}

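/*
 * For netpoll/netconsole: mask the port interrupts, run the interrupt
 * handler by hand, then restore the mask.  Note that the mask written
 * back here is INT_RX | INT_CAUSE_EXT, unlike the INT_RX | INT_EXT
 * programmed in mv643xx_eth_open().
 */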
#ifdef CONFIG_NET_POLL_CONTROLLER
static void mv643xx_eth_netpoll(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	wrl(mp, INT_MASK(mp->port_num), 0x00000000);
	rdl(mp, INT_MASK(mp->port_num));

	mv643xx_eth_irq(dev->irq, dev);

	wrl(mp, INT_MASK(mp->port_num), INT_RX | INT_CAUSE_EXT);
}
#endif

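/*
 * Thin adapters that expose the SMI (MII management) accessors through
 * the generic mii library's mdio_read/mdio_write callbacks.
 */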
static int mv643xx_eth_mdio_read(struct net_device *dev, int addr, int reg)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int val;

	smi_reg_read(mp, addr, reg, &val);

	return val;
}

static void mv643xx_eth_mdio_write(struct net_device *dev, int addr, int reg, int val)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	smi_reg_write(mp, addr, reg, val);
}


/* platform glue ************************************************************/
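/*
 * Program the ethernet unit's MBUS address decoding windows so that its
 * DMA engines see the same DRAM chip-select layout as the CPU: all six
 * windows are cleared first, then one window per chip select is opened,
 * and full access to it is recorded in win_protect (which the per-port
 * probe later writes to that port's WINDOW_PROTECT register).
 */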
static void
mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
			      struct mbus_dram_target_info *dram)
{
	void __iomem *base = msp->base;
	u32 win_enable;
	u32 win_protect;
	int i;

	for (i = 0; i < 6; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	win_enable = 0x3f;
	win_protect = 0;

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
			(cs->mbus_attr << 8) |
			dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable &= ~(1 << i);
		win_protect |= 3 << (2 * i);
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE);
	msp->win_protect = win_protect;
}

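/*
 * The 'shared' platform device represents the register block that is
 * common to all ports on the chip; per-port devices find it through
 * their platform data and inherit t_clk and the MBUS window setup.
 */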
static int mv643xx_eth_shared_probe(struct platform_device *pdev)
{
	static int mv643xx_eth_version_printed = 0;
	struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
	struct mv643xx_eth_shared_private *msp;
	struct resource *res;
	int ret;

	if (!mv643xx_eth_version_printed++)
		printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n");

	ret = -EINVAL;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		goto out;

	ret = -ENOMEM;
	msp = kmalloc(sizeof(*msp), GFP_KERNEL);
	if (msp == NULL)
		goto out;
	memset(msp, 0, sizeof(*msp));

	msp->base = ioremap(res->start, res->end - res->start + 1);
	if (msp->base == NULL)
		goto out_free;

	spin_lock_init(&msp->phy_lock);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (pd != NULL && pd->dram != NULL)
		mv643xx_eth_conf_mbus_windows(msp, pd->dram);

	/*
	 * Detect hardware parameters.
	 */
	msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;

	platform_set_drvdata(pdev, msp);

	return 0;

out_free:
	kfree(msp);
out:
	return ret;
}

static int mv643xx_eth_shared_remove(struct platform_device *pdev)
{
	struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);

	iounmap(msp->base);
	kfree(msp);

	return 0;
}

static struct platform_driver mv643xx_eth_shared_driver = {
	.probe		= mv643xx_eth_shared_probe,
	.remove		= mv643xx_eth_shared_remove,
	.driver = {
		.name	= MV643XX_ETH_SHARED_NAME,
		.owner	= THIS_MODULE,
	},
};

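/*
 * The shared PHY_ADDR register holds one 5-bit PHY address per port,
 * at bit offset 5 * port_num.
 */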
static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
{
	int addr_shift = 5 * mp->port_num;
	u32 data;

	data = rdl(mp, PHY_ADDR);
	data &= ~(0x1f << addr_shift);
	data |= (phy_addr & 0x1f) << addr_shift;
	wrl(mp, PHY_ADDR, data);
}

static int phy_addr_get(struct mv643xx_eth_private *mp)
{
	unsigned int data;

	data = rdl(mp, PHY_ADDR);

	return (data >> (5 * mp->port_num)) & 0x1f;
}

static void set_params(struct mv643xx_eth_private *mp,
		       struct mv643xx_eth_platform_data *pd)
{
	struct net_device *dev = mp->dev;

	if (is_valid_ether_addr(pd->mac_addr))
		memcpy(dev->dev_addr, pd->mac_addr, 6);
	else
		uc_addr_get(mp, dev->dev_addr);

	if (pd->phy_addr == -1) {
		mp->shared_smi = NULL;
		mp->phy_addr = -1;
	} else {
		mp->shared_smi = mp->shared;
		if (pd->shared_smi != NULL)
			mp->shared_smi = platform_get_drvdata(pd->shared_smi);

		if (pd->force_phy_addr || pd->phy_addr) {
			mp->phy_addr = pd->phy_addr & 0x3f;
			phy_addr_set(mp, mp->phy_addr);
		} else {
			mp->phy_addr = phy_addr_get(mp);
		}
	}

	mp->default_rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
	if (pd->rx_queue_size)
		mp->default_rx_ring_size = pd->rx_queue_size;
	mp->rx_desc_sram_addr = pd->rx_sram_addr;
	mp->rx_desc_sram_size = pd->rx_sram_size;

	mp->default_tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
	if (pd->tx_queue_size)
		mp->default_tx_ring_size = pd->tx_queue_size;
	mp->tx_desc_sram_addr = pd->tx_sram_addr;
	mp->tx_desc_sram_size = pd->tx_sram_size;
}

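/*
 * Probe for a PHY by flipping the auto-negotiation enable bit (0x1000)
 * in MII register 0 and checking that the change reads back; the
 * original value is restored afterwards.
 */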
static int phy_detect(struct mv643xx_eth_private *mp)
{
	unsigned int data;
	unsigned int data2;

	smi_reg_read(mp, mp->phy_addr, 0, &data);
	smi_reg_write(mp, mp->phy_addr, 0, data ^ 0x1000);

	smi_reg_read(mp, mp->phy_addr, 0, &data2);
	if (((data ^ data2) & 0x1000) == 0)
		return -ENODEV;

	smi_reg_write(mp, mp->phy_addr, 0, data);

	return 0;
}

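/*
 * Set up the attached PHY: verify it is present, reset it, hook it up
 * to the mii library, and either enable autonegotiation or force the
 * speed/duplex given in the platform data.
 */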
static int phy_init(struct mv643xx_eth_private *mp,
		    struct mv643xx_eth_platform_data *pd)
{
	struct ethtool_cmd cmd;
	int err;

	err = phy_detect(mp);
	if (err) {
		dev_printk(KERN_INFO, &mp->dev->dev,
			   "no PHY detected at addr %d\n", mp->phy_addr);
		return err;
	}
	phy_reset(mp);

	mp->mii.phy_id = mp->phy_addr;
	mp->mii.phy_id_mask = 0x3f;
	mp->mii.reg_num_mask = 0x1f;
	mp->mii.dev = mp->dev;
	mp->mii.mdio_read = mv643xx_eth_mdio_read;
	mp->mii.mdio_write = mv643xx_eth_mdio_write;

	mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii);

	memset(&cmd, 0, sizeof(cmd));

	cmd.port = PORT_MII;
	cmd.transceiver = XCVR_INTERNAL;
	cmd.phy_address = mp->phy_addr;
	if (pd->speed == 0) {
		cmd.autoneg = AUTONEG_ENABLE;
		cmd.speed = SPEED_100;
		cmd.advertising = ADVERTISED_10baseT_Half  |
				  ADVERTISED_10baseT_Full  |
				  ADVERTISED_100baseT_Half |
				  ADVERTISED_100baseT_Full;
		if (mp->mii.supports_gmii)
			cmd.advertising |= ADVERTISED_1000baseT_Full;
	} else {
		cmd.autoneg = AUTONEG_DISABLE;
		cmd.speed = pd->speed;
		cmd.duplex = pd->duplex;
	}

	update_pscr(mp, cmd.speed, cmd.duplex);
	mv643xx_eth_set_settings(mp->dev, &cmd);

	return 0;
}

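/*
 * Per-port probe: bind the port to its shared register block, fill in
 * the net_device methods and features, and register the interface.
 */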
static int mv643xx_eth_probe(struct platform_device *pdev)
{
	struct mv643xx_eth_platform_data *pd;
	struct mv643xx_eth_private *mp;
	struct net_device *dev;
	struct resource *res;
	DECLARE_MAC_BUF(mac);
	int err;

	pd = pdev->dev.platform_data;
	if (pd == NULL) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "no mv643xx_eth_platform_data\n");
		return -ENODEV;
	}

	if (pd->shared == NULL) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "no mv643xx_eth_platform_data->shared\n");
		return -ENODEV;
	}

	dev = alloc_etherdev(sizeof(struct mv643xx_eth_private));
	if (!dev)
		return -ENOMEM;

	mp = netdev_priv(dev);
	platform_set_drvdata(pdev, mp);

	mp->shared = platform_get_drvdata(pd->shared);
	mp->port_num = pd->port_number;

	mp->dev = dev;
#ifdef MV643XX_ETH_NAPI
	netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 64);
#endif

	set_params(mp, pd);

	spin_lock_init(&mp->lock);

	mib_counters_clear(mp);
	INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);

	err = phy_init(mp, pd);
	if (err)
		goto out;
	SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);


	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	BUG_ON(!res);
	dev->irq = res->start;

	dev->hard_start_xmit = mv643xx_eth_xmit;
	dev->open = mv643xx_eth_open;
	dev->stop = mv643xx_eth_stop;
	dev->set_multicast_list = mv643xx_eth_set_rx_mode;
	dev->set_mac_address = mv643xx_eth_set_mac_address;
	dev->do_ioctl = mv643xx_eth_ioctl;
	dev->change_mtu = mv643xx_eth_change_mtu;
	dev->tx_timeout = mv643xx_eth_tx_timeout;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = mv643xx_eth_netpoll;
#endif
	dev->watchdog_timeo = 2 * HZ;
	dev->base_addr = 0;

#ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX
	/*
	 * Zero copy can only work if we use Discovery II memory.
	 * Otherwise we would have to map the buffers to ISA memory,
	 * which is only 16 MB.
	 */
	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
#endif

	SET_NETDEV_DEV(dev, &pdev->dev);

	if (mp->shared->win_protect)
		wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);

	err = register_netdev(dev);
	if (err)
		goto out;

	dev_printk(KERN_NOTICE, &dev->dev, "port %d with MAC address %s\n",
		   mp->port_num, print_mac(mac, dev->dev_addr));

	if (dev->features & NETIF_F_SG)
		dev_printk(KERN_NOTICE, &dev->dev, "scatter/gather enabled\n");

	if (dev->features & NETIF_F_IP_CSUM)
		dev_printk(KERN_NOTICE, &dev->dev, "tx checksum offload\n");

#ifdef MV643XX_ETH_NAPI
	dev_printk(KERN_NOTICE, &dev->dev, "napi enabled\n");
#endif

	if (mp->tx_desc_sram_size > 0)
		dev_printk(KERN_NOTICE, &dev->dev, "configured with sram\n");

	return 0;

out:
	free_netdev(dev);

	return err;
}

static int mv643xx_eth_remove(struct platform_device *pdev)
{
	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);

	unregister_netdev(mp->dev);
	flush_scheduled_work();
	free_netdev(mp->dev);

	platform_set_drvdata(pdev, NULL);

	return 0;
}

static void mv643xx_eth_shutdown(struct platform_device *pdev)
{
	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);

	/* Mask all interrupts on ethernet port */
	wrl(mp, INT_MASK(mp->port_num), 0);
	rdl(mp, INT_MASK(mp->port_num));

	if (netif_running(mp->dev))
		port_reset(mp);
}

static struct platform_driver mv643xx_eth_driver = {
	.probe		= mv643xx_eth_probe,
	.remove		= mv643xx_eth_remove,
	.shutdown	= mv643xx_eth_shutdown,
	.driver = {
		.name	= MV643XX_ETH_NAME,
		.owner	= THIS_MODULE,
	},
};

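/*
 * Register the shared driver first; if the per-port driver then fails
 * to register, back the shared driver out again.
 */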
static int __init mv643xx_eth_init_module(void)
{
	int rc;

	rc = platform_driver_register(&mv643xx_eth_shared_driver);
	if (!rc) {
		rc = platform_driver_register(&mv643xx_eth_driver);
		if (rc)
			platform_driver_unregister(&mv643xx_eth_shared_driver);
	}

	return rc;
}
module_init(mv643xx_eth_init_module);

static void __exit mv643xx_eth_cleanup_module(void)
{
	platform_driver_unregister(&mv643xx_eth_driver);
	platform_driver_unregister(&mv643xx_eth_shared_driver);
}
module_exit(mv643xx_eth_cleanup_module);

MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, Manish Lachwani "
	      "and Dale Farnsworth");
MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);
MODULE_ALIAS("platform:" MV643XX_ETH_NAME);