/*
 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
 *
 * Based on the 64360 driver from:
 * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
 *		      Rabeeh Khoury <rabeeh@marvell.com>
 *
 * Copyright (C) 2003 PMC-Sierra, Inc.,
 *	written by Manish Lachwani
 *
 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
 *
 * Copyright (C) 2004-2006 MontaVista Software, Inc.
 *			   Dale Farnsworth <dale@farnsworth.org>
 *
 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
 *				     <sjhill@realitydiluted.com>
 *
 * Copyright (C) 2007-2008 Marvell Semiconductor
 *			   Lennert Buytenhek <buytenh@marvell.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */

#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
#include <linux/mv643xx_eth.h>
#include <asm/io.h>
#include <asm/types.h>
#include <asm/system.h>

static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.0";

#define MV643XX_ETH_CHECKSUM_OFFLOAD_TX
#define MV643XX_ETH_NAPI
#define MV643XX_ETH_TX_FAST_REFILL

#ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX
#define MAX_DESCS_PER_SKB	(MAX_SKB_FRAGS + 1)
#else
#define MAX_DESCS_PER_SKB	1
#endif

/*
 * Registers shared between all ports.
 */
#define PHY_ADDR			0x0000
#define SMI_REG				0x0004
#define WINDOW_BASE(w)			(0x0200 + ((w) << 3))
#define WINDOW_SIZE(w)			(0x0204 + ((w) << 3))
#define WINDOW_REMAP_HIGH(w)		(0x0280 + ((w) << 2))
#define WINDOW_BAR_ENABLE		0x0290
#define WINDOW_PROTECT(w)		(0x0294 + ((w) << 4))

/*
 * Per-port registers.
 */
#define PORT_CONFIG(p)			(0x0400 + ((p) << 10))
#define  UNICAST_PROMISCUOUS_MODE	0x00000001
#define PORT_CONFIG_EXT(p)		(0x0404 + ((p) << 10))
#define MAC_ADDR_LOW(p)			(0x0414 + ((p) << 10))
#define MAC_ADDR_HIGH(p)		(0x0418 + ((p) << 10))
#define SDMA_CONFIG(p)			(0x041c + ((p) << 10))
#define PORT_SERIAL_CONTROL(p)		(0x043c + ((p) << 10))
#define PORT_STATUS(p)			(0x0444 + ((p) << 10))
#define  TX_FIFO_EMPTY			0x00000400
#define TXQ_COMMAND(p)			(0x0448 + ((p) << 10))
#define TXQ_FIX_PRIO_CONF(p)		(0x044c + ((p) << 10))
#define TX_BW_RATE(p)			(0x0450 + ((p) << 10))
#define TX_BW_MTU(p)			(0x0458 + ((p) << 10))
#define TX_BW_BURST(p)			(0x045c + ((p) << 10))
#define INT_CAUSE(p)			(0x0460 + ((p) << 10))
#define  INT_RX				0x0007fbfc
#define  INT_EXT			0x00000002
#define INT_CAUSE_EXT(p)		(0x0464 + ((p) << 10))
#define  INT_EXT_LINK			0x00100000
#define  INT_EXT_PHY			0x00010000
#define  INT_EXT_TX_ERROR_0		0x00000100
#define  INT_EXT_TX_0			0x00000001
#define  INT_EXT_TX			0x00000101
#define INT_MASK(p)			(0x0468 + ((p) << 10))
#define INT_MASK_EXT(p)			(0x046c + ((p) << 10))
#define TX_FIFO_URGENT_THRESHOLD(p)	(0x0474 + ((p) << 10))
#define RXQ_CURRENT_DESC_PTR(p, q)	(0x060c + ((p) << 10) + ((q) << 4))
#define RXQ_COMMAND(p)			(0x0680 + ((p) << 10))
#define TXQ_CURRENT_DESC_PTR(p)		(0x06c0 + ((p) << 10))
#define TXQ_BW_TOKENS(p)		(0x0700 + ((p) << 10))
#define TXQ_BW_CONF(p)			(0x0704 + ((p) << 10))
#define TXQ_BW_WRR_CONF(p)		(0x0708 + ((p) << 10))
#define MIB_COUNTERS(p)			(0x1000 + ((p) << 7))
#define SPECIAL_MCAST_TABLE(p)		(0x1400 + ((p) << 10))
#define OTHER_MCAST_TABLE(p)		(0x1500 + ((p) << 10))
#define UNICAST_TABLE(p)		(0x1600 + ((p) << 10))


/*
 * SDMA configuration register.
 */
#define RX_BURST_SIZE_4_64BIT		(2 << 1)
#define BLM_RX_NO_SWAP			(1 << 4)
#define BLM_TX_NO_SWAP			(1 << 5)
#define TX_BURST_SIZE_4_64BIT		(2 << 22)

#if defined(__BIG_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		RX_BURST_SIZE_4_64BIT	|	\
		TX_BURST_SIZE_4_64BIT
#elif defined(__LITTLE_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		RX_BURST_SIZE_4_64BIT	|	\
		BLM_RX_NO_SWAP		|	\
		BLM_TX_NO_SWAP		|	\
		TX_BURST_SIZE_4_64BIT
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif


/*
 * Port serial control register.
 */
#define SET_MII_SPEED_TO_100			(1 << 24)
#define SET_GMII_SPEED_TO_1000			(1 << 23)
#define SET_FULL_DUPLEX_MODE			(1 << 21)
#define MAX_RX_PACKET_1522BYTE			(1 << 17)
#define MAX_RX_PACKET_9700BYTE			(5 << 17)
#define MAX_RX_PACKET_MASK			(7 << 17)
#define DISABLE_AUTO_NEG_SPEED_GMII		(1 << 13)
#define DO_NOT_FORCE_LINK_FAIL			(1 << 10)
#define SERIAL_PORT_CONTROL_RESERVED		(1 << 9)
#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL		(1 << 3)
#define DISABLE_AUTO_NEG_FOR_DUPLEX		(1 << 2)
#define FORCE_LINK_PASS				(1 << 1)
#define SERIAL_PORT_ENABLE			(1 << 0)

#define DEFAULT_RX_QUEUE_SIZE		400
#define DEFAULT_TX_QUEUE_SIZE		800


/*
 * RX/TX descriptors.
 */
#if defined(__BIG_ENDIAN)
struct rx_desc {
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u16 buf_size;		/* Buffer size				*/
	u32 cmd_sts;		/* Descriptor command status		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
};

struct tx_desc {
	u16 byte_cnt;		/* buffer byte count			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u32 cmd_sts;		/* Command/status field			*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
};
#elif defined(__LITTLE_ENDIAN)
struct rx_desc {
	u32 cmd_sts;		/* Descriptor command status		*/
	u16 buf_size;		/* Buffer size				*/
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
};

struct tx_desc {
	u32 cmd_sts;		/* Command/status field			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u16 byte_cnt;		/* buffer byte count			*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
};
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif

/* RX & TX descriptor command */
#define BUFFER_OWNED_BY_DMA		0x80000000

/* RX & TX descriptor status */
#define ERROR_SUMMARY			0x00000001

/* RX descriptor status */
#define LAYER_4_CHECKSUM_OK		0x40000000
#define RX_ENABLE_INTERRUPT		0x20000000
#define RX_FIRST_DESC			0x08000000
#define RX_LAST_DESC			0x04000000

/* TX descriptor command */
#define TX_ENABLE_INTERRUPT		0x00800000
#define GEN_CRC				0x00400000
#define TX_FIRST_DESC			0x00200000
#define TX_LAST_DESC			0x00100000
#define ZERO_PADDING			0x00080000
#define GEN_IP_V4_CHECKSUM		0x00040000
#define GEN_TCP_UDP_CHECKSUM		0x00020000
#define UDP_FRAME			0x00010000

#define TX_IHL_SHIFT			11


/* global *******************************************************************/
struct mv643xx_eth_shared_private {
	/*
	 * Ethernet controller base address.
	 */
	void __iomem *base;

	/*
	 * Protects access to SMI_REG, which is shared between ports.
	 */
	spinlock_t phy_lock;

	/*
	 * Per-port MBUS window access register value.
	 */
	u32 win_protect;

	/*
	 * Hardware-specific parameters.
	 */
	unsigned int t_clk;
};


/* per-port *****************************************************************/
struct mib_counters {
	u64 good_octets_received;
	u32 bad_octets_received;
	u32 internal_mac_transmit_err;
	u32 good_frames_received;
	u32 bad_frames_received;
	u32 broadcast_frames_received;
	u32 multicast_frames_received;
	u32 frames_64_octets;
	u32 frames_65_to_127_octets;
	u32 frames_128_to_255_octets;
	u32 frames_256_to_511_octets;
	u32 frames_512_to_1023_octets;
	u32 frames_1024_to_max_octets;
	u64 good_octets_sent;
	u32 good_frames_sent;
	u32 excessive_collision;
	u32 multicast_frames_sent;
	u32 broadcast_frames_sent;
	u32 unrec_mac_control_received;
	u32 fc_sent;
	u32 good_fc_received;
	u32 bad_fc_received;
	u32 undersize_received;
	u32 fragments_received;
	u32 oversize_received;
	u32 jabber_received;
	u32 mac_receive_error;
	u32 bad_crc_event;
	u32 collision;
	u32 late_collision;
};

struct rx_queue {
	int index;

	int rx_ring_size;

	int rx_desc_count;
	int rx_curr_desc;
	int rx_used_desc;

	struct rx_desc *rx_desc_area;
	dma_addr_t rx_desc_dma;
	int rx_desc_area_size;
	struct sk_buff **rx_skb;

	struct timer_list rx_oom;
};

struct tx_queue {
	int tx_ring_size;

	int tx_desc_count;
	int tx_curr_desc;
	int tx_used_desc;

	struct tx_desc *tx_desc_area;
	dma_addr_t tx_desc_dma;
	int tx_desc_area_size;
	struct sk_buff **tx_skb;
};

struct mv643xx_eth_private {
	struct mv643xx_eth_shared_private *shared;
	int port_num;

	struct net_device *dev;

	struct mv643xx_eth_shared_private *shared_smi;
	int phy_addr;

	spinlock_t lock;

	struct mib_counters mib_counters;
	struct work_struct tx_timeout_task;
	struct mii_if_info mii;

	/*
	 * RX state.
	 */
	int default_rx_ring_size;
	unsigned long rx_desc_sram_addr;
	int rx_desc_sram_size;
	u8 rxq_mask;
	int rxq_primary;
	struct napi_struct napi;
	struct rx_queue rxq[8];

	/*
	 * TX state.
	 */
	int default_tx_ring_size;
	unsigned long tx_desc_sram_addr;
	int tx_desc_sram_size;
	struct tx_queue txq[1];
#ifdef MV643XX_ETH_TX_FAST_REFILL
	int tx_clean_threshold;
#endif
};


/* port register accessors **************************************************/
static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->shared->base + offset);
}

static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->shared->base + offset);
}


/* rxq/txq helper functions *************************************************/
static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
{
	return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
}
static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
{
	return container_of(txq, struct mv643xx_eth_private, txq[0]);
}

static void rxq_enable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	wrl(mp, RXQ_COMMAND(mp->port_num), 1 << rxq->index);
}

static void rxq_disable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	u8 mask = 1 << rxq->index;

	wrl(mp, RXQ_COMMAND(mp->port_num), mask << 8);
	while (rdl(mp, RXQ_COMMAND(mp->port_num)) & mask)
		udelay(10);
}

static void txq_enable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	wrl(mp, TXQ_COMMAND(mp->port_num), 1);
}

static void txq_disable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	u8 mask = 1;

	wrl(mp, TXQ_COMMAND(mp->port_num), mask << 8);
	while (rdl(mp, TXQ_COMMAND(mp->port_num)) & mask)
		udelay(10);
}

static void __txq_maybe_wake(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);

	if (txq->tx_ring_size - txq->tx_desc_count >= MAX_DESCS_PER_SKB)
		netif_wake_queue(mp->dev);
}


/* rx ***********************************************************************/
static void txq_reclaim(struct tx_queue *txq, int force);

static void rxq_refill(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	unsigned long flags;

	spin_lock_irqsave(&mp->lock, flags);

	while (rxq->rx_desc_count < rxq->rx_ring_size) {
		int skb_size;
		struct sk_buff *skb;
		int unaligned;
		int rx;

		/*
		 * Reserve 2+14 bytes for an ethernet header (the
		 * hardware automatically prepends 2 bytes of dummy
		 * data to each received packet), 4 bytes for a VLAN
		 * header, and 4 bytes for the trailing FCS -- 24
		 * bytes total.
		 */
		skb_size = mp->dev->mtu + 24;

		skb = dev_alloc_skb(skb_size + dma_get_cache_alignment() - 1);
		if (skb == NULL)
			break;

		unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
		if (unaligned)
			skb_reserve(skb, dma_get_cache_alignment() - unaligned);

		rxq->rx_desc_count++;
		rx = rxq->rx_used_desc;
		rxq->rx_used_desc = (rx + 1) % rxq->rx_ring_size;

		rxq->rx_desc_area[rx].buf_ptr = dma_map_single(NULL, skb->data,
						skb_size, DMA_FROM_DEVICE);
		rxq->rx_desc_area[rx].buf_size = skb_size;
		rxq->rx_skb[rx] = skb;
		wmb();
		rxq->rx_desc_area[rx].cmd_sts = BUFFER_OWNED_BY_DMA |
						RX_ENABLE_INTERRUPT;
		wmb();

		/*
		 * The hardware automatically prepends 2 bytes of
		 * dummy data to each received packet, so that the
		 * IP header ends up 16-byte aligned.
		 */
		skb_reserve(skb, 2);
	}

	if (rxq->rx_desc_count == 0) {
		rxq->rx_oom.expires = jiffies + (HZ / 10);
		add_timer(&rxq->rx_oom);
	}

	spin_unlock_irqrestore(&mp->lock, flags);
}

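/* Timer callback: retry the RX ring refill after an earlier out-of-memory failure. */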
static inline void rxq_refill_timer_wrapper(unsigned long data)
{
	rxq_refill((struct rx_queue *)data);
}

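/* Process up to 'budget' received packets and return the number handled. */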
static int rxq_process(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	struct net_device_stats *stats = &mp->dev->stats;
	int rx;

	rx = 0;
	while (rx < budget) {
		struct rx_desc *rx_desc;
		unsigned int cmd_sts;
		struct sk_buff *skb;
		unsigned long flags;

		spin_lock_irqsave(&mp->lock, flags);

		rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];

		cmd_sts = rx_desc->cmd_sts;
		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
			spin_unlock_irqrestore(&mp->lock, flags);
			break;
		}
		rmb();

		skb = rxq->rx_skb[rxq->rx_curr_desc];
		rxq->rx_skb[rxq->rx_curr_desc] = NULL;

		rxq->rx_curr_desc = (rxq->rx_curr_desc + 1) % rxq->rx_ring_size;

		spin_unlock_irqrestore(&mp->lock, flags);

		dma_unmap_single(NULL, rx_desc->buf_ptr + 2,
				 mp->dev->mtu + 24, DMA_FROM_DEVICE);
		rxq->rx_desc_count--;
		rx++;

		/*
		 * Update statistics.
		 *
		 * Note that the descriptor byte count includes 2 dummy
		 * bytes automatically inserted by the hardware at the
		 * start of the packet (which we don't count), and a 4
		 * byte CRC at the end of the packet (which we do count).
		 */
		stats->rx_packets++;
		stats->rx_bytes += rx_desc->byte_cnt - 2;

		/*
		 * In case we received a packet without first / last bits
		 * on, or the error summary bit is set, the packet needs
		 * to be dropped.
		 */
		if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
					(RX_FIRST_DESC | RX_LAST_DESC))
				|| (cmd_sts & ERROR_SUMMARY)) {
			stats->rx_dropped++;

			if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
				(RX_FIRST_DESC | RX_LAST_DESC)) {
				if (net_ratelimit())
					dev_printk(KERN_ERR, &mp->dev->dev,
						   "received packet spanning "
						   "multiple descriptors\n");
			}

			if (cmd_sts & ERROR_SUMMARY)
				stats->rx_errors++;

			dev_kfree_skb_irq(skb);
		} else {
			/*
			 * The -4 is for the CRC in the trailer of the
			 * received packet
			 */
			skb_put(skb, rx_desc->byte_cnt - 2 - 4);

			if (cmd_sts & LAYER_4_CHECKSUM_OK) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				skb->csum = htons(
					(cmd_sts & 0x0007fff8) >> 3);
			}
			skb->protocol = eth_type_trans(skb, mp->dev);
#ifdef MV643XX_ETH_NAPI
			netif_receive_skb(skb);
#else
			netif_rx(skb);
#endif
		}

		mp->dev->last_rx = jiffies;
	}

	rxq_refill(rxq);

	return rx;
}

#ifdef MV643XX_ETH_NAPI
static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
{
	struct mv643xx_eth_private *mp;
	int rx;
	int i;

	mp = container_of(napi, struct mv643xx_eth_private, napi);

#ifdef MV643XX_ETH_TX_FAST_REFILL
	if (++mp->tx_clean_threshold > 5) {
		txq_reclaim(mp->txq, 0);
		mp->tx_clean_threshold = 0;
	}
#endif

	rx = 0;
	for (i = 7; rx < budget && i >= 0; i--)
		if (mp->rxq_mask & (1 << i))
			rx += rxq_process(mp->rxq + i, budget - rx);

	if (rx < budget) {
		netif_rx_complete(mp->dev, napi);
		wrl(mp, INT_CAUSE(mp->port_num), 0);
		wrl(mp, INT_CAUSE_EXT(mp->port_num), 0);
		wrl(mp, INT_MASK(mp->port_num), INT_RX | INT_EXT);
	}

	return rx;
}
#endif


/* tx ***********************************************************************/
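/*
 * Fragments of 8 bytes or less that are not 8-byte aligned are
 * handled by linearizing the skb in mv643xx_eth_xmit() before it
 * is queued for transmission.
 */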
static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
{
	int frag;

	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
		skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
		if (fragp->size <= 8 && fragp->page_offset & 7)
			return 1;
	}

	return 0;
}

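/* Claim the next free TX descriptor slot and advance the current-descriptor index. */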
static int txq_alloc_desc_index(struct tx_queue *txq)
{
	int tx_desc_curr;

	BUG_ON(txq->tx_desc_count >= txq->tx_ring_size);

	tx_desc_curr = txq->tx_curr_desc;
	txq->tx_curr_desc = (tx_desc_curr + 1) % txq->tx_ring_size;

	BUG_ON(txq->tx_curr_desc == txq->tx_used_desc);

	return tx_desc_curr;
}

static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int frag;

	for (frag = 0; frag < nr_frags; frag++) {
		skb_frag_t *this_frag;
		int tx_index;
		struct tx_desc *desc;

		this_frag = &skb_shinfo(skb)->frags[frag];
		tx_index = txq_alloc_desc_index(txq);
		desc = &txq->tx_desc_area[tx_index];

		/*
		 * The last fragment will generate an interrupt
		 * which will free the skb on TX completion.
		 */
		if (frag == nr_frags - 1) {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA |
					ZERO_PADDING | TX_LAST_DESC |
					TX_ENABLE_INTERRUPT;
			txq->tx_skb[tx_index] = skb;
		} else {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA;
			txq->tx_skb[tx_index] = NULL;
		}

		desc->l4i_chk = 0;
		desc->byte_cnt = this_frag->size;
		desc->buf_ptr = dma_map_page(NULL, this_frag->page,
						this_frag->page_offset,
						this_frag->size,
						DMA_TO_DEVICE);
	}
}

static inline __be16 sum16_as_be(__sum16 sum)
{
	return (__force __be16)sum;
}

static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int tx_index;
	struct tx_desc *desc;
	u32 cmd_sts;
	int length;

	cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;

	tx_index = txq_alloc_desc_index(txq);
	desc = &txq->tx_desc_area[tx_index];

	if (nr_frags) {
		txq_submit_frag_skb(txq, skb);

		length = skb_headlen(skb);
		txq->tx_skb[tx_index] = NULL;
	} else {
		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
		length = skb->len;
		txq->tx_skb[tx_index] = skb;
	}

	desc->byte_cnt = length;
	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		BUG_ON(skb->protocol != htons(ETH_P_IP));

		cmd_sts |= GEN_TCP_UDP_CHECKSUM |
			   GEN_IP_V4_CHECKSUM   |
			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;

		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_UDP:
			cmd_sts |= UDP_FRAME;
			desc->l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
			break;
		case IPPROTO_TCP:
			desc->l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
			break;
		default:
			BUG();
		}
	} else {
		/* Errata BTS #50, IHL must be 5 if no HW checksum */
		cmd_sts |= 5 << TX_IHL_SHIFT;
		desc->l4i_chk = 0;
	}

	/* ensure all other descriptors are written before first cmd_sts */
	wmb();
	desc->cmd_sts = cmd_sts;

	/* ensure all descriptors are written before poking hardware */
	wmb();
	txq_enable(txq);

	txq->tx_desc_count += nr_frags + 1;
}

static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct tx_queue *txq;
	unsigned long flags;

	BUG_ON(netif_queue_stopped(dev));

	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
		stats->tx_dropped++;
		dev_printk(KERN_DEBUG, &dev->dev,
			   "failed to linearize skb with tiny "
			   "unaligned fragment\n");
		return NETDEV_TX_BUSY;
	}

	spin_lock_irqsave(&mp->lock, flags);

	txq = mp->txq;

	if (txq->tx_ring_size - txq->tx_desc_count < MAX_DESCS_PER_SKB) {
		printk(KERN_ERR "%s: transmit with queue full\n", dev->name);
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&mp->lock, flags);
		return NETDEV_TX_BUSY;
	}

	txq_submit_skb(txq, skb);
	stats->tx_bytes += skb->len;
	stats->tx_packets++;
	dev->trans_start = jiffies;

	if (txq->tx_ring_size - txq->tx_desc_count < MAX_DESCS_PER_SKB)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&mp->lock, flags);

	return NETDEV_TX_OK;
}


/* tx rate control **********************************************************/
/*
 * Set total maximum TX rate (shared by all TX queues for this port)
 * to 'rate' bits per second, with a maximum burst of 'burst' bytes.
 */
static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
{
	int token_rate;
	int mtu;
	int bucket_size;

	token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	mtu = (mp->dev->mtu + 255) >> 8;
	if (mtu > 63)
		mtu = 63;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	wrl(mp, TX_BW_RATE(mp->port_num), token_rate);
	wrl(mp, TX_BW_MTU(mp->port_num), mtu);
	wrl(mp, TX_BW_BURST(mp->port_num), bucket_size);
}

static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int token_rate;
	int bucket_size;

	token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	wrl(mp, TXQ_BW_TOKENS(mp->port_num), token_rate << 14);
	wrl(mp, TXQ_BW_CONF(mp->port_num),
			(bucket_size << 10) | token_rate);
}

static void txq_set_fixed_prio_mode(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int off;
	u32 val;

	/*
	 * Turn on fixed priority mode.
	 */
	off = TXQ_FIX_PRIO_CONF(mp->port_num);

	val = rdl(mp, off);
	val |= 1;
	wrl(mp, off, val);
}

static void txq_set_wrr(struct tx_queue *txq, int weight)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int off;
	u32 val;

	/*
	 * Turn off fixed priority mode.
	 */
	off = TXQ_FIX_PRIO_CONF(mp->port_num);

	val = rdl(mp, off);
	val &= ~1;
	wrl(mp, off, val);

	/*
	 * Configure WRR weight for this queue.
	 */
	off = TXQ_BW_WRR_CONF(mp->port_num);

	val = rdl(mp, off);
	val = (val & ~0xff) | (weight & 0xff);
	wrl(mp, off, val);
}


881
/* mii management interface *************************************************/
L
Lennert Buytenhek 已提交
882 883 884 885
#define SMI_BUSY		0x10000000
#define SMI_READ_VALID		0x08000000
#define SMI_OPCODE_READ		0x04000000
#define SMI_OPCODE_WRITE	0x00000000
886

L
Lennert Buytenhek 已提交
887 888
static void smi_reg_read(struct mv643xx_eth_private *mp, unsigned int addr,
			 unsigned int reg, unsigned int *value)
L
Linus Torvalds 已提交
889
{
890
	void __iomem *smi_reg = mp->shared_smi->base + SMI_REG;
891
	unsigned long flags;
L
Linus Torvalds 已提交
892 893
	int i;

894 895 896 897
	/* the SMI register is a shared resource */
	spin_lock_irqsave(&mp->shared_smi->phy_lock, flags);

	/* wait for the SMI register to become available */
898
	for (i = 0; readl(smi_reg) & SMI_BUSY; i++) {
899
		if (i == 1000) {
900 901 902
			printk("%s: PHY busy timeout\n", mp->dev->name);
			goto out;
		}
903
		udelay(10);
L
Linus Torvalds 已提交
904 905
	}

L
Lennert Buytenhek 已提交
906
	writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);
L
Linus Torvalds 已提交
907

908
	/* now wait for the data to be valid */
909
	for (i = 0; !(readl(smi_reg) & SMI_READ_VALID); i++) {
910
		if (i == 1000) {
911 912 913
			printk("%s: PHY read timeout\n", mp->dev->name);
			goto out;
		}
914
		udelay(10);
915 916 917 918 919
	}

	*value = readl(smi_reg) & 0xffff;
out:
	spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags);
L
Linus Torvalds 已提交
920 921
}

L
Lennert Buytenhek 已提交
922 923 924
static void smi_reg_write(struct mv643xx_eth_private *mp,
			  unsigned int addr,
			  unsigned int reg, unsigned int value)
L
Linus Torvalds 已提交
925
{
926
	void __iomem *smi_reg = mp->shared_smi->base + SMI_REG;
927
	unsigned long flags;
L
Linus Torvalds 已提交
928 929
	int i;

930 931 932 933
	/* the SMI register is a shared resource */
	spin_lock_irqsave(&mp->shared_smi->phy_lock, flags);

	/* wait for the SMI register to become available */
934
	for (i = 0; readl(smi_reg) & SMI_BUSY; i++) {
935
		if (i == 1000) {
936 937 938
			printk("%s: PHY busy timeout\n", mp->dev->name);
			goto out;
		}
939
		udelay(10);
L
Linus Torvalds 已提交
940 941
	}

L
Lennert Buytenhek 已提交
942 943
	writel(SMI_OPCODE_WRITE | (reg << 21) |
		(addr << 16) | (value & 0xffff), smi_reg);
944 945 946
out:
	spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags);
}
L
Linus Torvalds 已提交
947

948 949

/* mib counters *************************************************************/
L
Lennert Buytenhek 已提交
950
static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
951
{
L
Lennert Buytenhek 已提交
952
	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
L
Linus Torvalds 已提交
953 954
}

L
Lennert Buytenhek 已提交
955
static void mib_counters_clear(struct mv643xx_eth_private *mp)
956
{
L
Lennert Buytenhek 已提交
957 958 959 960
	int i;

	for (i = 0; i < 0x80; i += 4)
		mib_read(mp, i);
961
}
962

L
Lennert Buytenhek 已提交
963
static void mib_counters_update(struct mv643xx_eth_private *mp)
964
{
965
	struct mib_counters *p = &mp->mib_counters;
966

L
Lennert Buytenhek 已提交
967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998
	p->good_octets_received += mib_read(mp, 0x00);
	p->good_octets_received += (u64)mib_read(mp, 0x04) << 32;
	p->bad_octets_received += mib_read(mp, 0x08);
	p->internal_mac_transmit_err += mib_read(mp, 0x0c);
	p->good_frames_received += mib_read(mp, 0x10);
	p->bad_frames_received += mib_read(mp, 0x14);
	p->broadcast_frames_received += mib_read(mp, 0x18);
	p->multicast_frames_received += mib_read(mp, 0x1c);
	p->frames_64_octets += mib_read(mp, 0x20);
	p->frames_65_to_127_octets += mib_read(mp, 0x24);
	p->frames_128_to_255_octets += mib_read(mp, 0x28);
	p->frames_256_to_511_octets += mib_read(mp, 0x2c);
	p->frames_512_to_1023_octets += mib_read(mp, 0x30);
	p->frames_1024_to_max_octets += mib_read(mp, 0x34);
	p->good_octets_sent += mib_read(mp, 0x38);
	p->good_octets_sent += (u64)mib_read(mp, 0x3c) << 32;
	p->good_frames_sent += mib_read(mp, 0x40);
	p->excessive_collision += mib_read(mp, 0x44);
	p->multicast_frames_sent += mib_read(mp, 0x48);
	p->broadcast_frames_sent += mib_read(mp, 0x4c);
	p->unrec_mac_control_received += mib_read(mp, 0x50);
	p->fc_sent += mib_read(mp, 0x54);
	p->good_fc_received += mib_read(mp, 0x58);
	p->bad_fc_received += mib_read(mp, 0x5c);
	p->undersize_received += mib_read(mp, 0x60);
	p->fragments_received += mib_read(mp, 0x64);
	p->oversize_received += mib_read(mp, 0x68);
	p->jabber_received += mib_read(mp, 0x6c);
	p->mac_receive_error += mib_read(mp, 0x70);
	p->bad_crc_event += mib_read(mp, 0x74);
	p->collision += mib_read(mp, 0x78);
	p->late_collision += mib_read(mp, 0x7c);
999 1000
}

1001 1002

/* ethtool ******************************************************************/
1003
struct mv643xx_eth_stats {
1004 1005
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
1006 1007
	int netdev_off;
	int mp_off;
1008 1009
};

1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056
#define SSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct net_device_stats, m),		\
	  offsetof(struct net_device, stats.m), -1 }

#define MIBSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct mib_counters, m),		\
	  -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }

static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
	SSTAT(rx_packets),
	SSTAT(tx_packets),
	SSTAT(rx_bytes),
	SSTAT(tx_bytes),
	SSTAT(rx_errors),
	SSTAT(tx_errors),
	SSTAT(rx_dropped),
	SSTAT(tx_dropped),
	MIBSTAT(good_octets_received),
	MIBSTAT(bad_octets_received),
	MIBSTAT(internal_mac_transmit_err),
	MIBSTAT(good_frames_received),
	MIBSTAT(bad_frames_received),
	MIBSTAT(broadcast_frames_received),
	MIBSTAT(multicast_frames_received),
	MIBSTAT(frames_64_octets),
	MIBSTAT(frames_65_to_127_octets),
	MIBSTAT(frames_128_to_255_octets),
	MIBSTAT(frames_256_to_511_octets),
	MIBSTAT(frames_512_to_1023_octets),
	MIBSTAT(frames_1024_to_max_octets),
	MIBSTAT(good_octets_sent),
	MIBSTAT(good_frames_sent),
	MIBSTAT(excessive_collision),
	MIBSTAT(multicast_frames_sent),
	MIBSTAT(broadcast_frames_sent),
	MIBSTAT(unrec_mac_control_received),
	MIBSTAT(fc_sent),
	MIBSTAT(good_fc_received),
	MIBSTAT(bad_fc_received),
	MIBSTAT(undersize_received),
	MIBSTAT(fragments_received),
	MIBSTAT(oversize_received),
	MIBSTAT(jabber_received),
	MIBSTAT(mac_receive_error),
	MIBSTAT(bad_crc_event),
	MIBSTAT(collision),
	MIBSTAT(late_collision),
1057 1058
};

1059
static int mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1060
{
1061
	struct mv643xx_eth_private *mp = netdev_priv(dev);
1062 1063 1064 1065 1066 1067
	int err;

	spin_lock_irq(&mp->lock);
	err = mii_ethtool_gset(&mp->mii, cmd);
	spin_unlock_irq(&mp->lock);

L
Lennert Buytenhek 已提交
1068 1069 1070
	/*
	 * The MAC does not support 1000baseT_Half.
	 */
1071 1072 1073 1074 1075 1076
	cmd->supported &= ~SUPPORTED_1000baseT_Half;
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	return err;
}

1077
static int mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
L
Linus Torvalds 已提交
1078
{
1079
	struct mv643xx_eth_private *mp = netdev_priv(dev);
1080 1081
	int err;

L
Lennert Buytenhek 已提交
1082 1083 1084 1085 1086
	/*
	 * The MAC does not support 1000baseT_Half.
	 */
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

1087 1088 1089
	spin_lock_irq(&mp->lock);
	err = mii_ethtool_sset(&mp->mii, cmd);
	spin_unlock_irq(&mp->lock);
1090

1091 1092
	return err;
}
L
Linus Torvalds 已提交
1093

L
Lennert Buytenhek 已提交
1094 1095
static void mv643xx_eth_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *drvinfo)
1096
{
1097 1098
	strncpy(drvinfo->driver,  mv643xx_eth_driver_name, 32);
	strncpy(drvinfo->version, mv643xx_eth_driver_version, 32);
1099
	strncpy(drvinfo->fw_version, "N/A", 32);
L
Lennert Buytenhek 已提交
1100
	strncpy(drvinfo->bus_info, "platform", 32);
1101
	drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
1102
}
L
Linus Torvalds 已提交
1103

L
Lennert Buytenhek 已提交
1104
static int mv643xx_eth_nway_reset(struct net_device *dev)
1105
{
1106
	struct mv643xx_eth_private *mp = netdev_priv(dev);
L
Linus Torvalds 已提交
1107

1108 1109
	return mii_nway_restart(&mp->mii);
}
L
Linus Torvalds 已提交
1110

1111 1112
static u32 mv643xx_eth_get_link(struct net_device *dev)
{
1113
	struct mv643xx_eth_private *mp = netdev_priv(dev);
L
Linus Torvalds 已提交
1114

1115 1116
	return mii_link_ok(&mp->mii);
}
L
Linus Torvalds 已提交
1117

L
Lennert Buytenhek 已提交
1118 1119
static void mv643xx_eth_get_strings(struct net_device *dev,
				    uint32_t stringset, uint8_t *data)
1120 1121
{
	int i;
L
Linus Torvalds 已提交
1122

L
Lennert Buytenhek 已提交
1123 1124
	if (stringset == ETH_SS_STATS) {
		for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
1125
			memcpy(data + i * ETH_GSTRING_LEN,
1126
				mv643xx_eth_stats[i].stat_string,
1127
				ETH_GSTRING_LEN);
1128 1129 1130
		}
	}
}
L
Linus Torvalds 已提交
1131

L
Lennert Buytenhek 已提交
1132 1133 1134
static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
					  struct ethtool_stats *stats,
					  uint64_t *data)
1135
{
L
Lennert Buytenhek 已提交
1136
	struct mv643xx_eth_private *mp = dev->priv;
1137
	int i;
L
Linus Torvalds 已提交
1138

L
Lennert Buytenhek 已提交
1139
	mib_counters_update(mp);
L
Linus Torvalds 已提交
1140

1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153
	for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
		const struct mv643xx_eth_stats *stat;
		void *p;

		stat = mv643xx_eth_stats + i;

		if (stat->netdev_off >= 0)
			p = ((void *)mp->dev) + stat->netdev_off;
		else
			p = ((void *)mp) + stat->mp_off;

		data[i] = (stat->sizeof_stat == 8) ?
				*(uint64_t *)p : *(uint32_t *)p;
L
Linus Torvalds 已提交
1154
	}
1155
}
L
Linus Torvalds 已提交
1156

L
Lennert Buytenhek 已提交
1157
static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
1158
{
L
Lennert Buytenhek 已提交
1159
	if (sset == ETH_SS_STATS)
1160
		return ARRAY_SIZE(mv643xx_eth_stats);
L
Lennert Buytenhek 已提交
1161 1162

	return -EOPNOTSUPP;
1163
}
L
Linus Torvalds 已提交
1164

1165
static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
L
Lennert Buytenhek 已提交
1166 1167 1168 1169 1170
	.get_settings		= mv643xx_eth_get_settings,
	.set_settings		= mv643xx_eth_set_settings,
	.get_drvinfo		= mv643xx_eth_get_drvinfo,
	.nway_reset		= mv643xx_eth_nway_reset,
	.get_link		= mv643xx_eth_get_link,
1171
	.set_sg			= ethtool_op_set_sg,
L
Lennert Buytenhek 已提交
1172 1173
	.get_strings		= mv643xx_eth_get_strings,
	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
1174
	.get_sset_count		= mv643xx_eth_get_sset_count,
1175
};
L
Linus Torvalds 已提交
1176

1177

1178
/* address handling *********************************************************/
1179
static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
1180 1181 1182
{
	unsigned int mac_h;
	unsigned int mac_l;
L
Linus Torvalds 已提交
1183

L
Lennert Buytenhek 已提交
1184 1185
	mac_h = rdl(mp, MAC_ADDR_HIGH(mp->port_num));
	mac_l = rdl(mp, MAC_ADDR_LOW(mp->port_num));
L
Linus Torvalds 已提交
1186

1187 1188 1189 1190 1191 1192
	addr[0] = (mac_h >> 24) & 0xff;
	addr[1] = (mac_h >> 16) & 0xff;
	addr[2] = (mac_h >> 8) & 0xff;
	addr[3] = mac_h & 0xff;
	addr[4] = (mac_l >> 8) & 0xff;
	addr[5] = mac_l & 0xff;
1193
}
L
Linus Torvalds 已提交
1194

1195
static void init_mac_tables(struct mv643xx_eth_private *mp)
1196
{
L
Lennert Buytenhek 已提交
1197
	int i;
L
Linus Torvalds 已提交
1198

L
Lennert Buytenhek 已提交
1199 1200 1201
	for (i = 0; i < 0x100; i += 4) {
		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, 0);
		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, 0);
1202
	}
L
Lennert Buytenhek 已提交
1203 1204 1205

	for (i = 0; i < 0x10; i += 4)
		wrl(mp, UNICAST_TABLE(mp->port_num) + i, 0);
1206
}
1207

1208
static void set_filter_table_entry(struct mv643xx_eth_private *mp,
L
Lennert Buytenhek 已提交
1209
				   int table, unsigned char entry)
1210 1211
{
	unsigned int table_reg;
1212

1213
	/* Set "accepts frame bit" at specified table entry */
L
Lennert Buytenhek 已提交
1214 1215 1216
	table_reg = rdl(mp, table + (entry & 0xfc));
	table_reg |= 0x01 << (8 * (entry & 3));
	wrl(mp, table + (entry & 0xfc), table_reg);
L
Linus Torvalds 已提交
1217 1218
}

1219
static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
L
Linus Torvalds 已提交
1220
{
1221 1222 1223
	unsigned int mac_h;
	unsigned int mac_l;
	int table;
L
Linus Torvalds 已提交
1224

L
Lennert Buytenhek 已提交
1225 1226
	mac_l = (addr[4] << 8) | addr[5];
	mac_h = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
1227

L
Lennert Buytenhek 已提交
1228 1229
	wrl(mp, MAC_ADDR_LOW(mp->port_num), mac_l);
	wrl(mp, MAC_ADDR_HIGH(mp->port_num), mac_h);
L
Linus Torvalds 已提交
1230

L
Lennert Buytenhek 已提交
1231
	table = UNICAST_TABLE(mp->port_num);
1232
	set_filter_table_entry(mp, table, addr[5] & 0x0f);
L
Linus Torvalds 已提交
1233 1234
}

L
Lennert Buytenhek 已提交
1235
static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
L
Linus Torvalds 已提交
1236
{
1237
	struct mv643xx_eth_private *mp = netdev_priv(dev);
L
Linus Torvalds 已提交
1238

L
Lennert Buytenhek 已提交
1239 1240 1241
	/* +2 is for the offset of the HW addr type */
	memcpy(dev->dev_addr, addr + 2, 6);

1242 1243
	init_mac_tables(mp);
	uc_addr_set(mp, dev->dev_addr);
L
Linus Torvalds 已提交
1244 1245 1246 1247

	return 0;
}

1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265
static int addr_crc(unsigned char *addr)
{
	int crc = 0;
	int i;

	for (i = 0; i < 6; i++) {
		int j;

		crc = (crc ^ addr[i]) << 8;
		for (j = 7; j >= 0; j--) {
			if (crc & (0x100 << j))
				crc ^= 0x107 << j;
		}
	}

	return crc;
}

L
Lennert Buytenhek 已提交
1266
static void mv643xx_eth_set_rx_mode(struct net_device *dev)
L
Linus Torvalds 已提交
1267
{
L
Lennert Buytenhek 已提交
1268 1269 1270 1271
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 port_config;
	struct dev_addr_list *addr;
	int i;
1272

L
Lennert Buytenhek 已提交
1273 1274 1275 1276 1277 1278
	port_config = rdl(mp, PORT_CONFIG(mp->port_num));
	if (dev->flags & IFF_PROMISC)
		port_config |= UNICAST_PROMISCUOUS_MODE;
	else
		port_config &= ~UNICAST_PROMISCUOUS_MODE;
	wrl(mp, PORT_CONFIG(mp->port_num), port_config);
L
Linus Torvalds 已提交
1279

L
Lennert Buytenhek 已提交
1280 1281 1282
	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		int port_num = mp->port_num;
		u32 accept = 0x01010101;
1283

L
Lennert Buytenhek 已提交
1284 1285 1286
		for (i = 0; i < 0x100; i += 4) {
			wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept);
			wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept);
1287 1288 1289
		}
		return;
	}
1290

L
Lennert Buytenhek 已提交
1291 1292 1293
	for (i = 0; i < 0x100; i += 4) {
		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, 0);
		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, 0);
L
Linus Torvalds 已提交
1294 1295
	}

L
Lennert Buytenhek 已提交
1296 1297 1298
	for (addr = dev->mc_list; addr != NULL; addr = addr->next) {
		u8 *a = addr->da_addr;
		int table;
1299

L
Lennert Buytenhek 已提交
1300 1301
		if (addr->da_addrlen != 6)
			continue;
L
Linus Torvalds 已提交
1302

L
Lennert Buytenhek 已提交
1303 1304 1305 1306 1307
		if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
			table = SPECIAL_MCAST_TABLE(mp->port_num);
			set_filter_table_entry(mp, table, a[5]);
		} else {
			int crc = addr_crc(a);
L
Linus Torvalds 已提交
1308

L
Lennert Buytenhek 已提交
1309 1310 1311 1312
			table = OTHER_MCAST_TABLE(mp->port_num);
			set_filter_table_entry(mp, table, crc);
		}
	}
1313
}
1314 1315


1316
/* rx/tx queue initialisation ***********************************************/
1317
static int rxq_init(struct mv643xx_eth_private *mp, int index)
1318
{
1319
	struct rx_queue *rxq = mp->rxq + index;
1320 1321
	struct rx_desc *rx_desc;
	int size;
1322 1323
	int i;

1324 1325
	rxq->index = index;

1326 1327 1328 1329 1330 1331 1332 1333
	rxq->rx_ring_size = mp->default_rx_ring_size;

	rxq->rx_desc_count = 0;
	rxq->rx_curr_desc = 0;
	rxq->rx_used_desc = 0;

	size = rxq->rx_ring_size * sizeof(struct rx_desc);

1334
	if (index == mp->rxq_primary && size <= mp->rx_desc_sram_size) {
1335 1336 1337 1338 1339 1340 1341
		rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
						mp->rx_desc_sram_size);
		rxq->rx_desc_dma = mp->rx_desc_sram_addr;
	} else {
		rxq->rx_desc_area = dma_alloc_coherent(NULL, size,
							&rxq->rx_desc_dma,
							GFP_KERNEL);
1342 1343
	}

1344 1345 1346 1347 1348 1349
	if (rxq->rx_desc_area == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate rx ring (%d bytes)\n", size);
		goto out;
	}
	memset(rxq->rx_desc_area, 0, size);
L
Linus Torvalds 已提交
1350

1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374
	rxq->rx_desc_area_size = size;
	rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb),
								GFP_KERNEL);
	if (rxq->rx_skb == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate rx skb ring\n");
		goto out_free;
	}

	rx_desc = (struct rx_desc *)rxq->rx_desc_area;
	for (i = 0; i < rxq->rx_ring_size; i++) {
		int nexti = (i + 1) % rxq->rx_ring_size;
		rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
					nexti * sizeof(struct rx_desc);
	}

	init_timer(&rxq->rx_oom);
	rxq->rx_oom.data = (unsigned long)rxq;
	rxq->rx_oom.function = rxq_refill_timer_wrapper;

	return 0;


out_free:
1375
	if (index == mp->rxq_primary && size <= mp->rx_desc_sram_size)
1376 1377 1378 1379 1380 1381 1382 1383
		iounmap(rxq->rx_desc_area);
	else
		dma_free_coherent(NULL, size,
				  rxq->rx_desc_area,
				  rxq->rx_desc_dma);

out:
	return -ENOMEM;
1384
}
1385

1386
static void rxq_deinit(struct rx_queue *rxq)
1387
{
1388 1389 1390 1391
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	int i;

	rxq_disable(rxq);
1392

1393
	del_timer_sync(&rxq->rx_oom);
1394

1395 1396 1397 1398
	for (i = 0; i < rxq->rx_ring_size; i++) {
		if (rxq->rx_skb[i]) {
			dev_kfree_skb(rxq->rx_skb[i]);
			rxq->rx_desc_count--;
L
Linus Torvalds 已提交
1399
		}
1400
	}
L
Linus Torvalds 已提交
1401

1402 1403 1404 1405 1406 1407
	if (rxq->rx_desc_count) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "error freeing rx ring -- %d skbs stuck\n",
			   rxq->rx_desc_count);
	}

1408 1409
	if (rxq->index == mp->rxq_primary &&
	    rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
1410
		iounmap(rxq->rx_desc_area);
1411
	else
1412 1413 1414 1415
		dma_free_coherent(NULL, rxq->rx_desc_area_size,
				  rxq->rx_desc_area, rxq->rx_desc_dma);

	kfree(rxq->rx_skb);
1416
}
L
Linus Torvalds 已提交
1417

1418
static int txq_init(struct mv643xx_eth_private *mp)
1419
{
1420 1421 1422
	struct tx_queue *txq = mp->txq;
	struct tx_desc *tx_desc;
	int size;
1423
	int i;
L
Linus Torvalds 已提交
1424

1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446
	txq->tx_ring_size = mp->default_tx_ring_size;

	txq->tx_desc_count = 0;
	txq->tx_curr_desc = 0;
	txq->tx_used_desc = 0;

	size = txq->tx_ring_size * sizeof(struct tx_desc);

	if (size <= mp->tx_desc_sram_size) {
		txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
						mp->tx_desc_sram_size);
		txq->tx_desc_dma = mp->tx_desc_sram_addr;
	} else {
		txq->tx_desc_area = dma_alloc_coherent(NULL, size,
							&txq->tx_desc_dma,
							GFP_KERNEL);
	}

	if (txq->tx_desc_area == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate tx ring (%d bytes)\n", size);
		goto out;
1447
	}
1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467
	memset(txq->tx_desc_area, 0, size);

	txq->tx_desc_area_size = size;
	txq->tx_skb = kmalloc(txq->tx_ring_size * sizeof(*txq->tx_skb),
								GFP_KERNEL);
	if (txq->tx_skb == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate tx skb ring\n");
		goto out_free;
	}

	tx_desc = (struct tx_desc *)txq->tx_desc_area;
	for (i = 0; i < txq->tx_ring_size; i++) {
		int nexti = (i + 1) % txq->tx_ring_size;
		tx_desc[i].next_desc_ptr = txq->tx_desc_dma +
					nexti * sizeof(struct tx_desc);
	}

	return 0;

1468

1469 1470 1471 1472 1473 1474 1475
out_free:
	if (size <= mp->tx_desc_sram_size)
		iounmap(txq->tx_desc_area);
	else
		dma_free_coherent(NULL, size,
				  txq->tx_desc_area,
				  txq->tx_desc_dma);
1476

1477 1478
out:
	return -ENOMEM;
1479
}
L
Linus Torvalds 已提交
1480

1481
static void txq_reclaim(struct tx_queue *txq, int force)
1482
{
1483
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
1484
	unsigned long flags;
L
Linus Torvalds 已提交
1485

1486 1487 1488 1489 1490 1491 1492 1493
	spin_lock_irqsave(&mp->lock, flags);
	while (txq->tx_desc_count > 0) {
		int tx_index;
		struct tx_desc *desc;
		u32 cmd_sts;
		struct sk_buff *skb;
		dma_addr_t addr;
		int count;
1494

1495 1496
		tx_index = txq->tx_used_desc;
		desc = &txq->tx_desc_area[tx_index];
1497
		cmd_sts = desc->cmd_sts;
1498

1499 1500
		if (!force && (cmd_sts & BUFFER_OWNED_BY_DMA))
			break;
L
Linus Torvalds 已提交
1501

1502 1503
		txq->tx_used_desc = (tx_index + 1) % txq->tx_ring_size;
		txq->tx_desc_count--;
L
Linus Torvalds 已提交
1504

1505 1506
		addr = desc->buf_ptr;
		count = desc->byte_cnt;
1507 1508
		skb = txq->tx_skb[tx_index];
		txq->tx_skb[tx_index] = NULL;
1509

1510
		if (cmd_sts & ERROR_SUMMARY) {
1511 1512
			dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n");
			mp->dev->stats.tx_errors++;
1513
		}
L
Linus Torvalds 已提交
1514

1515 1516 1517
		/*
		 * Drop mp->lock while we free the skb.
		 */
1518
		spin_unlock_irqrestore(&mp->lock, flags);
L
Linus Torvalds 已提交
1519

1520
		if (cmd_sts & TX_FIRST_DESC)
1521 1522 1523
			dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
		else
			dma_unmap_page(NULL, addr, count, DMA_TO_DEVICE);
1524

1525 1526
		if (skb)
			dev_kfree_skb_irq(skb);
1527

1528
		spin_lock_irqsave(&mp->lock, flags);
1529
	}
1530
	spin_unlock_irqrestore(&mp->lock, flags);
1531
}
L
Linus Torvalds 已提交
1532

1533
static void txq_deinit(struct tx_queue *txq)
1534
{
1535
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
1536

1537 1538
	txq_disable(txq);
	txq_reclaim(txq, 1);
L
Linus Torvalds 已提交
1539

1540
	BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);
L
Linus Torvalds 已提交
1541

1542 1543
	if (txq->tx_desc_area_size <= mp->tx_desc_sram_size)
		iounmap(txq->tx_desc_area);
1544
	else
1545 1546 1547 1548
		dma_free_coherent(NULL, txq->tx_desc_area_size,
				  txq->tx_desc_area, txq->tx_desc_dma);

	kfree(txq->tx_skb);
1549
}
L
Linus Torvalds 已提交
1550 1551


1552
/* netdev ops and related ***************************************************/
L
Lennert Buytenhek 已提交
1553
static void update_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
1554
{
1555 1556
	u32 pscr_o;
	u32 pscr_n;
L
Linus Torvalds 已提交
1557

1558
	pscr_o = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
1559

1560
	/* clear speed, duplex and rx buffer size fields */
1561 1562 1563 1564
	pscr_n = pscr_o & ~(SET_MII_SPEED_TO_100   |
			    SET_GMII_SPEED_TO_1000 |
			    SET_FULL_DUPLEX_MODE   |
			    MAX_RX_PACKET_MASK);
L
Linus Torvalds 已提交
1565

L
Lennert Buytenhek 已提交
1566
	if (speed == SPEED_1000) {
1567 1568
		pscr_n |= SET_GMII_SPEED_TO_1000 | MAX_RX_PACKET_9700BYTE;
	} else {
L
Lennert Buytenhek 已提交
1569
		if (speed == SPEED_100)
1570 1571
			pscr_n |= SET_MII_SPEED_TO_100;
		pscr_n |= MAX_RX_PACKET_1522BYTE;
1572
	}
L
Linus Torvalds 已提交
1573

L
Lennert Buytenhek 已提交
1574
	if (duplex == DUPLEX_FULL)
1575 1576 1577 1578 1579
		pscr_n |= SET_FULL_DUPLEX_MODE;

	if (pscr_n != pscr_o) {
		if ((pscr_o & SERIAL_PORT_ENABLE) == 0)
			wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n);
1580
		else {
1581 1582 1583 1584 1585 1586
			txq_disable(mp->txq);
			pscr_o &= ~SERIAL_PORT_ENABLE;
			wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_o);
			wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n);
			wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n);
			txq_enable(mp->txq);
1587 1588 1589
		}
	}
}
1590

L
Lennert Buytenhek 已提交
1591
static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
1592 1593
{
	struct net_device *dev = (struct net_device *)dev_id;
1594
	struct mv643xx_eth_private *mp = netdev_priv(dev);
L
Lennert Buytenhek 已提交
1595 1596
	u32 int_cause;
	u32 int_cause_ext;
1597

1598
	int_cause = rdl(mp, INT_CAUSE(mp->port_num)) & (INT_RX | INT_EXT);
L
Lennert Buytenhek 已提交
1599 1600 1601 1602
	if (int_cause == 0)
		return IRQ_NONE;

	int_cause_ext = 0;
1603
	if (int_cause & INT_EXT) {
1604
		int_cause_ext = rdl(mp, INT_CAUSE_EXT(mp->port_num))
1605
				& (INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX);
1606
		wrl(mp, INT_CAUSE_EXT(mp->port_num), ~int_cause_ext);
1607
	}
L
Linus Torvalds 已提交
1608

L
Lennert Buytenhek 已提交
1609
	if (int_cause_ext & (INT_EXT_PHY | INT_EXT_LINK)) {
1610
		if (mii_link_ok(&mp->mii)) {
1611 1612
			struct ethtool_cmd cmd;

1613
			mii_ethtool_gset(&mp->mii, &cmd);
L
Lennert Buytenhek 已提交
1614
			update_pscr(mp, cmd.speed, cmd.duplex);
1615
			txq_enable(mp->txq);
1616 1617
			if (!netif_carrier_ok(dev)) {
				netif_carrier_on(dev);
1618
				__txq_maybe_wake(mp->txq);
1619 1620 1621 1622 1623 1624
			}
		} else if (netif_carrier_ok(dev)) {
			netif_stop_queue(dev);
			netif_carrier_off(dev);
		}
	}
L
Linus Torvalds 已提交
1625

1626 1627 1628
	/*
	 * RxBuffer or RxError set for any of the 8 queues?
	 */
1629
#ifdef MV643XX_ETH_NAPI
1630
	if (int_cause & INT_RX) {
1631 1632
		wrl(mp, INT_MASK(mp->port_num), 0x00000000);
		rdl(mp, INT_MASK(mp->port_num));
L
Linus Torvalds 已提交
1633

1634
		netif_rx_schedule(dev, &mp->napi);
1635
	}
1636
#else
1637 1638 1639 1640 1641 1642 1643
	if (int_cause & INT_RX) {
		int i;

		for (i = 7; i >= 0; i--)
			if (mp->rxq_mask & (1 << i))
				rxq_process(mp->rxq + i, INT_MAX);
	}
1644
#endif
L
Lennert Buytenhek 已提交
1645

1646 1647 1648 1649
	if (int_cause_ext & INT_EXT_TX) {
		txq_reclaim(mp->txq, 0);
		__txq_maybe_wake(mp->txq);
	}
L
Linus Torvalds 已提交
1650

1651
	return IRQ_HANDLED;
L
Linus Torvalds 已提交
1652 1653
}

1654
static void phy_reset(struct mv643xx_eth_private *mp)
L
Linus Torvalds 已提交
1655
{
L
Lennert Buytenhek 已提交
1656
	unsigned int data;
L
Linus Torvalds 已提交
1657

L
Lennert Buytenhek 已提交
1658 1659 1660
	smi_reg_read(mp, mp->phy_addr, 0, &data);
	data |= 0x8000;
	smi_reg_write(mp, mp->phy_addr, 0, data);
L
Linus Torvalds 已提交
1661

1662 1663
	do {
		udelay(1);
L
Lennert Buytenhek 已提交
1664 1665
		smi_reg_read(mp, mp->phy_addr, 0, &data);
	} while (data & 0x8000);
L
Linus Torvalds 已提交
1666 1667
}

L
Lennert Buytenhek 已提交
1668
static void port_start(struct mv643xx_eth_private *mp)
L
Linus Torvalds 已提交
1669
{
1670 1671
	u32 pscr;
	struct ethtool_cmd ethtool_cmd;
1672
	int i;
L
Linus Torvalds 已提交
1673

1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687
	/*
	 * Configure basic link parameters.
	 */
	pscr = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
	pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS);
	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
	pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
		DISABLE_AUTO_NEG_SPEED_GMII    |
		DISABLE_AUTO_NEG_FOR_DUPLEX    |
		DO_NOT_FORCE_LINK_FAIL	       |
		SERIAL_PORT_CONTROL_RESERVED;
	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
	pscr |= SERIAL_PORT_ENABLE;
	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
L
Linus Torvalds 已提交
1688

1689 1690
	wrl(mp, SDMA_CONFIG(mp->port_num), PORT_SDMA_CONFIG_DEFAULT_VALUE);

L
Lennert Buytenhek 已提交
1691
	mv643xx_eth_get_settings(mp->dev, &ethtool_cmd);
1692
	phy_reset(mp);
L
Lennert Buytenhek 已提交
1693
	mv643xx_eth_set_settings(mp->dev, &ethtool_cmd);
L
Linus Torvalds 已提交
1694

1695 1696 1697
	/*
	 * Configure TX path and queues.
	 */
1698
	tx_set_rate(mp, 1000000000, 16777216);
1699 1700 1701 1702 1703 1704 1705 1706
	for (i = 0; i < 1; i++) {
		struct tx_queue *txq = mp->txq;
		int off = TXQ_CURRENT_DESC_PTR(mp->port_num);
		u32 addr;

		addr = (u32)txq->tx_desc_dma;
		addr += txq->tx_curr_desc * sizeof(struct tx_desc);
		wrl(mp, off, addr);
1707 1708 1709

		txq_set_rate(txq, 1000000000, 16777216);
		txq_set_fixed_prio_mode(txq);
1710 1711
	}

L
Lennert Buytenhek 已提交
1712 1713 1714 1715
	/*
	 * Add configured unicast address to address filter table.
	 */
	uc_addr_set(mp, mp->dev->dev_addr);
L
Linus Torvalds 已提交
1716

1717 1718 1719 1720
	/*
	 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
	 * frames to RX queue #0.
	 */
1721
	wrl(mp, PORT_CONFIG(mp->port_num), 0x00000000);
1722

1723 1724 1725
	/*
	 * Treat BPDUs as normal multicasts, and disable partition mode.
	 */
1726
	wrl(mp, PORT_CONFIG_EXT(mp->port_num), 0x00000000);
1727

1728
	/*
1729
	 * Enable the receive queues.
1730
	 */
1731 1732 1733
	for (i = 0; i < 8; i++) {
		struct rx_queue *rxq = mp->rxq + i;
		int off = RXQ_CURRENT_DESC_PTR(mp->port_num, i);
1734
		u32 addr;
L
Linus Torvalds 已提交
1735

1736 1737 1738
		if ((mp->rxq_mask & (1 << i)) == 0)
			continue;

1739 1740 1741
		addr = (u32)rxq->rx_desc_dma;
		addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
		wrl(mp, off, addr);
L
Linus Torvalds 已提交
1742

1743 1744
		rxq_enable(rxq);
	}
L
Linus Torvalds 已提交
1745 1746
}

1747
static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
{
	unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;

	if (coal > 0x3fff)
		coal = 0x3fff;

	wrl(mp, SDMA_CONFIG(mp->port_num),
		((coal & 0x3fff) << 8) |
		(rdl(mp, SDMA_CONFIG(mp->port_num))
			& 0xffc000ff));
}
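
/*
 * Set the TX interrupt coalescing delay, using the same encoding as
 * set_rx_coal() but written to bits 17:4 of TX_FIFO_URGENT_THRESHOLD.
 */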
static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
{
	unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;

	if (coal > 0x3fff)
		coal = 0x3fff;
	wrl(mp, TX_FIFO_URGENT_THRESHOLD(mp->port_num), (coal & 0x3fff) << 4);
}
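
/*
 * Open the interface: clear stale interrupt cause bits, grab the IRQ,
 * set up the RX and TX descriptor rings, start the port and finally
 * unmask the RX and extended (link/PHY/TX) interrupts.
 */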
static int mv643xx_eth_open(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int err;
	int i;

	wrl(mp, INT_CAUSE(mp->port_num), 0);
	wrl(mp, INT_CAUSE_EXT(mp->port_num), 0);
	rdl(mp, INT_CAUSE_EXT(mp->port_num));

	err = request_irq(dev->irq, mv643xx_eth_irq,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM,
			  dev->name, dev);
	if (err) {
		dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
		return -EAGAIN;
	}

	init_mac_tables(mp);

	for (i = 0; i < 8; i++) {
		if ((mp->rxq_mask & (1 << i)) == 0)
			continue;

		err = rxq_init(mp, i);
		if (err) {
			while (--i >= 0)
				if (mp->rxq_mask & (1 << i))
					rxq_deinit(mp->rxq + i);
			goto out;
		}

		rxq_refill(mp->rxq + i);
	}

	err = txq_init(mp);
	if (err)
		goto out_free;

#ifdef MV643XX_ETH_NAPI
	napi_enable(&mp->napi);
#endif

	port_start(mp);

	set_rx_coal(mp, 0);
	set_tx_coal(mp, 0);

	wrl(mp, INT_MASK_EXT(mp->port_num),
	    INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX);

	wrl(mp, INT_MASK(mp->port_num), INT_RX | INT_EXT);

	return 0;


out_free:
	for (i = 0; i < 8; i++)
		if (mp->rxq_mask & (1 << i))
			rxq_deinit(mp->rxq + i);
out:
	free_irq(dev->irq, dev);

	return err;
}
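
/*
 * Quiesce the port: stop RX/TX queue DMA, wait for the transmit FIFO
 * to drain, then clear the port enable and forced-link bits in the
 * port serial control register.
 */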
static void port_reset(struct mv643xx_eth_private *mp)
{
	unsigned int data;
	int i;

	for (i = 0; i < 8; i++) {
		if (mp->rxq_mask & (1 << i))
			rxq_disable(mp->rxq + i);
	}
	txq_disable(mp->txq);
	while (!(rdl(mp, PORT_STATUS(mp->port_num)) & TX_FIFO_EMPTY))
		udelay(10);

	/* Clear the Enable bit in the port serial control register */
	data = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
	data &= ~(SERIAL_PORT_ENABLE		|
		  DO_NOT_FORCE_LINK_FAIL	|
		  FORCE_LINK_PASS);
	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), data);
}
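
/*
 * Tear the interface down: mask interrupts, stop NAPI and the TX
 * queue, release the IRQ, reset the port, take a final MIB counter
 * snapshot and free the descriptor rings.
 */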
static int mv643xx_eth_stop(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int i;

	wrl(mp, INT_MASK(mp->port_num), 0x00000000);
	rdl(mp, INT_MASK(mp->port_num));

#ifdef MV643XX_ETH_NAPI
	napi_disable(&mp->napi);
#endif
	netif_carrier_off(dev);
	netif_stop_queue(dev);

	free_irq(dev->irq, dev);

	port_reset(mp);
	mib_counters_update(mp);

	for (i = 0; i < 8; i++) {
		if (mp->rxq_mask & (1 << i))
			rxq_deinit(mp->rxq + i);
	}
	txq_deinit(mp->txq);

	return 0;
}

static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	return generic_mii_ioctl(&mp->mii, if_mii(ifr), cmd, NULL);
}

static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (new_mtu < 64 || new_mtu > 9500)
		return -EINVAL;

	dev->mtu = new_mtu;
	tx_set_rate(mp, 1000000000, 16777216);

	if (!netif_running(dev))
		return 0;

	/*
	 * Stop and then re-open the interface. This will allocate RX
	 * skbs of the new MTU.
	 * There is a possible danger that the open will not succeed,
	 * due to memory being full.
	 */
	mv643xx_eth_stop(dev);
	if (mv643xx_eth_open(dev)) {
		dev_printk(KERN_ERR, &dev->dev,
			   "fatal error on re-opening device after "
			   "MTU change\n");
	}

	return 0;
}
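
/*
 * TX timeout worker, run in process context via schedule_work(): with
 * the queue stopped, reset and restart the port, then wake the TX
 * queue again.
 */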
static void tx_timeout_task(struct work_struct *ugly)
{
	struct mv643xx_eth_private *mp;

	mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
	if (netif_running(mp->dev)) {
		netif_stop_queue(mp->dev);

		port_reset(mp);
		port_start(mp);

		__txq_maybe_wake(mp->txq);
	}
}

static void mv643xx_eth_tx_timeout(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	dev_printk(KERN_INFO, &dev->dev, "tx timeout\n");

	schedule_work(&mp->tx_timeout_task);
}
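
/*
 * Netpoll entry point: temporarily mask the port's interrupts, run
 * the interrupt handler by hand, then restore the interrupt mask.
 */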
#ifdef CONFIG_NET_POLL_CONTROLLER
static void mv643xx_eth_netpoll(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	wrl(mp, INT_MASK(mp->port_num), 0x00000000);
	rdl(mp, INT_MASK(mp->port_num));

	mv643xx_eth_irq(dev->irq, dev);

	wrl(mp, INT_MASK(mp->port_num), INT_RX | INT_EXT);
}
#endif
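
/*
 * MDIO accessors used by the generic mii library (mp->mii); they are
 * thin wrappers around the SMI register read/write helpers.
 */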
static int mv643xx_eth_mdio_read(struct net_device *dev, int addr, int reg)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int val;

	smi_reg_read(mp, addr, reg, &val);

	return val;
}

static void mv643xx_eth_mdio_write(struct net_device *dev, int addr, int reg, int val)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	smi_reg_write(mp, addr, reg, val);
}


/* platform glue ************************************************************/
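
/*
 * Program the controller's address decoding windows from the
 * mbus_dram_target_info supplied by the platform code, so that the
 * port DMA engines can reach each DRAM chip select.
 */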
static void
mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
			      struct mbus_dram_target_info *dram)
{
	void __iomem *base = msp->base;
	u32 win_enable;
	u32 win_protect;
	int i;

	for (i = 0; i < 6; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	win_enable = 0x3f;
	win_protect = 0;

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
			(cs->mbus_attr << 8) |
			dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable &= ~(1 << i);
		win_protect |= 3 << (2 * i);
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE);
	msp->win_protect = win_protect;
}
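
/*
 * Probe the shared (per-controller) part of the driver: map the
 * register window, reprogram the MBUS windows if a DRAM layout was
 * supplied, and record the t_clk rate used for coalescing
 * calculations.
 */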
static int mv643xx_eth_shared_probe(struct platform_device *pdev)
{
	static int mv643xx_eth_version_printed = 0;
	struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
	struct mv643xx_eth_shared_private *msp;
	struct resource *res;
	int ret;

	if (!mv643xx_eth_version_printed++)
		printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n");

	ret = -EINVAL;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		goto out;

	ret = -ENOMEM;
	msp = kmalloc(sizeof(*msp), GFP_KERNEL);
	if (msp == NULL)
		goto out;
	memset(msp, 0, sizeof(*msp));

	msp->base = ioremap(res->start, res->end - res->start + 1);
	if (msp->base == NULL)
		goto out_free;

	spin_lock_init(&msp->phy_lock);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (pd != NULL && pd->dram != NULL)
		mv643xx_eth_conf_mbus_windows(msp, pd->dram);

	/*
	 * Detect hardware parameters.
	 */
	msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;

	platform_set_drvdata(pdev, msp);

	return 0;

out_free:
	kfree(msp);
out:
	return ret;
}

static int mv643xx_eth_shared_remove(struct platform_device *pdev)
{
	struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);

	iounmap(msp->base);
	kfree(msp);

	return 0;
}

static struct platform_driver mv643xx_eth_shared_driver = {
	.probe		= mv643xx_eth_shared_probe,
	.remove		= mv643xx_eth_shared_remove,
	.driver = {
		.name	= MV643XX_ETH_SHARED_NAME,
		.owner	= THIS_MODULE,
	},
};
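
/*
 * The shared PHY_ADDR register holds a 5-bit PHY address per port;
 * these helpers update and read the field belonging to this port.
 */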
static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
{
	int addr_shift = 5 * mp->port_num;
	u32 data;

	data = rdl(mp, PHY_ADDR);
	data &= ~(0x1f << addr_shift);
	data |= (phy_addr & 0x1f) << addr_shift;
	wrl(mp, PHY_ADDR, data);
}

static int phy_addr_get(struct mv643xx_eth_private *mp)
{
	unsigned int data;

	data = rdl(mp, PHY_ADDR);

	return (data >> (5 * mp->port_num)) & 0x1f;
}

static void set_params(struct mv643xx_eth_private *mp,
		       struct mv643xx_eth_platform_data *pd)
{
	struct net_device *dev = mp->dev;

	if (is_valid_ether_addr(pd->mac_addr))
		memcpy(dev->dev_addr, pd->mac_addr, 6);
	else
		uc_addr_get(mp, dev->dev_addr);

	if (pd->phy_addr == -1) {
		mp->shared_smi = NULL;
		mp->phy_addr = -1;
	} else {
		mp->shared_smi = mp->shared;
		if (pd->shared_smi != NULL)
			mp->shared_smi = platform_get_drvdata(pd->shared_smi);

		if (pd->force_phy_addr || pd->phy_addr) {
			mp->phy_addr = pd->phy_addr & 0x3f;
			phy_addr_set(mp, mp->phy_addr);
		} else {
			mp->phy_addr = phy_addr_get(mp);
		}
	}

	mp->default_rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
	if (pd->rx_queue_size)
		mp->default_rx_ring_size = pd->rx_queue_size;
	mp->rx_desc_sram_addr = pd->rx_sram_addr;
	mp->rx_desc_sram_size = pd->rx_sram_size;

	if (pd->rx_queue_mask)
		mp->rxq_mask = pd->rx_queue_mask;
	else
		mp->rxq_mask = 0x01;
	mp->rxq_primary = fls(mp->rxq_mask) - 1;

	mp->default_tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
	if (pd->tx_queue_size)
		mp->default_tx_ring_size = pd->tx_queue_size;
	mp->tx_desc_sram_addr = pd->tx_sram_addr;
	mp->tx_desc_sram_size = pd->tx_sram_size;
}
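
/*
 * Check whether a PHY responds at mp->phy_addr by toggling the
 * autoneg-enable bit (0x1000) in control register 0 and verifying
 * that the change reads back; the original value is then restored.
 */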
static int phy_detect(struct mv643xx_eth_private *mp)
{
	unsigned int data;
	unsigned int data2;

	smi_reg_read(mp, mp->phy_addr, 0, &data);
	smi_reg_write(mp, mp->phy_addr, 0, data ^ 0x1000);

	smi_reg_read(mp, mp->phy_addr, 0, &data2);
	if (((data ^ data2) & 0x1000) == 0)
		return -ENODEV;

	smi_reg_write(mp, mp->phy_addr, 0, data);

	return 0;
}
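
/*
 * Hook the generic mii library up to this port's PHY and either
 * enable autonegotiation (advertising 10/100, plus 1000baseT full
 * duplex if supported) or force the speed/duplex given in the
 * platform data.
 */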
static int phy_init(struct mv643xx_eth_private *mp,
		    struct mv643xx_eth_platform_data *pd)
{
	struct ethtool_cmd cmd;
	int err;

	err = phy_detect(mp);
	if (err) {
		dev_printk(KERN_INFO, &mp->dev->dev,
			   "no PHY detected at addr %d\n", mp->phy_addr);
		return err;
	}
	phy_reset(mp);

	mp->mii.phy_id = mp->phy_addr;
	mp->mii.phy_id_mask = 0x3f;
	mp->mii.reg_num_mask = 0x1f;
	mp->mii.dev = mp->dev;
	mp->mii.mdio_read = mv643xx_eth_mdio_read;
	mp->mii.mdio_write = mv643xx_eth_mdio_write;

	mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii);

	memset(&cmd, 0, sizeof(cmd));

	cmd.port = PORT_MII;
	cmd.transceiver = XCVR_INTERNAL;
	cmd.phy_address = mp->phy_addr;
	if (pd->speed == 0) {
		cmd.autoneg = AUTONEG_ENABLE;
		cmd.speed = SPEED_100;
		cmd.advertising = ADVERTISED_10baseT_Half  |
				  ADVERTISED_10baseT_Full  |
				  ADVERTISED_100baseT_Half |
				  ADVERTISED_100baseT_Full;
		if (mp->mii.supports_gmii)
			cmd.advertising |= ADVERTISED_1000baseT_Full;
	} else {
		cmd.autoneg = AUTONEG_DISABLE;
		cmd.speed = pd->speed;
		cmd.duplex = pd->duplex;
	}

	update_pscr(mp, cmd.speed, cmd.duplex);
	mv643xx_eth_set_settings(mp->dev, &cmd);

	return 0;
}
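
/*
 * Per-port probe: allocate the net_device, pull configuration from
 * the platform data and the shared driver instance, initialise the
 * PHY, wire up the net_device callbacks and register the interface.
 */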
static int mv643xx_eth_probe(struct platform_device *pdev)
{
	struct mv643xx_eth_platform_data *pd;
	struct mv643xx_eth_private *mp;
	struct net_device *dev;
	struct resource *res;
	DECLARE_MAC_BUF(mac);
	int err;

	pd = pdev->dev.platform_data;
	if (pd == NULL) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "no mv643xx_eth_platform_data\n");
		return -ENODEV;
	}

	if (pd->shared == NULL) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "no mv643xx_eth_platform_data->shared\n");
		return -ENODEV;
	}

	dev = alloc_etherdev(sizeof(struct mv643xx_eth_private));
	if (!dev)
		return -ENOMEM;

	mp = netdev_priv(dev);
	platform_set_drvdata(pdev, mp);

	mp->shared = platform_get_drvdata(pd->shared);
	mp->port_num = pd->port_number;

	mp->dev = dev;
#ifdef MV643XX_ETH_NAPI
	netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 64);
#endif

	set_params(mp, pd);

	spin_lock_init(&mp->lock);

	mib_counters_clear(mp);
	INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);

	err = phy_init(mp, pd);
	if (err)
		goto out;
	SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);


	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	BUG_ON(!res);
	dev->irq = res->start;

	dev->hard_start_xmit = mv643xx_eth_xmit;
	dev->open = mv643xx_eth_open;
	dev->stop = mv643xx_eth_stop;
	dev->set_multicast_list = mv643xx_eth_set_rx_mode;
	dev->set_mac_address = mv643xx_eth_set_mac_address;
	dev->do_ioctl = mv643xx_eth_ioctl;
	dev->change_mtu = mv643xx_eth_change_mtu;
	dev->tx_timeout = mv643xx_eth_tx_timeout;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = mv643xx_eth_netpoll;
#endif
	dev->watchdog_timeo = 2 * HZ;
	dev->base_addr = 0;

#ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX
	/*
	 * Zero copy can only work if we use Discovery II memory. Else, we will
	 * have to map the buffers to ISA memory which is only 16 MB
	 */
	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
#endif

	SET_NETDEV_DEV(dev, &pdev->dev);

	if (mp->shared->win_protect)
		wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);

	err = register_netdev(dev);
	if (err)
		goto out;

	dev_printk(KERN_NOTICE, &dev->dev, "port %d with MAC address %s\n",
		   mp->port_num, print_mac(mac, dev->dev_addr));

	if (dev->features & NETIF_F_SG)
		dev_printk(KERN_NOTICE, &dev->dev, "scatter/gather enabled\n");

	if (dev->features & NETIF_F_IP_CSUM)
		dev_printk(KERN_NOTICE, &dev->dev, "tx checksum offload\n");

#ifdef MV643XX_ETH_NAPI
	dev_printk(KERN_NOTICE, &dev->dev, "napi enabled\n");
#endif

	if (mp->tx_desc_sram_size > 0)
		dev_printk(KERN_NOTICE, &dev->dev, "configured with sram\n");

	return 0;

out:
	free_netdev(dev);

	return err;
}
static int mv643xx_eth_remove(struct platform_device *pdev)
{
	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);

	unregister_netdev(mp->dev);
	flush_scheduled_work();
	free_netdev(mp->dev);

	platform_set_drvdata(pdev, NULL);

	return 0;
}

static void mv643xx_eth_shutdown(struct platform_device *pdev)
{
	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);

	/* Mask all interrupts on ethernet port */
	wrl(mp, INT_MASK(mp->port_num), 0);
	rdl(mp, INT_MASK(mp->port_num));

	if (netif_running(mp->dev))
		port_reset(mp);
}

static struct platform_driver mv643xx_eth_driver = {
	.probe		= mv643xx_eth_probe,
	.remove		= mv643xx_eth_remove,
	.shutdown	= mv643xx_eth_shutdown,
	.driver = {
		.name	= MV643XX_ETH_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init mv643xx_eth_init_module(void)
{
	int rc;

	rc = platform_driver_register(&mv643xx_eth_shared_driver);
	if (!rc) {
		rc = platform_driver_register(&mv643xx_eth_driver);
		if (rc)
			platform_driver_unregister(&mv643xx_eth_shared_driver);
	}

	return rc;
}
module_init(mv643xx_eth_init_module);

static void __exit mv643xx_eth_cleanup_module(void)
{
	platform_driver_unregister(&mv643xx_eth_driver);
	platform_driver_unregister(&mv643xx_eth_shared_driver);
}
module_exit(mv643xx_eth_cleanup_module);

MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, Manish Lachwani "
	      "and Dale Farnsworth");
MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);
MODULE_ALIAS("platform:" MV643XX_ETH_NAME);