mv643xx_eth.c 65.3 KB
Newer Older
L
Linus Torvalds 已提交
1
/*
2
 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
L
Linus Torvalds 已提交
3 4 5
 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
 *
 * Based on the 64360 driver from:
6 7
 * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
 *		      Rabeeh Khoury <rabeeh@marvell.com>
L
Linus Torvalds 已提交
8 9
 *
 * Copyright (C) 2003 PMC-Sierra, Inc.,
10
 *	written by Manish Lachwani
L
Linus Torvalds 已提交
11 12 13
 *
 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
 *
14
 * Copyright (C) 2004-2006 MontaVista Software, Inc.
L
Linus Torvalds 已提交
15 16 17 18 19
 *			   Dale Farnsworth <dale@farnsworth.org>
 *
 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
 *				     <sjhill@realitydiluted.com>
 *
20 21 22
 * Copyright (C) 2007-2008 Marvell Semiconductor
 *			   Lennert Buytenhek <buytenh@marvell.com>
 *
L
Linus Torvalds 已提交
23 24 25 26 27 28 29 30 31 32 33 34 35 36
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
37

L
Linus Torvalds 已提交
38 39
#include <linux/init.h>
#include <linux/dma-mapping.h>
40
#include <linux/in.h>
L
Linus Torvalds 已提交
41 42 43 44 45
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
46
#include <linux/platform_device.h>
47 48 49 50 51 52
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
#include <linux/mv643xx_eth.h>
L
Linus Torvalds 已提交
53 54 55
#include <asm/io.h>
#include <asm/types.h>
#include <asm/system.h>
56

57
static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.3";

/*
 * When defined, TX descriptors are also reclaimed periodically from
 * the NAPI poll loop (see mv643xx_eth_poll()).
 */
#define MV643XX_ETH_TX_FAST_REFILL

/*
 * Registers shared between all ports.
 */
#define PHY_ADDR			0x0000
#define SMI_REG				0x0004
#define  SMI_BUSY			0x10000000
#define  SMI_READ_VALID			0x08000000
#define  SMI_OPCODE_READ		0x04000000
#define  SMI_OPCODE_WRITE		0x00000000
#define ERR_INT_CAUSE			0x0080
#define  ERR_INT_SMI_DONE		0x00000010
#define ERR_INT_MASK			0x0084
#define WINDOW_BASE(w)			(0x0200 + ((w) << 3))
#define WINDOW_SIZE(w)			(0x0204 + ((w) << 3))
#define WINDOW_REMAP_HIGH(w)		(0x0280 + ((w) << 2))
#define WINDOW_BAR_ENABLE		0x0290
#define WINDOW_PROTECT(w)		(0x0294 + ((w) << 4))

/*
 * Per-port registers.  'p' is the port number, 'q' a queue number.
 */
#define PORT_CONFIG(p)			(0x0400 + ((p) << 10))
#define  UNICAST_PROMISCUOUS_MODE	0x00000001
#define PORT_CONFIG_EXT(p)		(0x0404 + ((p) << 10))
#define MAC_ADDR_LOW(p)			(0x0414 + ((p) << 10))
#define MAC_ADDR_HIGH(p)		(0x0418 + ((p) << 10))
#define SDMA_CONFIG(p)			(0x041c + ((p) << 10))
#define PORT_SERIAL_CONTROL(p)		(0x043c + ((p) << 10))
#define PORT_STATUS(p)			(0x0444 + ((p) << 10))
#define  TX_FIFO_EMPTY			0x00000400
#define  TX_IN_PROGRESS			0x00000080
#define  PORT_SPEED_MASK		0x00000030
#define  PORT_SPEED_1000		0x00000010
#define  PORT_SPEED_100			0x00000020
#define  PORT_SPEED_10			0x00000000
#define  FLOW_CONTROL_ENABLED		0x00000008
#define  FULL_DUPLEX			0x00000004
#define  LINK_UP			0x00000002
#define TXQ_COMMAND(p)			(0x0448 + ((p) << 10))
#define TXQ_FIX_PRIO_CONF(p)		(0x044c + ((p) << 10))
#define TX_BW_RATE(p)			(0x0450 + ((p) << 10))
#define TX_BW_MTU(p)			(0x0458 + ((p) << 10))
#define TX_BW_BURST(p)			(0x045c + ((p) << 10))
#define INT_CAUSE(p)			(0x0460 + ((p) << 10))
#define  INT_TX_END_0			0x00080000
#define  INT_TX_END			0x07f80000
#define  INT_RX				0x0007fbfc
#define  INT_EXT			0x00000002
#define INT_CAUSE_EXT(p)		(0x0464 + ((p) << 10))
#define  INT_EXT_LINK			0x00100000
#define  INT_EXT_PHY			0x00010000
#define  INT_EXT_TX_ERROR_0		0x00000100
#define  INT_EXT_TX_0			0x00000001
#define  INT_EXT_TX			0x0000ffff
#define INT_MASK(p)			(0x0468 + ((p) << 10))
#define INT_MASK_EXT(p)			(0x046c + ((p) << 10))
#define TX_FIFO_URGENT_THRESHOLD(p)	(0x0474 + ((p) << 10))
/*
 * "_MOVED" variants: alternative offsets for the TX bandwidth control
 * registers, selected at runtime via
 * mv643xx_eth_shared_private.tx_bw_control_moved.
 */
#define TXQ_FIX_PRIO_CONF_MOVED(p)	(0x04dc + ((p) << 10))
#define TX_BW_RATE_MOVED(p)		(0x04e0 + ((p) << 10))
#define TX_BW_MTU_MOVED(p)		(0x04e8 + ((p) << 10))
#define TX_BW_BURST_MOVED(p)		(0x04ec + ((p) << 10))
#define RXQ_CURRENT_DESC_PTR(p, q)	(0x060c + ((p) << 10) + ((q) << 4))
#define RXQ_COMMAND(p)			(0x0680 + ((p) << 10))
#define TXQ_CURRENT_DESC_PTR(p, q)	(0x06c0 + ((p) << 10) + ((q) << 2))
#define TXQ_BW_TOKENS(p, q)		(0x0700 + ((p) << 10) + ((q) << 4))
#define TXQ_BW_CONF(p, q)		(0x0704 + ((p) << 10) + ((q) << 4))
#define TXQ_BW_WRR_CONF(p, q)		(0x0708 + ((p) << 10) + ((q) << 4))
#define MIB_COUNTERS(p)			(0x1000 + ((p) << 7))
#define SPECIAL_MCAST_TABLE(p)		(0x1400 + ((p) << 10))
#define OTHER_MCAST_TABLE(p)		(0x1500 + ((p) << 10))
#define UNICAST_TABLE(p)		(0x1600 + ((p) << 10))


/*
 * SDMA configuration register.
 */
#define RX_BURST_SIZE_16_64BIT		(4 << 1)
#define BLM_RX_NO_SWAP			(1 << 4)
#define BLM_TX_NO_SWAP			(1 << 5)
#define TX_BURST_SIZE_16_64BIT		(4 << 22)

#if defined(__BIG_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		RX_BURST_SIZE_16_64BIT	|	\
		TX_BURST_SIZE_16_64BIT
#elif defined(__LITTLE_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		RX_BURST_SIZE_16_64BIT	|	\
		BLM_RX_NO_SWAP		|	\
		BLM_TX_NO_SWAP		|	\
		TX_BURST_SIZE_16_64BIT
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif


/*
 * Port serial control register.
 */
#define SET_MII_SPEED_TO_100			(1 << 24)
#define SET_GMII_SPEED_TO_1000			(1 << 23)
#define SET_FULL_DUPLEX_MODE			(1 << 21)
#define MAX_RX_PACKET_9700BYTE			(5 << 17)
#define DISABLE_AUTO_NEG_SPEED_GMII		(1 << 13)
#define DO_NOT_FORCE_LINK_FAIL			(1 << 10)
#define SERIAL_PORT_CONTROL_RESERVED		(1 << 9)
#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL		(1 << 3)
#define DISABLE_AUTO_NEG_FOR_DUPLEX		(1 << 2)
#define FORCE_LINK_PASS				(1 << 1)
#define SERIAL_PORT_ENABLE			(1 << 0)

/* Default RX/TX descriptor ring sizes. */
#define DEFAULT_RX_QUEUE_SIZE		400
#define DEFAULT_TX_QUEUE_SIZE		800
175 176


177 178
/*
 * RX/TX descriptors.
 *
 * The field order differs between big- and little-endian builds;
 * NOTE(review): presumably so that the layout seen by the DMA engine
 * is identical in both cases -- confirm against the datasheet.
 */
#if defined(__BIG_ENDIAN)
struct rx_desc {
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u16 buf_size;		/* Buffer size				*/
	u32 cmd_sts;		/* Descriptor command status		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
};

struct tx_desc {
	u16 byte_cnt;		/* buffer byte count			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u32 cmd_sts;		/* Command/status field			*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
};
#elif defined(__LITTLE_ENDIAN)
struct rx_desc {
	u32 cmd_sts;		/* Descriptor command status		*/
	u16 buf_size;		/* Buffer size				*/
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
};

struct tx_desc {
	u32 cmd_sts;		/* Command/status field			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u16 byte_cnt;		/* buffer byte count			*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
};
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif

216
/* RX & TX descriptor command */
#define BUFFER_OWNED_BY_DMA		0x80000000	/* set by CPU when handing to hw */

/* RX & TX descriptor status */
#define ERROR_SUMMARY			0x00000001

/* RX descriptor status */
#define LAYER_4_CHECKSUM_OK		0x40000000
#define RX_ENABLE_INTERRUPT		0x20000000
#define RX_FIRST_DESC			0x08000000
#define RX_LAST_DESC			0x04000000

/* TX descriptor command */
#define TX_ENABLE_INTERRUPT		0x00800000
#define GEN_CRC				0x00400000
#define TX_FIRST_DESC			0x00200000
#define TX_LAST_DESC			0x00100000
#define ZERO_PADDING			0x00080000
#define GEN_IP_V4_CHECKSUM		0x00040000
#define GEN_TCP_UDP_CHECKSUM		0x00020000
#define UDP_FRAME			0x00010000
#define MAC_HDR_EXTRA_4_BYTES		0x00008000
#define MAC_HDR_EXTRA_8_BYTES		0x00000200

/* Shift for the IP header length (IHL) field within tx_desc.cmd_sts. */
#define TX_IHL_SHIFT			11
241 242


243
/* global *******************************************************************/
244
/*
 * State shared by all ports on one ethernet controller block.
 */
struct mv643xx_eth_shared_private {
	/*
	 * Ethernet controller base address.
	 */
	void __iomem *base;

	/*
	 * Protects access to SMI_REG, which is shared between ports.
	 */
	struct mutex phy_lock;

	/*
	 * If we have access to the error interrupt pin (which is
	 * somewhat misnamed as it not only reflects internal errors
	 * but also reflects SMI completion), use that to wait for
	 * SMI access completion instead of polling the SMI busy bit.
	 */
	int err_interrupt;
	wait_queue_head_t smi_busy_wait;

	/*
	 * Per-port MBUS window access register value.
	 */
	u32 win_protect;

	/*
	 * Hardware-specific parameters.
	 */
	unsigned int t_clk;		/* core clock, in Hz (used for TX rate math) */
	int extended_rx_coal_limit;
	int tx_bw_control_moved;	/* use the *_MOVED TX bandwidth registers */
};


/* per-port *****************************************************************/
279
/*
 * Software copy of the hardware MIB counters for one port
 * (read from the MIB_COUNTERS(p) register window; see mib_read()).
 */
struct mib_counters {
	u64 good_octets_received;
	u32 bad_octets_received;
	u32 internal_mac_transmit_err;
	u32 good_frames_received;
	u32 bad_frames_received;
	u32 broadcast_frames_received;
	u32 multicast_frames_received;
	u32 frames_64_octets;
	u32 frames_65_to_127_octets;
	u32 frames_128_to_255_octets;
	u32 frames_256_to_511_octets;
	u32 frames_512_to_1023_octets;
	u32 frames_1024_to_max_octets;
	u64 good_octets_sent;
	u32 good_frames_sent;
	u32 excessive_collision;
	u32 multicast_frames_sent;
	u32 broadcast_frames_sent;
	u32 unrec_mac_control_received;
	u32 fc_sent;
	u32 good_fc_received;
	u32 bad_fc_received;
	u32 undersize_received;
	u32 fragments_received;
	u32 oversize_received;
	u32 jabber_received;
	u32 mac_receive_error;
	u32 bad_crc_event;
	u32 collision;
	u32 late_collision;
};

312
/*
 * State for one hardware receive queue.
 */
struct rx_queue {
	int index;			/* queue number; bit position in RXQ_COMMAND */

	int rx_ring_size;		/* number of descriptors in the ring */

	int rx_desc_count;		/* descriptors currently owned by hardware */
	int rx_curr_desc;		/* next descriptor to process (rxq_process) */
	int rx_used_desc;		/* next descriptor to refill (rxq_refill) */

	struct rx_desc *rx_desc_area;	/* descriptor ring, CPU view */
	dma_addr_t rx_desc_dma;		/* bus address of rx_desc_area */
	int rx_desc_area_size;
	struct sk_buff **rx_skb;	/* skb attached to each descriptor slot */
};

327
/*
 * State for one hardware transmit queue.
 */
struct tx_queue {
	int index;			/* queue number; bit position in TXQ_COMMAND */

	int tx_ring_size;		/* number of descriptors in the ring */

	int tx_desc_count;		/* descriptors currently in use */
	int tx_curr_desc;		/* next descriptor to hand out (submit path) */
	int tx_used_desc;		/* next descriptor to reclaim (txq_reclaim) */

	struct tx_desc *tx_desc_area;	/* descriptor ring, CPU view */
	dma_addr_t tx_desc_dma;		/* bus address of tx_desc_area */
	int tx_desc_area_size;
	struct sk_buff **tx_skb;	/* skb to free when a descriptor completes */
};

/*
 * Per-port driver state.
 */
struct mv643xx_eth_private {
	struct mv643xx_eth_shared_private *shared;
	int port_num;			/* this port's index within the controller */

	struct net_device *dev;

	/* SMI (PHY management) may live on a different shared block. */
	struct mv643xx_eth_shared_private *shared_smi;
	int phy_addr;

	spinlock_t lock;

	struct mib_counters mib_counters;
	struct work_struct tx_timeout_task;
	struct mii_if_info mii;

	/*
	 * RX state.
	 */
	int default_rx_ring_size;
	/* NOTE(review): *_sram_* fields look like optional SRAM placement
	 * for the descriptor rings -- confirm at the allocation sites. */
	unsigned long rx_desc_sram_addr;
	int rx_desc_sram_size;
	u8 rxq_mask;			/* bitmask of RX queues in use */
	int rxq_primary;
	struct napi_struct napi;
	struct timer_list rx_oom;	/* retry timer for failed skb allocation */
	struct rx_queue rxq[8];

	/*
	 * TX state.
	 */
	int default_tx_ring_size;
	unsigned long tx_desc_sram_addr;
	int tx_desc_sram_size;
	u8 txq_mask;			/* bitmask of TX queues in use */
	int txq_primary;		/* queue subject to netif flow control */
	struct tx_queue txq[8];
#ifdef MV643XX_ETH_TX_FAST_REFILL
	int tx_clean_threshold;		/* poll passes since last TX reclaim */
#endif
};
L
Linus Torvalds 已提交
382

383

384
/* port register accessors **************************************************/
385
/* Read a 32-bit controller register at @offset from the shared base. */
static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->shared->base + offset);
}
389

390
/* Write a 32-bit controller register at @offset from the shared base. */
static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->shared->base + offset);
}
394 395


396
/* rxq/txq helper functions *************************************************/
397
/* Map an rx_queue back to the port private structure that embeds it. */
static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
{
	return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
}
401

402 403
/* Map a tx_queue back to the port private structure that embeds it. */
static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
{
	return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);
}

407
static void rxq_enable(struct rx_queue *rxq)
408
{
409
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
410
	wrl(mp, RXQ_COMMAND(mp->port_num), 1 << rxq->index);
411
}
L
Linus Torvalds 已提交
412

413 414 415
static void rxq_disable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
416
	u8 mask = 1 << rxq->index;
L
Linus Torvalds 已提交
417

418 419 420
	wrl(mp, RXQ_COMMAND(mp->port_num), mask << 8);
	while (rdl(mp, RXQ_COMMAND(mp->port_num)) & mask)
		udelay(10);
421 422
}

423 424 425 426 427 428 429 430 431 432 433
/*
 * Point the hardware's current-TX-descriptor register at the slot the
 * driver will hand out next (tx_curr_desc).
 */
static void txq_reset_hw_ptr(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	u32 desc_addr;

	desc_addr = (u32)txq->tx_desc_dma +
			txq->tx_curr_desc * sizeof(struct tx_desc);

	wrl(mp, TXQ_CURRENT_DESC_PTR(mp->port_num, txq->index), desc_addr);
}

434
static void txq_enable(struct tx_queue *txq)
L
Linus Torvalds 已提交
435
{
436
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
437
	wrl(mp, TXQ_COMMAND(mp->port_num), 1 << txq->index);
L
Linus Torvalds 已提交
438 439
}

440
static void txq_disable(struct tx_queue *txq)
L
Linus Torvalds 已提交
441
{
442
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
443
	u8 mask = 1 << txq->index;
444

445 446 447 448 449 450 451 452 453
	wrl(mp, TXQ_COMMAND(mp->port_num), mask << 8);
	while (rdl(mp, TXQ_COMMAND(mp->port_num)) & mask)
		udelay(10);
}

/*
 * Wake the netdev TX queue when enough descriptors are free to accept
 * a maximally fragmented skb.  Only the primary TX queue participates
 * in netif flow control, hence the BUG_ON.
 * NOTE(review): the visible caller (mv643xx_eth_poll) holds mp->lock
 * around this call -- confirm the same for any other callers.
 */
static void __txq_maybe_wake(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);

	/*
	 * netif_{stop,wake}_queue() flow control only applies to
	 * the primary queue.
	 */
	BUG_ON(txq->index != mp->txq_primary);

	if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
		netif_wake_queue(mp->dev);
}

464 465

/* rx ***********************************************************************/
466
static void txq_reclaim(struct tx_queue *txq, int force);
467

468
/*
 * Allocate fresh receive buffers and hand up to @budget of them to the
 * hardware.  Sets *@oom to 1 if an skb allocation fails so the caller
 * can arm the OOM retry timer.  Returns the number of buffers refilled.
 */
static int rxq_refill(struct rx_queue *rxq, int budget, int *oom)
{
	int skb_size;
	int refilled;

	/*
	 * Reserve 2+14 bytes for an ethernet header (the hardware
	 * automatically prepends 2 bytes of dummy data to each
	 * received packet), 16 bytes for up to four VLAN tags, and
	 * 4 bytes for the trailing FCS -- 36 bytes total.
	 */
	skb_size = rxq_to_mp(rxq)->dev->mtu + 36;

	/*
	 * Make sure that the skb size is a multiple of 8 bytes, as
	 * the lower three bits of the receive descriptor's buffer
	 * size field are ignored by the hardware.
	 */
	skb_size = (skb_size + 7) & ~7;

	refilled = 0;
	while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
		struct sk_buff *skb;
		int unaligned;
		int rx;

		/* Over-allocate so the data can be cache-line aligned below. */
		skb = dev_alloc_skb(skb_size + dma_get_cache_alignment() - 1);
		if (skb == NULL) {
			*oom = 1;
			break;
		}

		unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
		if (unaligned)
			skb_reserve(skb, dma_get_cache_alignment() - unaligned);

		refilled++;
		rxq->rx_desc_count++;

		/* Claim the next refill slot, wrapping at the ring end. */
		rx = rxq->rx_used_desc++;
		if (rxq->rx_used_desc == rxq->rx_ring_size)
			rxq->rx_used_desc = 0;

		rxq->rx_desc_area[rx].buf_ptr = dma_map_single(NULL, skb->data,
						skb_size, DMA_FROM_DEVICE);
		rxq->rx_desc_area[rx].buf_size = skb_size;
		rxq->rx_skb[rx] = skb;
		/* buf_ptr/buf_size must be visible before ownership flips */
		wmb();
		rxq->rx_desc_area[rx].cmd_sts = BUFFER_OWNED_BY_DMA |
						RX_ENABLE_INTERRUPT;
		wmb();

		/*
		 * The hardware automatically prepends 2 bytes of
		 * dummy data to each received packet, so that the
		 * IP header ends up 16-byte aligned.
		 */
		skb_reserve(skb, 2);
	}

	return refilled;
}

531
/*
 * Process up to @budget completed receive descriptors on @rxq: unmap
 * each buffer, update statistics, and either drop the packet (split or
 * errored frames) or push it up the stack.  Returns the number of
 * descriptors processed.
 */
static int rxq_process(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	struct net_device_stats *stats = &mp->dev->stats;
	int rx;

	rx = 0;
	while (rx < budget && rxq->rx_desc_count) {
		struct rx_desc *rx_desc;
		unsigned int cmd_sts;
		struct sk_buff *skb;

		rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];

		cmd_sts = rx_desc->cmd_sts;
		/* Still owned by the DMA engine: nothing more to do. */
		if (cmd_sts & BUFFER_OWNED_BY_DMA)
			break;
		/* Read the rest of the descriptor only after cmd_sts. */
		rmb();

		skb = rxq->rx_skb[rxq->rx_curr_desc];
		rxq->rx_skb[rxq->rx_curr_desc] = NULL;

		rxq->rx_curr_desc++;
		if (rxq->rx_curr_desc == rxq->rx_ring_size)
			rxq->rx_curr_desc = 0;

		dma_unmap_single(NULL, rx_desc->buf_ptr,
				 rx_desc->buf_size, DMA_FROM_DEVICE);
		rxq->rx_desc_count--;
		rx++;

		/*
		 * Update statistics.
		 *
		 * Note that the descriptor byte count includes 2 dummy
		 * bytes automatically inserted by the hardware at the
		 * start of the packet (which we don't count), and a 4
		 * byte CRC at the end of the packet (which we do count).
		 */
		stats->rx_packets++;
		stats->rx_bytes += rx_desc->byte_cnt - 2;

		/*
		 * In case we received a packet without first / last bits
		 * on, or the error summary bit is set, the packet needs
		 * to be dropped.
		 */
		if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
					(RX_FIRST_DESC | RX_LAST_DESC))
				|| (cmd_sts & ERROR_SUMMARY)) {
			stats->rx_dropped++;

			if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
				(RX_FIRST_DESC | RX_LAST_DESC)) {
				if (net_ratelimit())
					dev_printk(KERN_ERR, &mp->dev->dev,
						   "received packet spanning "
						   "multiple descriptors\n");
			}

			if (cmd_sts & ERROR_SUMMARY)
				stats->rx_errors++;

			dev_kfree_skb(skb);
		} else {
			/*
			 * The -4 is for the CRC in the trailer of the
			 * received packet
			 */
			skb_put(skb, rx_desc->byte_cnt - 2 - 4);

			if (cmd_sts & LAYER_4_CHECKSUM_OK) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				skb->csum = htons(
					(cmd_sts & 0x0007fff8) >> 3);
			}
			skb->protocol = eth_type_trans(skb, mp->dev);
			netif_receive_skb(skb);
		}

		mp->dev->last_rx = jiffies;
	}

	return rx;
}

617
/*
 * NAPI poll handler: optionally reclaim TX descriptors every few
 * passes, then process and refill all enabled RX queues.  When less
 * than @budget work was done, complete NAPI and re-enable interrupts;
 * if an skb allocation failed, arm the OOM retry timer instead of
 * spinning.
 */
static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
{
	struct mv643xx_eth_private *mp;
	int work_done;
	int oom;
	int i;

	mp = container_of(napi, struct mv643xx_eth_private, napi);

#ifdef MV643XX_ETH_TX_FAST_REFILL
	/* Reclaim TX descriptors on every sixth poll pass. */
	if (++mp->tx_clean_threshold > 5) {
		mp->tx_clean_threshold = 0;
		for (i = 0; i < 8; i++)
			if (mp->txq_mask & (1 << i))
				txq_reclaim(mp->txq + i, 0);

		if (netif_carrier_ok(mp->dev)) {
			spin_lock_irq(&mp->lock);
			__txq_maybe_wake(mp->txq + mp->txq_primary);
			spin_unlock_irq(&mp->lock);
		}
	}
#endif

	/* Service enabled RX queues, highest-numbered first. */
	work_done = 0;
	oom = 0;
	for (i = 7; work_done < budget && i >= 0; i--) {
		if (mp->rxq_mask & (1 << i)) {
			struct rx_queue *rxq = mp->rxq + i;

			work_done += rxq_process(rxq, budget - work_done);
			work_done += rxq_refill(rxq, budget - work_done, &oom);
		}
	}

	if (work_done < budget) {
		if (oom)
			mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
		netif_rx_complete(mp->dev, napi);
		wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);
	}

	return work_done;
}

/*
 * Timer callback (presumably installed on mp->rx_oom -- confirm at the
 * timer setup site): reschedule NAPI so rxq_refill() can retry skb
 * allocation once memory pressure has eased.
 *
 * Dropped the 'inline' qualifier: this function's address is taken as
 * a timer callback, so it must exist out-of-line anyway and the hint
 * was misleading.
 */
static void oom_timer_wrapper(unsigned long data)
{
	struct mv643xx_eth_private *mp = (void *)data;

	napi_schedule(&mp->napi);
}

669 670 671

/* tx ***********************************************************************/
static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
L
Linus Torvalds 已提交
672
{
673
	int frag;
L
Linus Torvalds 已提交
674

675
	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
676 677
		skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
		if (fragp->size <= 8 && fragp->page_offset & 7)
678
			return 1;
L
Linus Torvalds 已提交
679
	}
680

681 682
	return 0;
}
683

684
static int txq_alloc_desc_index(struct tx_queue *txq)
685 686
{
	int tx_desc_curr;
687

688
	BUG_ON(txq->tx_desc_count >= txq->tx_ring_size);
L
Linus Torvalds 已提交
689

690 691 692
	tx_desc_curr = txq->tx_curr_desc++;
	if (txq->tx_curr_desc == txq->tx_ring_size)
		txq->tx_curr_desc = 0;
693

694
	BUG_ON(txq->tx_curr_desc == txq->tx_used_desc);
695

696 697
	return tx_desc_curr;
}
698

699
/*
 * Fill TX descriptors for each page fragment of @skb.  The caller
 * (txq_submit_skb()) owns the first descriptor and sets its cmd_sts
 * last, so ownership of these fragment descriptors may be given to
 * the DMA engine immediately.
 */
static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int frag;

	for (frag = 0; frag < nr_frags; frag++) {
		skb_frag_t *this_frag;
		int tx_index;
		struct tx_desc *desc;

		this_frag = &skb_shinfo(skb)->frags[frag];
		tx_index = txq_alloc_desc_index(txq);
		desc = &txq->tx_desc_area[tx_index];

		/*
		 * The last fragment will generate an interrupt
		 * which will free the skb on TX completion.
		 */
		if (frag == nr_frags - 1) {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA |
					ZERO_PADDING | TX_LAST_DESC |
					TX_ENABLE_INTERRUPT;
			txq->tx_skb[tx_index] = skb;
		} else {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA;
			txq->tx_skb[tx_index] = NULL;
		}

		desc->l4i_chk = 0;
		desc->byte_cnt = this_frag->size;
		desc->buf_ptr = dma_map_page(NULL, this_frag->page,
						this_frag->page_offset,
						this_frag->size,
						DMA_TO_DEVICE);
	}
}

736 737 738 739
/* Reinterpret a 16-bit checksum as big-endian without byte-swapping. */
static inline __be16 sum16_as_be(__sum16 sum)
{
	return (__force __be16)sum;
}
L
Linus Torvalds 已提交
740

741
/*
 * Queue @skb for transmission on @txq: fill the first descriptor (and
 * fragment descriptors, if any), set up hardware checksum offload when
 * requested, then hand ownership to the DMA engine and kick the queue.
 * Caller must hold mp->lock and have verified there is ring space.
 */
static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int tx_index;
	struct tx_desc *desc;
	u32 cmd_sts;
	int length;

	cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;

	tx_index = txq_alloc_desc_index(txq);
	desc = &txq->tx_desc_area[tx_index];

	if (nr_frags) {
		txq_submit_frag_skb(txq, skb);

		length = skb_headlen(skb);
		/* The last fragment descriptor owns the skb reference. */
		txq->tx_skb[tx_index] = NULL;
	} else {
		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
		length = skb->len;
		txq->tx_skb[tx_index] = skb;
	}

	desc->byte_cnt = length;
	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int mac_hdr_len;

		BUG_ON(skb->protocol != htons(ETH_P_IP) &&
		       skb->protocol != htons(ETH_P_8021Q));

		cmd_sts |= GEN_TCP_UDP_CHECKSUM |
			   GEN_IP_V4_CHECKSUM   |
			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;

		/* Tell the hardware about any VLAN/extra MAC header bytes. */
		mac_hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
		switch (mac_hdr_len - ETH_HLEN) {
		case 0:
			break;
		case 4:
			cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
			break;
		case 8:
			cmd_sts |= MAC_HDR_EXTRA_8_BYTES;
			break;
		case 12:
			cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
			cmd_sts |= MAC_HDR_EXTRA_8_BYTES;
			break;
		default:
			if (net_ratelimit())
				dev_printk(KERN_ERR, &txq_to_mp(txq)->dev->dev,
				   "mac header length is %d?!\n", mac_hdr_len);
			break;
		}

		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_UDP:
			cmd_sts |= UDP_FRAME;
			desc->l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
			break;
		case IPPROTO_TCP:
			desc->l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
			break;
		default:
			BUG();
		}
	} else {
		/* Errata BTS #50, IHL must be 5 if no HW checksum */
		cmd_sts |= 5 << TX_IHL_SHIFT;
		desc->l4i_chk = 0;
	}

	/* ensure all other descriptors are written before first cmd_sts */
	wmb();
	desc->cmd_sts = cmd_sts;

	/* clear TX_END interrupt status */
	wrl(mp, INT_CAUSE(mp->port_num), ~(INT_TX_END_0 << txq->index));
	rdl(mp, INT_CAUSE(mp->port_num));

	/* ensure all descriptors are written before poking hardware */
	wmb();
	txq_enable(txq);

	txq->tx_desc_count += nr_frags + 1;
}

L
Lennert Buytenhek 已提交
832
/*
 * ndo hard_start_xmit: linearize skbs the DMA engine cannot handle,
 * then submit to the primary TX queue.  A full ring drops the packet
 * (returning NETDEV_TX_OK, since the skb has been consumed); the queue
 * is stopped when too few descriptors remain for a worst-case skb.
 */
static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct tx_queue *txq;
	unsigned long flags;

	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
		stats->tx_dropped++;
		dev_printk(KERN_DEBUG, &dev->dev,
			   "failed to linearize skb with tiny "
			   "unaligned fragment\n");
		return NETDEV_TX_BUSY;
	}

	spin_lock_irqsave(&mp->lock, flags);

	txq = mp->txq + mp->txq_primary;

	if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
		spin_unlock_irqrestore(&mp->lock, flags);
		if (txq->index == mp->txq_primary && net_ratelimit())
			dev_printk(KERN_ERR, &dev->dev,
				   "primary tx queue full?!\n");
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	txq_submit_skb(txq, skb);
	stats->tx_bytes += skb->len;
	stats->tx_packets++;
	dev->trans_start = jiffies;

	/* Stop the queue before it can overflow on the next packet. */
	if (txq->index == mp->txq_primary) {
		int entries_left;

		entries_left = txq->tx_ring_size - txq->tx_desc_count;
		if (entries_left < MAX_SKB_FRAGS + 1)
			netif_stop_queue(dev);
	}

	spin_unlock_irqrestore(&mp->lock, flags);

	return NETDEV_TX_OK;
}

878

879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901
/* tx rate control **********************************************************/
/*
 * Set total maximum TX rate (shared by all TX queues for this port)
 * to 'rate' bits per second, with a maximum burst of 'burst' bytes.
 */
static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
{
	int token_rate;
	int mtu;
	int bucket_size;

	token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	mtu = (mp->dev->mtu + 255) >> 8;
	if (mtu > 63)
		mtu = 63;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

902 903 904 905 906 907 908 909 910
	if (mp->shared->tx_bw_control_moved) {
		wrl(mp, TX_BW_RATE_MOVED(mp->port_num), token_rate);
		wrl(mp, TX_BW_MTU_MOVED(mp->port_num), mtu);
		wrl(mp, TX_BW_BURST_MOVED(mp->port_num), bucket_size);
	} else {
		wrl(mp, TX_BW_RATE(mp->port_num), token_rate);
		wrl(mp, TX_BW_MTU(mp->port_num), mtu);
		wrl(mp, TX_BW_BURST(mp->port_num), bucket_size);
	}
911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926
}

/*
 * Per-queue analogue of tx_set_rate(): set this TX queue's maximum
 * rate ('rate' bits per second) and burst ('burst' bytes).
 */
static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int token_rate;
	int bucket_size;

	/* Tokens per tick, derived from the core clock; 10-bit field. */
	token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	/* Burst bucket in 256-byte units, clamped to 16 bits. */
	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	wrl(mp, TXQ_BW_TOKENS(mp->port_num, txq->index), token_rate << 14);
	wrl(mp, TXQ_BW_CONF(mp->port_num, txq->index),
			(bucket_size << 10) | token_rate);
}
static void txq_set_fixed_prio_mode(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int off;
	u32 val;

	/*
	 * Turn on fixed priority mode.
	 */
941 942 943 944
	if (mp->shared->tx_bw_control_moved)
		off = TXQ_FIX_PRIO_CONF_MOVED(mp->port_num);
	else
		off = TXQ_FIX_PRIO_CONF(mp->port_num);
945 946

	val = rdl(mp, off);
947
	val |= 1 << txq->index;
948 949 950 951 952 953 954 955 956 957 958 959
	wrl(mp, off, val);
}

static void txq_set_wrr(struct tx_queue *txq, int weight)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int off;
	u32 val;

	/*
	 * Turn off fixed priority mode.
	 */
960 961 962 963
	if (mp->shared->tx_bw_control_moved)
		off = TXQ_FIX_PRIO_CONF_MOVED(mp->port_num);
	else
		off = TXQ_FIX_PRIO_CONF(mp->port_num);
964 965

	val = rdl(mp, off);
966
	val &= ~(1 << txq->index);
967 968 969 970 971
	wrl(mp, off, val);

	/*
	 * Configure WRR weight for this queue.
	 */
972
	off = TXQ_BW_WRR_CONF(mp->port_num, txq->index);
973 974 975 976 977 978 979

	val = rdl(mp, off);
	val = (val & ~0xff) | (weight & 0xff);
	wrl(mp, off, val);
}


980
/* mii management interface *************************************************/
981 982 983 984 985 986 987 988 989 990 991 992
/*
 * Error-interrupt handler: its only job here is SMI completion --
 * acknowledge the SMI-done cause bit and wake any thread sleeping
 * in smi_wait_ready().
 */
static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id)
{
	struct mv643xx_eth_shared_private *msp = dev_id;

	if (!(readl(msp->base + ERR_INT_CAUSE) & ERR_INT_SMI_DONE))
		return IRQ_NONE;

	writel(~ERR_INT_SMI_DONE, msp->base + ERR_INT_CAUSE);
	wake_up(&msp->smi_busy_wait);

	return IRQ_HANDLED;
}
993

994
/* Return nonzero when the SMI interface is idle (busy bit clear). */
static int smi_is_done(struct mv643xx_eth_shared_private *msp)
{
	return !(readl(msp->base + SMI_REG) & SMI_BUSY);
}
L
Linus Torvalds 已提交
998

999 1000 1001 1002
/*
 * Wait for the SMI interface to become idle.  Without an error
 * interrupt, poll the busy bit (up to ~100ms in 10ms steps);
 * otherwise sleep until mv643xx_eth_err_irq() signals completion.
 * Returns 0 on success, -ETIMEDOUT on timeout.
 */
static int smi_wait_ready(struct mv643xx_eth_shared_private *msp)
{
	if (msp->err_interrupt == NO_IRQ) {
		int i;

		for (i = 0; !smi_is_done(msp); i++) {
			if (i == 10)
				return -ETIMEDOUT;
			msleep(10);
		}

		return 0;
	}

	if (!wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp),
				msecs_to_jiffies(100)))
		return -ETIMEDOUT;

	return 0;
}

static int smi_reg_read(struct mv643xx_eth_private *mp,
			unsigned int addr, unsigned int reg)
{
	struct mv643xx_eth_shared_private *msp = mp->shared_smi;
	void __iomem *smi_reg = msp->base + SMI_REG;
	int ret;

	mutex_lock(&msp->phy_lock);

	if (smi_wait_ready(msp)) {
		printk("%s: SMI bus busy timeout\n", mp->dev->name);
		ret = -ETIMEDOUT;
		goto out;
L
Linus Torvalds 已提交
1033 1034
	}

L
Lennert Buytenhek 已提交
1035
	writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);
L
Linus Torvalds 已提交
1036

1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047
	if (smi_wait_ready(msp)) {
		printk("%s: SMI bus busy timeout\n", mp->dev->name);
		ret = -ETIMEDOUT;
		goto out;
	}

	ret = readl(smi_reg);
	if (!(ret & SMI_READ_VALID)) {
		printk("%s: SMI bus read not valid\n", mp->dev->name);
		ret = -ENODEV;
		goto out;
1048 1049
	}

1050 1051
	ret &= 0xffff;

1052
out:
1053 1054 1055
	mutex_unlock(&msp->phy_lock);

	return ret;
L
Linus Torvalds 已提交
1056 1057
}

1058 1059
static int smi_reg_write(struct mv643xx_eth_private *mp, unsigned int addr,
			 unsigned int reg, unsigned int value)
L
Linus Torvalds 已提交
1060
{
1061 1062
	struct mv643xx_eth_shared_private *msp = mp->shared_smi;
	void __iomem *smi_reg = msp->base + SMI_REG;
L
Linus Torvalds 已提交
1063

1064
	mutex_lock(&msp->phy_lock);
1065

1066 1067 1068 1069
	if (smi_wait_ready(msp)) {
		printk("%s: SMI bus busy timeout\n", mp->dev->name);
		mutex_unlock(&msp->phy_lock);
		return -ETIMEDOUT;
L
Linus Torvalds 已提交
1070 1071
	}

L
Lennert Buytenhek 已提交
1072 1073
	writel(SMI_OPCODE_WRITE | (reg << 21) |
		(addr << 16) | (value & 0xffff), smi_reg);
1074 1075 1076 1077

	mutex_unlock(&msp->phy_lock);

	return 0;
1078
}
L
Linus Torvalds 已提交
1079

1080 1081

/* mib counters *************************************************************/
L
Lennert Buytenhek 已提交
1082
/* Read one 32-bit MIB counter from this port's counter window. */
static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
{
	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
}

L
Lennert Buytenhek 已提交
1087
/*
 * Zero the hardware MIB counters by reading every one of them;
 * mib_counters_update relies on the same read-to-clear behaviour.
 */
static void mib_counters_clear(struct mv643xx_eth_private *mp)
{
	int off;

	for (off = 0; off < 0x80; off += 4)
		mib_read(mp, off);
}
1094

L
Lennert Buytenhek 已提交
1095
/*
 * Fold the current hardware MIB counter values into the software
 * accumulators.  The octet counters are 64-bit, split over two
 * 32-bit registers (low word at the lower offset).  Reads appear
 * to clear the hardware counters (mib_counters_clear relies on
 * this) — hence the '+=' accumulation here.
 */
static void mib_counters_update(struct mv643xx_eth_private *mp)
{
	struct mib_counters *p = &mp->mib_counters;

	p->good_octets_received += mib_read(mp, 0x00);
	p->good_octets_received += (u64)mib_read(mp, 0x04) << 32;
	p->bad_octets_received += mib_read(mp, 0x08);
	p->internal_mac_transmit_err += mib_read(mp, 0x0c);
	p->good_frames_received += mib_read(mp, 0x10);
	p->bad_frames_received += mib_read(mp, 0x14);
	p->broadcast_frames_received += mib_read(mp, 0x18);
	p->multicast_frames_received += mib_read(mp, 0x1c);
	p->frames_64_octets += mib_read(mp, 0x20);
	p->frames_65_to_127_octets += mib_read(mp, 0x24);
	p->frames_128_to_255_octets += mib_read(mp, 0x28);
	p->frames_256_to_511_octets += mib_read(mp, 0x2c);
	p->frames_512_to_1023_octets += mib_read(mp, 0x30);
	p->frames_1024_to_max_octets += mib_read(mp, 0x34);
	p->good_octets_sent += mib_read(mp, 0x38);
	p->good_octets_sent += (u64)mib_read(mp, 0x3c) << 32;
	p->good_frames_sent += mib_read(mp, 0x40);
	p->excessive_collision += mib_read(mp, 0x44);
	p->multicast_frames_sent += mib_read(mp, 0x48);
	p->broadcast_frames_sent += mib_read(mp, 0x4c);
	p->unrec_mac_control_received += mib_read(mp, 0x50);
	p->fc_sent += mib_read(mp, 0x54);
	p->good_fc_received += mib_read(mp, 0x58);
	p->bad_fc_received += mib_read(mp, 0x5c);
	p->undersize_received += mib_read(mp, 0x60);
	p->fragments_received += mib_read(mp, 0x64);
	p->oversize_received += mib_read(mp, 0x68);
	p->jabber_received += mib_read(mp, 0x6c);
	p->mac_receive_error += mib_read(mp, 0x70);
	p->bad_crc_event += mib_read(mp, 0x74);
	p->collision += mib_read(mp, 0x78);
	p->late_collision += mib_read(mp, 0x7c);
}

1133 1134

/* ethtool ******************************************************************/
1135
/*
 * Description of one ethtool statistic: the value lives either in
 * the generic net_device stats (netdev_off >= 0) or in our private
 * MIB counter block (mp_off >= 0); exactly one offset is valid,
 * the other is -1.
 */
struct mv643xx_eth_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int netdev_off;
	int mp_off;
};

/* Statistic sourced from struct net_device_stats. */
#define SSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct net_device_stats, m),		\
	  offsetof(struct net_device, stats.m), -1 }

/* Statistic sourced from our hardware MIB counters. */
#define MIBSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct mib_counters, m),		\
	  -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }

static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
	SSTAT(rx_packets),
	SSTAT(tx_packets),
	SSTAT(rx_bytes),
	SSTAT(tx_bytes),
	SSTAT(rx_errors),
	SSTAT(tx_errors),
	SSTAT(rx_dropped),
	SSTAT(tx_dropped),
	MIBSTAT(good_octets_received),
	MIBSTAT(bad_octets_received),
	MIBSTAT(internal_mac_transmit_err),
	MIBSTAT(good_frames_received),
	MIBSTAT(bad_frames_received),
	MIBSTAT(broadcast_frames_received),
	MIBSTAT(multicast_frames_received),
	MIBSTAT(frames_64_octets),
	MIBSTAT(frames_65_to_127_octets),
	MIBSTAT(frames_128_to_255_octets),
	MIBSTAT(frames_256_to_511_octets),
	MIBSTAT(frames_512_to_1023_octets),
	MIBSTAT(frames_1024_to_max_octets),
	MIBSTAT(good_octets_sent),
	MIBSTAT(good_frames_sent),
	MIBSTAT(excessive_collision),
	MIBSTAT(multicast_frames_sent),
	MIBSTAT(broadcast_frames_sent),
	MIBSTAT(unrec_mac_control_received),
	MIBSTAT(fc_sent),
	MIBSTAT(good_fc_received),
	MIBSTAT(bad_fc_received),
	MIBSTAT(undersize_received),
	MIBSTAT(fragments_received),
	MIBSTAT(oversize_received),
	MIBSTAT(jabber_received),
	MIBSTAT(mac_receive_error),
	MIBSTAT(bad_crc_event),
	MIBSTAT(collision),
	MIBSTAT(late_collision),
};

1191
static int mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1192
{
1193
	struct mv643xx_eth_private *mp = netdev_priv(dev);
1194 1195 1196 1197
	int err;

	err = mii_ethtool_gset(&mp->mii, cmd);

L
Lennert Buytenhek 已提交
1198 1199 1200
	/*
	 * The MAC does not support 1000baseT_Half.
	 */
1201 1202 1203 1204 1205 1206
	cmd->supported &= ~SUPPORTED_1000baseT_Half;
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	return err;
}

1207 1208
static int mv643xx_eth_get_settings_phyless(struct net_device *dev, struct ethtool_cmd *cmd)
{
1209 1210 1211 1212 1213
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 port_status;

	port_status = rdl(mp, PORT_STATUS(mp->port_num));

1214 1215
	cmd->supported = SUPPORTED_MII;
	cmd->advertising = ADVERTISED_MII;
1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230
	switch (port_status & PORT_SPEED_MASK) {
	case PORT_SPEED_10:
		cmd->speed = SPEED_10;
		break;
	case PORT_SPEED_100:
		cmd->speed = SPEED_100;
		break;
	case PORT_SPEED_1000:
		cmd->speed = SPEED_1000;
		break;
	default:
		cmd->speed = -1;
		break;
	}
	cmd->duplex = (port_status & FULL_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
1231 1232 1233 1234 1235 1236 1237 1238 1239 1240
	cmd->port = PORT_MII;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_DISABLE;
	cmd->maxtxpkt = 1;
	cmd->maxrxpkt = 1;

	return 0;
}

1241
static int mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
L
Linus Torvalds 已提交
1242
{
1243
	struct mv643xx_eth_private *mp = netdev_priv(dev);
1244

L
Lennert Buytenhek 已提交
1245 1246 1247 1248 1249
	/*
	 * The MAC does not support 1000baseT_Half.
	 */
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

1250
	return mii_ethtool_sset(&mp->mii, cmd);
1251
}
L
Linus Torvalds 已提交
1252

1253 1254 1255 1256 1257
/* Without a PHY there is nothing to configure; reject all requests. */
static int mv643xx_eth_set_settings_phyless(struct net_device *dev, struct ethtool_cmd *cmd)
{
	return -EINVAL;
}

L
Lennert Buytenhek 已提交
1258 1259
/* Report static driver identification strings to ethtool. */
static void mv643xx_eth_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *drvinfo)
{
	strncpy(drvinfo->driver,  mv643xx_eth_driver_name, 32);
	strncpy(drvinfo->version, mv643xx_eth_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, "platform", 32);
	drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
}
L
Linus Torvalds 已提交
1267

L
Lennert Buytenhek 已提交
1268
/* Restart autonegotiation via the generic MII helper. */
static int mv643xx_eth_nway_reset(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	return mii_nway_restart(&mp->mii);
}
L
Linus Torvalds 已提交
1274

1275 1276 1277 1278 1279
/* No PHY means no autonegotiation to restart. */
static int mv643xx_eth_nway_reset_phyless(struct net_device *dev)
{
	return -EINVAL;
}

1280 1281
/* Report link state as seen by the generic MII layer. */
static u32 mv643xx_eth_get_link(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	return mii_link_ok(&mp->mii);
}
L
Linus Torvalds 已提交
1286

1287 1288 1289 1290 1291
/* Without a PHY, the link is reported as always up. */
static u32 mv643xx_eth_get_link_phyless(struct net_device *dev)
{
	return 1;
}

L
Lennert Buytenhek 已提交
1292 1293
/* Copy the statistic names into ethtool's string buffer. */
static void mv643xx_eth_get_strings(struct net_device *dev,
				    uint32_t stringset, uint8_t *data)
{
	int i;

	/* Only the statistics string set is supported. */
	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++)
		memcpy(data + i * ETH_GSTRING_LEN,
		       mv643xx_eth_stats[i].stat_string,
		       ETH_GSTRING_LEN);
}
L
Linus Torvalds 已提交
1305

L
Lennert Buytenhek 已提交
1306 1307 1308
static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
					  struct ethtool_stats *stats,
					  uint64_t *data)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int i;

	/* Fold the latest hardware MIB counts into the soft counters. */
	mib_counters_update(mp);

	for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
		const struct mv643xx_eth_stats *stat = mv643xx_eth_stats + i;
		void *p;

		/*
		 * Each entry lives either in the generic netdev stats
		 * or in our private MIB counter block.
		 */
		if (stat->netdev_off >= 0)
			p = ((void *)mp->dev) + stat->netdev_off;
		else
			p = ((void *)mp) + stat->mp_off;

		data[i] = (stat->sizeof_stat == 8) ?
				*(uint64_t *)p : *(uint32_t *)p;
	}
}
L
Linus Torvalds 已提交
1330

L
Lennert Buytenhek 已提交
1331
static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
1332
{
L
Lennert Buytenhek 已提交
1333
	if (sset == ETH_SS_STATS)
1334
		return ARRAY_SIZE(mv643xx_eth_stats);
L
Lennert Buytenhek 已提交
1335 1336

	return -EOPNOTSUPP;
1337
}
L
Linus Torvalds 已提交
1338

1339
/* ethtool operations for ports driven through an attached PHY. */
static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
	.get_settings		= mv643xx_eth_get_settings,
	.set_settings		= mv643xx_eth_set_settings,
	.get_drvinfo		= mv643xx_eth_get_drvinfo,
	.nway_reset		= mv643xx_eth_nway_reset,
	.get_link		= mv643xx_eth_get_link,
	.set_sg			= ethtool_op_set_sg,
	.get_strings		= mv643xx_eth_get_strings,
	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
	.get_sset_count		= mv643xx_eth_get_sset_count,
};
L
Linus Torvalds 已提交
1350

1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362
/* ethtool operations for ports with no PHY (fixed-link style). */
static const struct ethtool_ops mv643xx_eth_ethtool_ops_phyless = {
	.get_settings		= mv643xx_eth_get_settings_phyless,
	.set_settings		= mv643xx_eth_set_settings_phyless,
	.get_drvinfo		= mv643xx_eth_get_drvinfo,
	.nway_reset		= mv643xx_eth_nway_reset_phyless,
	.get_link		= mv643xx_eth_get_link_phyless,
	.set_sg			= ethtool_op_set_sg,
	.get_strings		= mv643xx_eth_get_strings,
	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
	.get_sset_count		= mv643xx_eth_get_sset_count,
};

1363

1364
/* address handling *********************************************************/
1365
static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
1366 1367 1368
{
	unsigned int mac_h;
	unsigned int mac_l;
L
Linus Torvalds 已提交
1369

L
Lennert Buytenhek 已提交
1370 1371
	mac_h = rdl(mp, MAC_ADDR_HIGH(mp->port_num));
	mac_l = rdl(mp, MAC_ADDR_LOW(mp->port_num));
L
Linus Torvalds 已提交
1372

1373 1374 1375 1376 1377 1378
	addr[0] = (mac_h >> 24) & 0xff;
	addr[1] = (mac_h >> 16) & 0xff;
	addr[2] = (mac_h >> 8) & 0xff;
	addr[3] = mac_h & 0xff;
	addr[4] = (mac_l >> 8) & 0xff;
	addr[5] = mac_l & 0xff;
1379
}
L
Linus Torvalds 已提交
1380

1381
static void init_mac_tables(struct mv643xx_eth_private *mp)
1382
{
L
Lennert Buytenhek 已提交
1383
	int i;
L
Linus Torvalds 已提交
1384

L
Lennert Buytenhek 已提交
1385 1386 1387
	for (i = 0; i < 0x100; i += 4) {
		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, 0);
		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, 0);
1388
	}
L
Lennert Buytenhek 已提交
1389 1390 1391

	for (i = 0; i < 0x10; i += 4)
		wrl(mp, UNICAST_TABLE(mp->port_num) + i, 0);
1392
}
1393

1394
static void set_filter_table_entry(struct mv643xx_eth_private *mp,
				   int table, unsigned char entry)
{
	unsigned int reg;

	/*
	 * Each 32-bit table register packs four entries; set the
	 * "accepts frame" bit in the byte that holds this entry.
	 */
	reg = rdl(mp, table + (entry & 0xfc));
	reg |= 0x01 << (8 * (entry & 3));
	wrl(mp, table + (entry & 0xfc), reg);
}

1405
static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
L
Linus Torvalds 已提交
1406
{
1407 1408 1409
	unsigned int mac_h;
	unsigned int mac_l;
	int table;
L
Linus Torvalds 已提交
1410

L
Lennert Buytenhek 已提交
1411 1412
	mac_l = (addr[4] << 8) | addr[5];
	mac_h = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
1413

L
Lennert Buytenhek 已提交
1414 1415
	wrl(mp, MAC_ADDR_LOW(mp->port_num), mac_l);
	wrl(mp, MAC_ADDR_HIGH(mp->port_num), mac_h);
L
Linus Torvalds 已提交
1416

L
Lennert Buytenhek 已提交
1417
	table = UNICAST_TABLE(mp->port_num);
1418
	set_filter_table_entry(mp, table, addr[5] & 0x0f);
L
Linus Torvalds 已提交
1419 1420
}

L
Lennert Buytenhek 已提交
1421
/* net_device hook: adopt a new MAC address and reprogram the filters. */
static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	/* +2 is for the offset of the HW addr type */
	memcpy(dev->dev_addr, addr + 2, 6);

	/* Clear all filter tables, then accept only the new address. */
	init_mac_tables(mp);
	uc_addr_set(mp, dev->dev_addr);

	return 0;
}

1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451
/*
 * Hash a six-octet address for the 'other' multicast filter table:
 * bitwise CRC with polynomial 0x107 (x^8 + x^2 + x + 1), processed
 * MSB-first over each octet.
 */
static int addr_crc(unsigned char *addr)
{
	int rem = 0;
	int byte;

	for (byte = 0; byte < 6; byte++) {
		int bit;

		rem = (rem ^ addr[byte]) << 8;
		for (bit = 7; bit >= 0; bit--) {
			if (rem & (0x100 << bit))
				rem ^= 0x107 << bit;
		}
	}

	return rem;
}

L
Lennert Buytenhek 已提交
1452
/*
 * net_device hook: rebuild the RX filtering state from dev->flags
 * and the device's multicast list.
 */
static void mv643xx_eth_set_rx_mode(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 port_config;
	struct dev_addr_list *addr;
	int i;

	/* Unicast promiscuous mode tracks IFF_PROMISC. */
	port_config = rdl(mp, PORT_CONFIG(mp->port_num));
	if (dev->flags & IFF_PROMISC)
		port_config |= UNICAST_PROMISCUOUS_MODE;
	else
		port_config &= ~UNICAST_PROMISCUOUS_MODE;
	wrl(mp, PORT_CONFIG(mp->port_num), port_config);

	/*
	 * Promisc/allmulti: set the "accept" bit for every entry of
	 * both multicast tables (0x01 per packed byte) and be done.
	 */
	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		int port_num = mp->port_num;
		u32 accept = 0x01010101;

		for (i = 0; i < 0x100; i += 4) {
			wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept);
			wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept);
		}
		return;
	}

	/* Otherwise, clear both tables and re-add the mc list. */
	for (i = 0; i < 0x100; i += 4) {
		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, 0);
		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, 0);
	}

	for (addr = dev->mc_list; addr != NULL; addr = addr->next) {
		u8 *a = addr->da_addr;
		int table;

		if (addr->da_addrlen != 6)
			continue;

		/*
		 * 01:00:5e:00:00:xx addresses index the 'special'
		 * table directly by their last octet; everything else
		 * is hashed into the 'other' table via addr_crc().
		 */
		if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
			table = SPECIAL_MCAST_TABLE(mp->port_num);
			set_filter_table_entry(mp, table, a[5]);
		} else {
			int crc = addr_crc(a);

			table = OTHER_MCAST_TABLE(mp->port_num);
			set_filter_table_entry(mp, table, crc);
		}
	}
}
1500 1501


1502
/* rx/tx queue initialisation ***********************************************/
1503
/*
 * Allocate and initialise one RX queue: descriptor ring (in on-chip
 * SRAM for the primary queue if it fits, otherwise in coherent DMA
 * memory), the parallel skb pointer array, and the circular
 * next-descriptor links.  Returns 0 or -ENOMEM.
 */
static int rxq_init(struct mv643xx_eth_private *mp, int index)
{
	struct rx_queue *rxq = mp->rxq + index;
	struct rx_desc *rx_desc;
	int size;
	int i;

	rxq->index = index;

	rxq->rx_ring_size = mp->default_rx_ring_size;

	rxq->rx_desc_count = 0;
	rxq->rx_curr_desc = 0;
	rxq->rx_used_desc = 0;

	size = rxq->rx_ring_size * sizeof(struct rx_desc);

	/* Primary queue's ring may live in on-chip SRAM if it fits. */
	if (index == mp->rxq_primary && size <= mp->rx_desc_sram_size) {
		rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
						mp->rx_desc_sram_size);
		rxq->rx_desc_dma = mp->rx_desc_sram_addr;
	} else {
		rxq->rx_desc_area = dma_alloc_coherent(NULL, size,
							&rxq->rx_desc_dma,
							GFP_KERNEL);
	}

	if (rxq->rx_desc_area == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate rx ring (%d bytes)\n", size);
		goto out;
	}
	memset(rxq->rx_desc_area, 0, size);

	rxq->rx_desc_area_size = size;
	rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb),
								GFP_KERNEL);
	if (rxq->rx_skb == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate rx skb ring\n");
		goto out_free;
	}

	/* Chain the descriptors into a circular list. */
	rx_desc = (struct rx_desc *)rxq->rx_desc_area;
	for (i = 0; i < rxq->rx_ring_size; i++) {
		int nexti;

		nexti = i + 1;
		if (nexti == rxq->rx_ring_size)
			nexti = 0;

		rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
					nexti * sizeof(struct rx_desc);
	}

	return 0;


out_free:
	/* Undo the ring allocation, matching the path used above. */
	if (index == mp->rxq_primary && size <= mp->rx_desc_sram_size)
		iounmap(rxq->rx_desc_area);
	else
		dma_free_coherent(NULL, size,
				  rxq->rx_desc_area,
				  rxq->rx_desc_dma);

out:
	return -ENOMEM;
}
1572

1573
/* Tear down one RX queue: stop it, free parked skbs and the ring. */
static void rxq_deinit(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	int i;

	rxq_disable(rxq);

	/* Free any receive buffers still attached to descriptors. */
	for (i = 0; i < rxq->rx_ring_size; i++) {
		if (rxq->rx_skb[i]) {
			dev_kfree_skb(rxq->rx_skb[i]);
			rxq->rx_desc_count--;
		}
	}

	/* Accounting mismatch means we leaked skbs somewhere. */
	if (rxq->rx_desc_count) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "error freeing rx ring -- %d skbs stuck\n",
			   rxq->rx_desc_count);
	}

	/* Release the ring from SRAM or coherent DMA memory. */
	if (rxq->index == mp->rxq_primary &&
	    rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
		iounmap(rxq->rx_desc_area);
	else
		dma_free_coherent(NULL, rxq->rx_desc_area_size,
				  rxq->rx_desc_area, rxq->rx_desc_dma);

	kfree(rxq->rx_skb);
}
L
Linus Torvalds 已提交
1602

1603
/*
 * Allocate and initialise one TX queue: descriptor ring (in on-chip
 * SRAM for the primary queue if it fits, otherwise in coherent DMA
 * memory), the parallel skb pointer array, and the circular
 * next-descriptor links.  Returns 0 or -ENOMEM.
 */
static int txq_init(struct mv643xx_eth_private *mp, int index)
{
	struct tx_queue *txq = mp->txq + index;
	struct tx_desc *tx_desc;
	int size;
	int i;

	txq->index = index;

	txq->tx_ring_size = mp->default_tx_ring_size;

	txq->tx_desc_count = 0;
	txq->tx_curr_desc = 0;
	txq->tx_used_desc = 0;

	size = txq->tx_ring_size * sizeof(struct tx_desc);

	/* Primary queue's ring may live in on-chip SRAM if it fits. */
	if (index == mp->txq_primary && size <= mp->tx_desc_sram_size) {
		txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
						mp->tx_desc_sram_size);
		txq->tx_desc_dma = mp->tx_desc_sram_addr;
	} else {
		txq->tx_desc_area = dma_alloc_coherent(NULL, size,
							&txq->tx_desc_dma,
							GFP_KERNEL);
	}

	if (txq->tx_desc_area == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate tx ring (%d bytes)\n", size);
		goto out;
	}
	memset(txq->tx_desc_area, 0, size);

	txq->tx_desc_area_size = size;
	txq->tx_skb = kmalloc(txq->tx_ring_size * sizeof(*txq->tx_skb),
								GFP_KERNEL);
	if (txq->tx_skb == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate tx skb ring\n");
		goto out_free;
	}

	/* Chain the descriptors into a circular list. */
	tx_desc = (struct tx_desc *)txq->tx_desc_area;
	for (i = 0; i < txq->tx_ring_size; i++) {
		struct tx_desc *txd = tx_desc + i;
		int nexti;

		nexti = i + 1;
		if (nexti == txq->tx_ring_size)
			nexti = 0;

		txd->cmd_sts = 0;
		txd->next_desc_ptr = txq->tx_desc_dma +
					nexti * sizeof(struct tx_desc);
	}

	return 0;


out_free:
	/* Undo the ring allocation, matching the path used above. */
	if (index == mp->txq_primary && size <= mp->tx_desc_sram_size)
		iounmap(txq->tx_desc_area);
	else
		dma_free_coherent(NULL, size,
				  txq->tx_desc_area,
				  txq->tx_desc_dma);

out:
	return -ENOMEM;
}
L
Linus Torvalds 已提交
1674

1675
/*
 * Release completed TX descriptors (or, with 'force', all of them,
 * stripping the DMA ownership bit) and free their skbs.  The queue
 * lock is dropped around the unmap/free of each buffer so it is not
 * held across potentially slow operations.
 */
static void txq_reclaim(struct tx_queue *txq, int force)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	unsigned long flags;

	spin_lock_irqsave(&mp->lock, flags);
	while (txq->tx_desc_count > 0) {
		int tx_index;
		struct tx_desc *desc;
		u32 cmd_sts;
		struct sk_buff *skb;
		dma_addr_t addr;
		int count;

		tx_index = txq->tx_used_desc;
		desc = &txq->tx_desc_area[tx_index];
		cmd_sts = desc->cmd_sts;

		/*
		 * Stop at the first descriptor still owned by the
		 * hardware, unless we are tearing the queue down.
		 */
		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
			if (!force)
				break;
			desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
		}

		txq->tx_used_desc = tx_index + 1;
		if (txq->tx_used_desc == txq->tx_ring_size)
			txq->tx_used_desc = 0;
		txq->tx_desc_count--;

		addr = desc->buf_ptr;
		count = desc->byte_cnt;
		skb = txq->tx_skb[tx_index];
		txq->tx_skb[tx_index] = NULL;

		if (cmd_sts & ERROR_SUMMARY) {
			dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n");
			mp->dev->stats.tx_errors++;
		}

		/*
		 * Drop mp->lock while we free the skb.
		 */
		spin_unlock_irqrestore(&mp->lock, flags);

		/*
		 * First descriptors were mapped with dma_map_single(),
		 * fragment descriptors with dma_map_page() — undo with
		 * the matching unmap call.
		 */
		if (cmd_sts & TX_FIRST_DESC)
			dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
		else
			dma_unmap_page(NULL, addr, count, DMA_TO_DEVICE);

		if (skb)
			dev_kfree_skb_irq(skb);

		spin_lock_irqsave(&mp->lock, flags);
	}
	spin_unlock_irqrestore(&mp->lock, flags);
}
L
Linus Torvalds 已提交
1731

1732
static void txq_deinit(struct tx_queue *txq)
1733
{
1734
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
1735

1736 1737
	txq_disable(txq);
	txq_reclaim(txq, 1);
L
Linus Torvalds 已提交
1738

1739
	BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);
L
Linus Torvalds 已提交
1740

1741 1742
	if (txq->index == mp->txq_primary &&
	    txq->tx_desc_area_size <= mp->tx_desc_sram_size)
1743
		iounmap(txq->tx_desc_area);
1744
	else
1745 1746 1747 1748
		dma_free_coherent(NULL, txq->tx_desc_area_size,
				  txq->tx_desc_area, txq->tx_desc_dma);

	kfree(txq->tx_skb);
1749
}
L
Linus Torvalds 已提交
1750 1751


1752
/* netdev ops and related ***************************************************/
1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810
/*
 * React to a link status change: on link loss, stop the interface
 * and flush/reset all active TX queues; on link up, log the new
 * parameters and restart the interface.
 */
static void handle_link_event(struct mv643xx_eth_private *mp)
{
	struct net_device *dev = mp->dev;
	u32 port_status;
	int speed;
	int duplex;
	int fc;

	port_status = rdl(mp, PORT_STATUS(mp->port_num));
	if (!(port_status & LINK_UP)) {
		if (netif_carrier_ok(dev)) {
			int i;

			printk(KERN_INFO "%s: link down\n", dev->name);

			netif_carrier_off(dev);
			netif_stop_queue(dev);

			/* Flush in-flight TX and resync the hw pointers. */
			for (i = 0; i < 8; i++) {
				struct tx_queue *txq = mp->txq + i;

				if (mp->txq_mask & (1 << i)) {
					txq_reclaim(txq, 1);
					txq_reset_hw_ptr(txq);
				}
			}
		}
		return;
	}

	switch (port_status & PORT_SPEED_MASK) {
	case PORT_SPEED_10:
		speed = 10;
		break;
	case PORT_SPEED_100:
		speed = 100;
		break;
	case PORT_SPEED_1000:
		speed = 1000;
		break;
	default:
		speed = -1;
		break;
	}
	duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
	fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;

	printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
			 "flow control %sabled\n", dev->name,
			 speed, duplex ? "full" : "half",
			 fc ? "en" : "dis");

	if (!netif_carrier_ok(dev)) {
		netif_carrier_on(dev);
		netif_wake_queue(dev);
	}
}

L
Lennert Buytenhek 已提交
1811
/*
 * Main port interrupt handler: dispatches link events, hands RX off
 * to NAPI (masking further interrupts until polling is done),
 * reclaims transmitted buffers, and restarts TX queues that the
 * hardware stopped before reaching our last queued descriptor.
 */
static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 int_cause;
	u32 int_cause_ext;

	int_cause = rdl(mp, INT_CAUSE(mp->port_num)) &
			(INT_TX_END | INT_RX | INT_EXT);
	if (int_cause == 0)
		return IRQ_NONE;

	int_cause_ext = 0;
	if (int_cause & INT_EXT) {
		/* Read and ack the extended cause bits we handle. */
		int_cause_ext = rdl(mp, INT_CAUSE_EXT(mp->port_num))
				& (INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX);
		wrl(mp, INT_CAUSE_EXT(mp->port_num), ~int_cause_ext);
	}

	if (int_cause_ext & (INT_EXT_PHY | INT_EXT_LINK))
		handle_link_event(mp);

	/*
	 * RxBuffer or RxError set for any of the 8 queues?
	 */
	if (int_cause & INT_RX) {
		/* Ack RX, mask the port and let NAPI poll. */
		wrl(mp, INT_CAUSE(mp->port_num), ~(int_cause & INT_RX));
		wrl(mp, INT_MASK(mp->port_num), 0x00000000);
		rdl(mp, INT_MASK(mp->port_num));

		napi_schedule(&mp->napi);
	}

	/*
	 * TxBuffer or TxError set for any of the 8 queues?
	 */
	if (int_cause & INT_EXT_TX) {
		int i;

		for (i = 0; i < 8; i++)
			if (mp->txq_mask & (1 << i))
				txq_reclaim(mp->txq + i, 0);

		/*
		 * Enough space again in the primary TX queue for a
		 * full packet?
		 */
		if (netif_carrier_ok(dev)) {
			spin_lock(&mp->lock);
			__txq_maybe_wake(mp->txq + mp->txq_primary);
			spin_unlock(&mp->lock);
		}
	}

	/*
	 * Any TxEnd interrupts?
	 */
	if (int_cause & INT_TX_END) {
		int i;

		wrl(mp, INT_CAUSE(mp->port_num), ~(int_cause & INT_TX_END));

		spin_lock(&mp->lock);
		for (i = 0; i < 8; i++) {
			struct tx_queue *txq = mp->txq + i;
			u32 hw_desc_ptr;
			u32 expected_ptr;

			if ((int_cause & (INT_TX_END_0 << i)) == 0)
				continue;

			hw_desc_ptr =
				rdl(mp, TXQ_CURRENT_DESC_PTR(mp->port_num, i));
			expected_ptr = (u32)txq->tx_desc_dma +
				txq->tx_curr_desc * sizeof(struct tx_desc);

			/*
			 * Re-enable the queue if the hardware stopped
			 * short of the descriptors we have queued.
			 */
			if (hw_desc_ptr != expected_ptr)
				txq_enable(txq);
		}
		spin_unlock(&mp->lock);
	}

	return IRQ_HANDLED;
}

1896
/*
 * Soft-reset the attached PHY via its BMCR register, then busy-wait
 * until the PHY clears the reset bit.  Gives up silently on SMI
 * errors (negative reads/writes).
 */
static void phy_reset(struct mv643xx_eth_private *mp)
{
	int data;

	data = smi_reg_read(mp, mp->phy_addr, MII_BMCR);
	if (data < 0)
		return;

	data |= BMCR_RESET;
	if (smi_reg_write(mp, mp->phy_addr, MII_BMCR, data) < 0)
		return;

	do {
		data = smi_reg_read(mp, mp->phy_addr, MII_BMCR);
	} while (data >= 0 && data & BMCR_RESET);
}

L
Lennert Buytenhek 已提交
1913
/*
 * Bring the port hardware up: PHY reset (preserving settings), link
 * parameters, SDMA config, TX bandwidth/priority, address filters,
 * RX routing, and finally the RX queues themselves.
 */
static void port_start(struct mv643xx_eth_private *mp)
{
	u32 pscr;
	int i;

	/*
	 * Perform PHY reset, if there is a PHY.
	 */
	if (mp->phy_addr != -1) {
		struct ethtool_cmd cmd;

		/* Save and restore settings around the reset. */
		mv643xx_eth_get_settings(mp->dev, &cmd);
		phy_reset(mp);
		mv643xx_eth_set_settings(mp->dev, &cmd);
	}

	/*
	 * Configure basic link parameters.
	 */
	pscr = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));

	pscr |= SERIAL_PORT_ENABLE;
	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);

	pscr |= DO_NOT_FORCE_LINK_FAIL;
	if (mp->phy_addr == -1)
		pscr |= FORCE_LINK_PASS;
	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);

	wrl(mp, SDMA_CONFIG(mp->port_num), PORT_SDMA_CONFIG_DEFAULT_VALUE);

	/*
	 * Configure TX path and queues.
	 */
	tx_set_rate(mp, 1000000000, 16777216);
	for (i = 0; i < 8; i++) {
		struct tx_queue *txq = mp->txq + i;

		if ((mp->txq_mask & (1 << i)) == 0)
			continue;

		txq_reset_hw_ptr(txq);
		txq_set_rate(txq, 1000000000, 16777216);
		txq_set_fixed_prio_mode(txq);
	}

	/*
	 * Add configured unicast address to address filter table.
	 */
	uc_addr_set(mp, mp->dev->dev_addr);

	/*
	 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
	 * frames to RX queue #0.
	 */
	wrl(mp, PORT_CONFIG(mp->port_num), 0x00000000);

	/*
	 * Treat BPDUs as normal multicasts, and disable partition mode.
	 */
	wrl(mp, PORT_CONFIG_EXT(mp->port_num), 0x00000000);

	/*
	 * Enable the receive queues.
	 */
	for (i = 0; i < 8; i++) {
		struct rx_queue *rxq = mp->rxq + i;
		int off = RXQ_CURRENT_DESC_PTR(mp->port_num, i);
		u32 addr;

		if ((mp->rxq_mask & (1 << i)) == 0)
			continue;

		/* Point the hardware at the current descriptor. */
		addr = (u32)rxq->rx_desc_dma;
		addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
		wrl(mp, off, addr);

		rxq_enable(rxq);
	}
}

1994
/*
 * Program the RX interrupt coalescing delay.  The counter ticks at
 * t_clk/64; 'delay' appears to be in microseconds given the t_clk
 * scaling — TODO confirm.  Newer silicon has a wider, split 16-bit
 * field in SDMA_CONFIG; older parts a contiguous 14-bit field.
 */
static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
{
	unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;
	u32 val;

	val = rdl(mp, SDMA_CONFIG(mp->port_num));
	if (mp->shared->extended_rx_coal_limit) {
		if (coal > 0xffff)
			coal = 0xffff;
		val &= ~0x023fff80;
		val |= (coal & 0x8000) << 10;
		val |= (coal & 0x7fff) << 7;
	} else {
		if (coal > 0x3fff)
			coal = 0x3fff;
		val &= ~0x003fff00;
		val |= (coal & 0x3fff) << 8;
	}
	wrl(mp, SDMA_CONFIG(mp->port_num), val);
}

2015
/*
 * Program the TX interrupt coalescing delay (counter ticks at
 * t_clk/64; same scaling as set_rx_coal).  The threshold field is
 * 14 bits wide, shifted by 4 in the register.
 */
static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
{
	unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;

	if (coal > 0x3fff)
		coal = 0x3fff;
	wrl(mp, TX_FIFO_URGENT_THRESHOLD(mp->port_num), (coal & 0x3fff) << 4);
}

2024
static int mv643xx_eth_open(struct net_device *dev)
2025
{
2026
	struct mv643xx_eth_private *mp = netdev_priv(dev);
2027
	int err;
2028
	int oom;
2029
	int i;
2030

L
Lennert Buytenhek 已提交
2031 2032 2033
	wrl(mp, INT_CAUSE(mp->port_num), 0);
	wrl(mp, INT_CAUSE_EXT(mp->port_num), 0);
	rdl(mp, INT_CAUSE_EXT(mp->port_num));
2034

L
Lennert Buytenhek 已提交
2035
	err = request_irq(dev->irq, mv643xx_eth_irq,
2036
			  IRQF_SHARED, dev->name, dev);
2037
	if (err) {
L
Lennert Buytenhek 已提交
2038
		dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
2039
		return -EAGAIN;
2040 2041
	}

L
Lennert Buytenhek 已提交
2042
	init_mac_tables(mp);
2043

2044 2045 2046
	napi_enable(&mp->napi);

	oom = 0;
2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058
	for (i = 0; i < 8; i++) {
		if ((mp->rxq_mask & (1 << i)) == 0)
			continue;

		err = rxq_init(mp, i);
		if (err) {
			while (--i >= 0)
				if (mp->rxq_mask & (1 << i))
					rxq_deinit(mp->rxq + i);
			goto out;
		}

2059 2060 2061 2062 2063 2064
		rxq_refill(mp->rxq + i, INT_MAX, &oom);
	}

	if (oom) {
		mp->rx_oom.expires = jiffies + (HZ / 10);
		add_timer(&mp->rx_oom);
2065
	}
2066

2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077 2078
	for (i = 0; i < 8; i++) {
		if ((mp->txq_mask & (1 << i)) == 0)
			continue;

		err = txq_init(mp, i);
		if (err) {
			while (--i >= 0)
				if (mp->txq_mask & (1 << i))
					txq_deinit(mp->txq + i);
			goto out_free;
		}
	}
2079

2080 2081 2082
	netif_carrier_off(dev);
	netif_stop_queue(dev);

L
Lennert Buytenhek 已提交
2083
	port_start(mp);
2084

2085 2086
	set_rx_coal(mp, 0);
	set_tx_coal(mp, 0);
2087

L
Lennert Buytenhek 已提交
2088 2089
	wrl(mp, INT_MASK_EXT(mp->port_num),
	    INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX);
2090

2091
	wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);
2092

2093 2094
	return 0;

2095

L
Lennert Buytenhek 已提交
2096
out_free:
2097 2098 2099
	for (i = 0; i < 8; i++)
		if (mp->rxq_mask & (1 << i))
			rxq_deinit(mp->rxq + i);
L
Lennert Buytenhek 已提交
2100
out:
2101 2102 2103
	free_irq(dev->irq, dev);

	return err;
2104 2105
}

2106
static void port_reset(struct mv643xx_eth_private *mp)
L
Linus Torvalds 已提交
2107
{
L
Lennert Buytenhek 已提交
2108
	unsigned int data;
2109
	int i;
L
Linus Torvalds 已提交
2110

2111 2112 2113
	for (i = 0; i < 8; i++) {
		if (mp->rxq_mask & (1 << i))
			rxq_disable(mp->rxq + i);
2114 2115
		if (mp->txq_mask & (1 << i))
			txq_disable(mp->txq + i);
2116
	}
2117 2118 2119 2120 2121 2122

	while (1) {
		u32 ps = rdl(mp, PORT_STATUS(mp->port_num));

		if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY)
			break;
2123
		udelay(10);
2124
	}
L
Linus Torvalds 已提交
2125

2126
	/* Reset the Enable bit in the Configuration Register */
L
Lennert Buytenhek 已提交
2127 2128 2129 2130 2131
	data = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
	data &= ~(SERIAL_PORT_ENABLE		|
		  DO_NOT_FORCE_LINK_FAIL	|
		  FORCE_LINK_PASS);
	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), data);
L
Linus Torvalds 已提交
2132 2133
}

2134
static int mv643xx_eth_stop(struct net_device *dev)
L
Linus Torvalds 已提交
2135
{
2136
	struct mv643xx_eth_private *mp = netdev_priv(dev);
2137
	int i;
L
Linus Torvalds 已提交
2138

L
Lennert Buytenhek 已提交
2139 2140
	wrl(mp, INT_MASK(mp->port_num), 0x00000000);
	rdl(mp, INT_MASK(mp->port_num));
L
Linus Torvalds 已提交
2141

2142
	napi_disable(&mp->napi);
2143

2144 2145
	del_timer_sync(&mp->rx_oom);

2146 2147
	netif_carrier_off(dev);
	netif_stop_queue(dev);
L
Linus Torvalds 已提交
2148

L
Lennert Buytenhek 已提交
2149 2150
	free_irq(dev->irq, dev);

2151
	port_reset(mp);
L
Lennert Buytenhek 已提交
2152
	mib_counters_update(mp);
L
Linus Torvalds 已提交
2153

2154 2155 2156
	for (i = 0; i < 8; i++) {
		if (mp->rxq_mask & (1 << i))
			rxq_deinit(mp->rxq + i);
2157 2158
		if (mp->txq_mask & (1 << i))
			txq_deinit(mp->txq + i);
2159
	}
L
Linus Torvalds 已提交
2160

2161
	return 0;
L
Linus Torvalds 已提交
2162 2163
}

L
Lennert Buytenhek 已提交
2164
static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
L
Linus Torvalds 已提交
2165
{
2166
	struct mv643xx_eth_private *mp = netdev_priv(dev);
L
Linus Torvalds 已提交
2167

2168 2169 2170 2171
	if (mp->phy_addr != -1)
		return generic_mii_ioctl(&mp->mii, if_mii(ifr), cmd, NULL);

	return -EOPNOTSUPP;
L
Linus Torvalds 已提交
2172 2173
}

2174
static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
L
Linus Torvalds 已提交
2175
{
2176 2177
	struct mv643xx_eth_private *mp = netdev_priv(dev);

L
Lennert Buytenhek 已提交
2178
	if (new_mtu < 64 || new_mtu > 9500)
2179
		return -EINVAL;
L
Linus Torvalds 已提交
2180

2181
	dev->mtu = new_mtu;
2182 2183
	tx_set_rate(mp, 1000000000, 16777216);

2184 2185
	if (!netif_running(dev))
		return 0;
L
Linus Torvalds 已提交
2186

2187 2188 2189 2190
	/*
	 * Stop and then re-open the interface. This will allocate RX
	 * skbs of the new MTU.
	 * There is a possible danger that the open will not succeed,
L
Lennert Buytenhek 已提交
2191
	 * due to memory being full.
2192 2193 2194
	 */
	mv643xx_eth_stop(dev);
	if (mv643xx_eth_open(dev)) {
L
Lennert Buytenhek 已提交
2195 2196 2197
		dev_printk(KERN_ERR, &dev->dev,
			   "fatal error on re-opening device after "
			   "MTU change\n");
2198 2199 2200
	}

	return 0;
L
Linus Torvalds 已提交
2201 2202
}

L
Lennert Buytenhek 已提交
2203
static void tx_timeout_task(struct work_struct *ugly)
L
Linus Torvalds 已提交
2204
{
L
Lennert Buytenhek 已提交
2205
	struct mv643xx_eth_private *mp;
L
Linus Torvalds 已提交
2206

L
Lennert Buytenhek 已提交
2207 2208 2209
	mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
	if (netif_running(mp->dev)) {
		netif_stop_queue(mp->dev);
2210

L
Lennert Buytenhek 已提交
2211 2212
		port_reset(mp);
		port_start(mp);
2213

2214
		__txq_maybe_wake(mp->txq + mp->txq_primary);
L
Lennert Buytenhek 已提交
2215
	}
2216 2217 2218
}

static void mv643xx_eth_tx_timeout(struct net_device *dev)
L
Linus Torvalds 已提交
2219
{
2220
	struct mv643xx_eth_private *mp = netdev_priv(dev);
L
Linus Torvalds 已提交
2221

L
Lennert Buytenhek 已提交
2222
	dev_printk(KERN_INFO, &dev->dev, "tx timeout\n");
2223

2224
	schedule_work(&mp->tx_timeout_task);
L
Linus Torvalds 已提交
2225 2226
}

2227
#ifdef CONFIG_NET_POLL_CONTROLLER
L
Lennert Buytenhek 已提交
2228
static void mv643xx_eth_netpoll(struct net_device *dev)
2229
{
L
Lennert Buytenhek 已提交
2230
	struct mv643xx_eth_private *mp = netdev_priv(dev);
2231

L
Lennert Buytenhek 已提交
2232 2233
	wrl(mp, INT_MASK(mp->port_num), 0x00000000);
	rdl(mp, INT_MASK(mp->port_num));
2234

L
Lennert Buytenhek 已提交
2235
	mv643xx_eth_irq(dev->irq, dev);
2236

2237
	wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);
2238
}
2239
#endif
2240

L
Lennert Buytenhek 已提交
2241
/* mii_if_info adapter: read a PHY register via the SMI interface. */
static int mv643xx_eth_mdio_read(struct net_device *dev, int addr, int reg)
{
	return smi_reg_read(netdev_priv(dev), addr, reg);
}

L
Lennert Buytenhek 已提交
2247
/* mii_if_info adapter: write a PHY register via the SMI interface. */
static void mv643xx_eth_mdio_write(struct net_device *dev, int addr, int reg, int val)
{
	smi_reg_write(netdev_priv(dev), addr, reg, val);
}
2252 2253


2254
/* platform glue ************************************************************/
2255 2256 2257
static void
mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
			      struct mbus_dram_target_info *dram)
2258
{
2259
	void __iomem *base = msp->base;
2260 2261 2262
	u32 win_enable;
	u32 win_protect;
	int i;
2263

2264 2265 2266 2267 2268
	for (i = 0; i < 6; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
2269 2270
	}

2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287
	win_enable = 0x3f;
	win_protect = 0;

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
			(cs->mbus_attr << 8) |
			dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable &= ~(1 << i);
		win_protect |= 3 << (2 * i);
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE);
	msp->win_protect = win_protect;
2288 2289
}

2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301
/*
 * Probe silicon-revision differences by writing feature-specific
 * bits and reading them back.
 */
static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
{
	/*
	 * Check whether we have a 14-bit coal limit field in bits
	 * [21:8], or a 16-bit coal limit in bits [25,21:7] of the
	 * SDMA config register: the probe bit only sticks on the
	 * extended layout.
	 */
	writel(0x02000000, msp->base + SDMA_CONFIG(0));
	msp->extended_rx_coal_limit =
		(readl(msp->base + SDMA_CONFIG(0)) & 0x02000000) ? 1 : 0;

	/*
	 * Check whether the TX rate control registers are in the
	 * old or the new place.
	 */
	writel(1, msp->base + TX_BW_MTU_MOVED(0));
	msp->tx_bw_control_moved =
		(readl(msp->base + TX_BW_MTU_MOVED(0)) & 1) ? 1 : 0;
}

2314
static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2315
{
2316
	static int mv643xx_eth_version_printed = 0;
2317
	struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
2318
	struct mv643xx_eth_shared_private *msp;
2319 2320
	struct resource *res;
	int ret;
2321

2322
	if (!mv643xx_eth_version_printed++)
2323 2324
		printk(KERN_NOTICE "MV-643xx 10/100/1000 ethernet "
			"driver version %s\n", mv643xx_eth_driver_version);
2325

2326 2327 2328 2329
	ret = -EINVAL;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		goto out;
2330

2331 2332 2333 2334 2335 2336
	ret = -ENOMEM;
	msp = kmalloc(sizeof(*msp), GFP_KERNEL);
	if (msp == NULL)
		goto out;
	memset(msp, 0, sizeof(*msp));

2337 2338
	msp->base = ioremap(res->start, res->end - res->start + 1);
	if (msp->base == NULL)
2339 2340
		goto out_free;

2341
	mutex_init(&msp->phy_lock);
2342

2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360
	msp->err_interrupt = NO_IRQ;
	init_waitqueue_head(&msp->smi_busy_wait);

	/*
	 * Check whether the error interrupt is hooked up.
	 */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (res != NULL) {
		int err;

		err = request_irq(res->start, mv643xx_eth_err_irq,
				  IRQF_SHARED, "mv643xx_eth", msp);
		if (!err) {
			writel(ERR_INT_SMI_DONE, msp->base + ERR_INT_MASK);
			msp->err_interrupt = res->start;
		}
	}

2361 2362 2363 2364 2365 2366
	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (pd != NULL && pd->dram != NULL)
		mv643xx_eth_conf_mbus_windows(msp, pd->dram);

L
Lennert Buytenhek 已提交
2367 2368 2369 2370
	/*
	 * Detect hardware parameters.
	 */
	msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;
2371
	infer_hw_params(msp);
L
Lennert Buytenhek 已提交
2372 2373 2374

	platform_set_drvdata(pdev, msp);

2375 2376 2377 2378 2379 2380 2381 2382 2383 2384
	return 0;

out_free:
	kfree(msp);
out:
	return ret;
}

static int mv643xx_eth_shared_remove(struct platform_device *pdev)
{
2385
	struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
2386

2387 2388
	if (msp->err_interrupt != NO_IRQ)
		free_irq(msp->err_interrupt, msp);
2389
	iounmap(msp->base);
2390 2391 2392
	kfree(msp);

	return 0;
2393 2394
}

2395
static struct platform_driver mv643xx_eth_shared_driver = {
L
Lennert Buytenhek 已提交
2396 2397
	.probe		= mv643xx_eth_shared_probe,
	.remove		= mv643xx_eth_shared_remove,
2398
	.driver = {
L
Lennert Buytenhek 已提交
2399
		.name	= MV643XX_ETH_SHARED_NAME,
2400 2401 2402 2403
		.owner	= THIS_MODULE,
	},
};

2404
static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
L
Linus Torvalds 已提交
2405
{
2406
	int addr_shift = 5 * mp->port_num;
L
Lennert Buytenhek 已提交
2407
	u32 data;
L
Linus Torvalds 已提交
2408

L
Lennert Buytenhek 已提交
2409 2410 2411 2412
	data = rdl(mp, PHY_ADDR);
	data &= ~(0x1f << addr_shift);
	data |= (phy_addr & 0x1f) << addr_shift;
	wrl(mp, PHY_ADDR, data);
L
Linus Torvalds 已提交
2413 2414
}

2415
static int phy_addr_get(struct mv643xx_eth_private *mp)
L
Linus Torvalds 已提交
2416
{
L
Lennert Buytenhek 已提交
2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448
	unsigned int data;

	data = rdl(mp, PHY_ADDR);

	return (data >> (5 * mp->port_num)) & 0x1f;
}

static void set_params(struct mv643xx_eth_private *mp,
		       struct mv643xx_eth_platform_data *pd)
{
	struct net_device *dev = mp->dev;

	if (is_valid_ether_addr(pd->mac_addr))
		memcpy(dev->dev_addr, pd->mac_addr, 6);
	else
		uc_addr_get(mp, dev->dev_addr);

	if (pd->phy_addr == -1) {
		mp->shared_smi = NULL;
		mp->phy_addr = -1;
	} else {
		mp->shared_smi = mp->shared;
		if (pd->shared_smi != NULL)
			mp->shared_smi = platform_get_drvdata(pd->shared_smi);

		if (pd->force_phy_addr || pd->phy_addr) {
			mp->phy_addr = pd->phy_addr & 0x3f;
			phy_addr_set(mp, mp->phy_addr);
		} else {
			mp->phy_addr = phy_addr_get(mp);
		}
	}
L
Linus Torvalds 已提交
2449

L
Lennert Buytenhek 已提交
2450 2451 2452 2453 2454
	mp->default_rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
	if (pd->rx_queue_size)
		mp->default_rx_ring_size = pd->rx_queue_size;
	mp->rx_desc_sram_addr = pd->rx_sram_addr;
	mp->rx_desc_sram_size = pd->rx_sram_size;
L
Linus Torvalds 已提交
2455

2456 2457 2458 2459 2460 2461
	if (pd->rx_queue_mask)
		mp->rxq_mask = pd->rx_queue_mask;
	else
		mp->rxq_mask = 0x01;
	mp->rxq_primary = fls(mp->rxq_mask) - 1;

L
Lennert Buytenhek 已提交
2462 2463 2464 2465 2466
	mp->default_tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
	if (pd->tx_queue_size)
		mp->default_tx_ring_size = pd->tx_queue_size;
	mp->tx_desc_sram_addr = pd->tx_sram_addr;
	mp->tx_desc_sram_size = pd->tx_sram_size;
2467 2468 2469 2470 2471 2472

	if (pd->tx_queue_mask)
		mp->txq_mask = pd->tx_queue_mask;
	else
		mp->txq_mask = 0x01;
	mp->txq_primary = fls(mp->txq_mask) - 1;
L
Linus Torvalds 已提交
2473 2474
}

2475
static int phy_detect(struct mv643xx_eth_private *mp)
L
Linus Torvalds 已提交
2476
{
2477 2478 2479 2480 2481 2482 2483 2484 2485
	int data;
	int data2;

	data = smi_reg_read(mp, mp->phy_addr, MII_BMCR);
	if (data < 0)
		return -ENODEV;

	if (smi_reg_write(mp, mp->phy_addr, MII_BMCR, data ^ BMCR_ANENABLE) < 0)
		return -ENODEV;
L
Lennert Buytenhek 已提交
2486

2487 2488 2489
	data2 = smi_reg_read(mp, mp->phy_addr, MII_BMCR);
	if (data2 < 0)
		return -ENODEV;
L
Linus Torvalds 已提交
2490

2491
	if (((data ^ data2) & BMCR_ANENABLE) == 0)
L
Lennert Buytenhek 已提交
2492
		return -ENODEV;
L
Linus Torvalds 已提交
2493

2494
	smi_reg_write(mp, mp->phy_addr, MII_BMCR, data);
L
Linus Torvalds 已提交
2495

2496
	return 0;
L
Linus Torvalds 已提交
2497 2498
}

L
Lennert Buytenhek 已提交
2499 2500
static int phy_init(struct mv643xx_eth_private *mp,
		    struct mv643xx_eth_platform_data *pd)
2501
{
L
Lennert Buytenhek 已提交
2502 2503
	struct ethtool_cmd cmd;
	int err;
2504

L
Lennert Buytenhek 已提交
2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518
	err = phy_detect(mp);
	if (err) {
		dev_printk(KERN_INFO, &mp->dev->dev,
			   "no PHY detected at addr %d\n", mp->phy_addr);
		return err;
	}
	phy_reset(mp);

	mp->mii.phy_id = mp->phy_addr;
	mp->mii.phy_id_mask = 0x3f;
	mp->mii.reg_num_mask = 0x1f;
	mp->mii.dev = mp->dev;
	mp->mii.mdio_read = mv643xx_eth_mdio_read;
	mp->mii.mdio_write = mv643xx_eth_mdio_write;
2519

L
Lennert Buytenhek 已提交
2520
	mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii);
2521

L
Lennert Buytenhek 已提交
2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533
	memset(&cmd, 0, sizeof(cmd));

	cmd.port = PORT_MII;
	cmd.transceiver = XCVR_INTERNAL;
	cmd.phy_address = mp->phy_addr;
	if (pd->speed == 0) {
		cmd.autoneg = AUTONEG_ENABLE;
		cmd.speed = SPEED_100;
		cmd.advertising = ADVERTISED_10baseT_Half  |
				  ADVERTISED_10baseT_Full  |
				  ADVERTISED_100baseT_Half |
				  ADVERTISED_100baseT_Full;
2534
		if (mp->mii.supports_gmii)
L
Lennert Buytenhek 已提交
2535
			cmd.advertising |= ADVERTISED_1000baseT_Full;
2536
	} else {
L
Lennert Buytenhek 已提交
2537 2538 2539
		cmd.autoneg = AUTONEG_DISABLE;
		cmd.speed = pd->speed;
		cmd.duplex = pd->duplex;
2540
	}
L
Lennert Buytenhek 已提交
2541 2542 2543 2544

	mv643xx_eth_set_settings(mp->dev, &cmd);

	return 0;
2545 2546
}

2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574
/*
 * Initialize the port serial control register.  The port is first
 * disabled if it was left enabled; in PHY-less mode, speed, duplex
 * and flow control are forced instead of autonegotiated.
 */
static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
{
	u32 pscr;

	pscr = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
	if (pscr & SERIAL_PORT_ENABLE) {
		pscr &= ~SERIAL_PORT_ENABLE;
		wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
	}

	pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
	if (mp->phy_addr == -1) {
		pscr |= DISABLE_AUTO_NEG_SPEED_GMII;
		if (speed == SPEED_1000)
			pscr |= SET_GMII_SPEED_TO_1000;
		else if (speed == SPEED_100)
			pscr |= SET_MII_SPEED_TO_100;

		pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL;

		pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX;
		if (duplex == DUPLEX_FULL)
			pscr |= SET_FULL_DUPLEX_MODE;
	}

	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
}

2575
static int mv643xx_eth_probe(struct platform_device *pdev)
L
Linus Torvalds 已提交
2576
{
2577
	struct mv643xx_eth_platform_data *pd;
2578
	struct mv643xx_eth_private *mp;
2579 2580 2581
	struct net_device *dev;
	struct resource *res;
	DECLARE_MAC_BUF(mac);
L
Lennert Buytenhek 已提交
2582
	int err;
L
Linus Torvalds 已提交
2583

2584 2585
	pd = pdev->dev.platform_data;
	if (pd == NULL) {
L
Lennert Buytenhek 已提交
2586 2587
		dev_printk(KERN_ERR, &pdev->dev,
			   "no mv643xx_eth_platform_data\n");
2588 2589
		return -ENODEV;
	}
L
Linus Torvalds 已提交
2590

2591
	if (pd->shared == NULL) {
L
Lennert Buytenhek 已提交
2592 2593
		dev_printk(KERN_ERR, &pdev->dev,
			   "no mv643xx_eth_platform_data->shared\n");
2594 2595
		return -ENODEV;
	}
2596

2597
	dev = alloc_etherdev(sizeof(struct mv643xx_eth_private));
2598 2599
	if (!dev)
		return -ENOMEM;
L
Linus Torvalds 已提交
2600

2601
	mp = netdev_priv(dev);
L
Lennert Buytenhek 已提交
2602 2603 2604 2605 2606
	platform_set_drvdata(pdev, mp);

	mp->shared = platform_get_drvdata(pd->shared);
	mp->port_num = pd->port_number;

2607
	mp->dev = dev;
2608

L
Lennert Buytenhek 已提交
2609 2610 2611 2612 2613 2614 2615
	set_params(mp, pd);

	spin_lock_init(&mp->lock);

	mib_counters_clear(mp);
	INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);

2616 2617 2618 2619 2620 2621 2622 2623 2624
	if (mp->phy_addr != -1) {
		err = phy_init(mp, pd);
		if (err)
			goto out;

		SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);
	} else {
		SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops_phyless);
	}
2625
	init_pscr(mp, pd->speed, pd->duplex);
L
Lennert Buytenhek 已提交
2626

2627 2628 2629 2630 2631 2632
	netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 128);

	init_timer(&mp->rx_oom);
	mp->rx_oom.data = (unsigned long)mp;
	mp->rx_oom.function = oom_timer_wrapper;

L
Lennert Buytenhek 已提交
2633

2634 2635 2636
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	BUG_ON(!res);
	dev->irq = res->start;
L
Linus Torvalds 已提交
2637

L
Lennert Buytenhek 已提交
2638
	dev->hard_start_xmit = mv643xx_eth_xmit;
2639 2640 2641
	dev->open = mv643xx_eth_open;
	dev->stop = mv643xx_eth_stop;
	dev->set_multicast_list = mv643xx_eth_set_rx_mode;
L
Lennert Buytenhek 已提交
2642 2643 2644
	dev->set_mac_address = mv643xx_eth_set_mac_address;
	dev->do_ioctl = mv643xx_eth_ioctl;
	dev->change_mtu = mv643xx_eth_change_mtu;
2645 2646
	dev->tx_timeout = mv643xx_eth_tx_timeout;
#ifdef CONFIG_NET_POLL_CONTROLLER
2647
	dev->poll_controller = mv643xx_eth_netpoll;
2648 2649 2650
#endif
	dev->watchdog_timeo = 2 * HZ;
	dev->base_addr = 0;
L
Linus Torvalds 已提交
2651

2652
	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
2653
	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM;
L
Linus Torvalds 已提交
2654

L
Lennert Buytenhek 已提交
2655
	SET_NETDEV_DEV(dev, &pdev->dev);
2656

2657
	if (mp->shared->win_protect)
L
Lennert Buytenhek 已提交
2658
		wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);
L
Linus Torvalds 已提交
2659

2660 2661 2662
	err = register_netdev(dev);
	if (err)
		goto out;
L
Linus Torvalds 已提交
2663

L
Lennert Buytenhek 已提交
2664 2665
	dev_printk(KERN_NOTICE, &dev->dev, "port %d with MAC address %s\n",
		   mp->port_num, print_mac(mac, dev->dev_addr));
L
Linus Torvalds 已提交
2666

2667
	if (mp->tx_desc_sram_size > 0)
L
Lennert Buytenhek 已提交
2668
		dev_printk(KERN_NOTICE, &dev->dev, "configured with sram\n");
L
Linus Torvalds 已提交
2669

2670
	return 0;
L
Linus Torvalds 已提交
2671

2672 2673
out:
	free_netdev(dev);
L
Linus Torvalds 已提交
2674

2675
	return err;
L
Linus Torvalds 已提交
2676 2677
}

2678
static int mv643xx_eth_remove(struct platform_device *pdev)
L
Linus Torvalds 已提交
2679
{
L
Lennert Buytenhek 已提交
2680
	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
L
Linus Torvalds 已提交
2681

L
Lennert Buytenhek 已提交
2682
	unregister_netdev(mp->dev);
2683
	flush_scheduled_work();
L
Lennert Buytenhek 已提交
2684
	free_netdev(mp->dev);
2685 2686

	platform_set_drvdata(pdev, NULL);
L
Lennert Buytenhek 已提交
2687

2688
	return 0;
L
Linus Torvalds 已提交
2689 2690
}

2691
static void mv643xx_eth_shutdown(struct platform_device *pdev)
2692
{
L
Lennert Buytenhek 已提交
2693
	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
2694

2695
	/* Mask all interrupts on ethernet port */
L
Lennert Buytenhek 已提交
2696 2697
	wrl(mp, INT_MASK(mp->port_num), 0);
	rdl(mp, INT_MASK(mp->port_num));
2698

L
Lennert Buytenhek 已提交
2699 2700
	if (netif_running(mp->dev))
		port_reset(mp);
2701 2702
}

2703
static struct platform_driver mv643xx_eth_driver = {
L
Lennert Buytenhek 已提交
2704 2705 2706
	.probe		= mv643xx_eth_probe,
	.remove		= mv643xx_eth_remove,
	.shutdown	= mv643xx_eth_shutdown,
2707
	.driver = {
L
Lennert Buytenhek 已提交
2708
		.name	= MV643XX_ETH_NAME,
2709 2710 2711 2712
		.owner	= THIS_MODULE,
	},
};

2713
/*
 * Module init: the shared (controller) driver must be registered
 * before the per-port driver; roll back on failure.
 */
static int __init mv643xx_eth_init_module(void)
{
	int rc;

	rc = platform_driver_register(&mv643xx_eth_shared_driver);
	if (!rc) {
		rc = platform_driver_register(&mv643xx_eth_driver);
		if (rc)
			platform_driver_unregister(&mv643xx_eth_shared_driver);
	}

	return rc;
}
L
Lennert Buytenhek 已提交
2726
module_init(mv643xx_eth_init_module);
2727

2728
/* Module exit: unregister in the reverse order of registration. */
static void __exit mv643xx_eth_cleanup_module(void)
{
	platform_driver_unregister(&mv643xx_eth_driver);
	platform_driver_unregister(&mv643xx_eth_shared_driver);
}
2733
module_exit(mv643xx_eth_cleanup_module);
L
Linus Torvalds 已提交
2734

2735 2736
MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, "
	      "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek");
2737
MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
L
Lennert Buytenhek 已提交
2738
MODULE_LICENSE("GPL");
2739
MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);
L
Lennert Buytenhek 已提交
2740
MODULE_ALIAS("platform:" MV643XX_ETH_NAME);