mv643xx_eth.c 64.5 KB
Newer Older
L
Linus Torvalds 已提交
1
/*
2
 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
L
Linus Torvalds 已提交
3 4 5
 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
 *
 * Based on the 64360 driver from:
6 7
 * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
 *		      Rabeeh Khoury <rabeeh@marvell.com>
L
Linus Torvalds 已提交
8 9
 *
 * Copyright (C) 2003 PMC-Sierra, Inc.,
10
 *	written by Manish Lachwani
L
Linus Torvalds 已提交
11 12 13
 *
 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
 *
14
 * Copyright (C) 2004-2006 MontaVista Software, Inc.
L
Linus Torvalds 已提交
15 16 17 18 19
 *			   Dale Farnsworth <dale@farnsworth.org>
 *
 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
 *				     <sjhill@realitydiluted.com>
 *
20 21 22
 * Copyright (C) 2007-2008 Marvell Semiconductor
 *			   Lennert Buytenhek <buytenh@marvell.com>
 *
L
Linus Torvalds 已提交
23 24 25 26 27 28 29 30 31 32 33 34 35 36
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
37

L
Linus Torvalds 已提交
38 39
#include <linux/init.h>
#include <linux/dma-mapping.h>
40
#include <linux/in.h>
L
Linus Torvalds 已提交
41 42 43 44 45
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
46
#include <linux/platform_device.h>
47 48 49 50 51 52
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
#include <linux/mv643xx_eth.h>
L
Linus Torvalds 已提交
53 54 55
#include <asm/io.h>
#include <asm/types.h>
#include <asm/system.h>
56

57
static char mv643xx_eth_driver_name[] = "mv643xx_eth";
58
static char mv643xx_eth_driver_version[] = "1.2";
59

60 61 62
#define MV643XX_ETH_CHECKSUM_OFFLOAD_TX
#define MV643XX_ETH_NAPI
#define MV643XX_ETH_TX_FAST_REFILL
63

64
#ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX
65 66 67 68 69 70 71 72
#define MAX_DESCS_PER_SKB	(MAX_SKB_FRAGS + 1)
#else
#define MAX_DESCS_PER_SKB	1
#endif

/*
 * Registers shared between all ports.
 */
73 74 75 76 77 78 79
#define PHY_ADDR			0x0000
#define SMI_REG				0x0004
#define WINDOW_BASE(w)			(0x0200 + ((w) << 3))
#define WINDOW_SIZE(w)			(0x0204 + ((w) << 3))
#define WINDOW_REMAP_HIGH(w)		(0x0280 + ((w) << 2))
#define WINDOW_BAR_ENABLE		0x0290
#define WINDOW_PROTECT(w)		(0x0294 + ((w) << 4))
80 81 82 83

/*
 * Per-port registers.
 */
84
#define PORT_CONFIG(p)			(0x0400 + ((p) << 10))
85
#define  UNICAST_PROMISCUOUS_MODE	0x00000001
86 87 88 89 90 91
#define PORT_CONFIG_EXT(p)		(0x0404 + ((p) << 10))
#define MAC_ADDR_LOW(p)			(0x0414 + ((p) << 10))
#define MAC_ADDR_HIGH(p)		(0x0418 + ((p) << 10))
#define SDMA_CONFIG(p)			(0x041c + ((p) << 10))
#define PORT_SERIAL_CONTROL(p)		(0x043c + ((p) << 10))
#define PORT_STATUS(p)			(0x0444 + ((p) << 10))
92
#define  TX_FIFO_EMPTY			0x00000400
93
#define  TX_IN_PROGRESS			0x00000080
94 95 96 97 98 99
#define  PORT_SPEED_MASK		0x00000030
#define  PORT_SPEED_1000		0x00000010
#define  PORT_SPEED_100			0x00000020
#define  PORT_SPEED_10			0x00000000
#define  FLOW_CONTROL_ENABLED		0x00000008
#define  FULL_DUPLEX			0x00000004
100
#define  LINK_UP			0x00000002
101
#define TXQ_COMMAND(p)			(0x0448 + ((p) << 10))
102 103
#define TXQ_FIX_PRIO_CONF(p)		(0x044c + ((p) << 10))
#define TX_BW_RATE(p)			(0x0450 + ((p) << 10))
104
#define TX_BW_MTU(p)			(0x0458 + ((p) << 10))
105
#define TX_BW_BURST(p)			(0x045c + ((p) << 10))
106
#define INT_CAUSE(p)			(0x0460 + ((p) << 10))
107
#define  INT_TX_END_0			0x00080000
108
#define  INT_TX_END			0x07f80000
109
#define  INT_RX				0x0007fbfc
110
#define  INT_EXT			0x00000002
111
#define INT_CAUSE_EXT(p)		(0x0464 + ((p) << 10))
112 113 114 115
#define  INT_EXT_LINK			0x00100000
#define  INT_EXT_PHY			0x00010000
#define  INT_EXT_TX_ERROR_0		0x00000100
#define  INT_EXT_TX_0			0x00000001
116
#define  INT_EXT_TX			0x0000ffff
117 118 119
#define INT_MASK(p)			(0x0468 + ((p) << 10))
#define INT_MASK_EXT(p)			(0x046c + ((p) << 10))
#define TX_FIFO_URGENT_THRESHOLD(p)	(0x0474 + ((p) << 10))
120 121 122 123
#define TXQ_FIX_PRIO_CONF_MOVED(p)	(0x04dc + ((p) << 10))
#define TX_BW_RATE_MOVED(p)		(0x04e0 + ((p) << 10))
#define TX_BW_MTU_MOVED(p)		(0x04e8 + ((p) << 10))
#define TX_BW_BURST_MOVED(p)		(0x04ec + ((p) << 10))
124
#define RXQ_CURRENT_DESC_PTR(p, q)	(0x060c + ((p) << 10) + ((q) << 4))
125
#define RXQ_COMMAND(p)			(0x0680 + ((p) << 10))
126 127 128 129
#define TXQ_CURRENT_DESC_PTR(p, q)	(0x06c0 + ((p) << 10) + ((q) << 2))
#define TXQ_BW_TOKENS(p, q)		(0x0700 + ((p) << 10) + ((q) << 4))
#define TXQ_BW_CONF(p, q)		(0x0704 + ((p) << 10) + ((q) << 4))
#define TXQ_BW_WRR_CONF(p, q)		(0x0708 + ((p) << 10) + ((q) << 4))
130 131 132 133
#define MIB_COUNTERS(p)			(0x1000 + ((p) << 7))
#define SPECIAL_MCAST_TABLE(p)		(0x1400 + ((p) << 10))
#define OTHER_MCAST_TABLE(p)		(0x1500 + ((p) << 10))
#define UNICAST_TABLE(p)		(0x1600 + ((p) << 10))
134

135 136 137 138

/*
 * SDMA configuration register.
 */
139
#define RX_BURST_SIZE_16_64BIT		(4 << 1)
140 141
#define BLM_RX_NO_SWAP			(1 << 4)
#define BLM_TX_NO_SWAP			(1 << 5)
142
#define TX_BURST_SIZE_16_64BIT		(4 << 22)
143 144 145

#if defined(__BIG_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
146 147
		RX_BURST_SIZE_16_64BIT	|	\
		TX_BURST_SIZE_16_64BIT
148 149
#elif defined(__LITTLE_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
150
		RX_BURST_SIZE_16_64BIT	|	\
151 152
		BLM_RX_NO_SWAP		|	\
		BLM_TX_NO_SWAP		|	\
153
		TX_BURST_SIZE_16_64BIT
154 155 156 157
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif

158 159 160 161 162 163 164

/*
 * Port serial control register.
 */
#define SET_MII_SPEED_TO_100			(1 << 24)
#define SET_GMII_SPEED_TO_1000			(1 << 23)
#define SET_FULL_DUPLEX_MODE			(1 << 21)
165
#define MAX_RX_PACKET_9700BYTE			(5 << 17)
166 167 168 169 170 171 172
#define DISABLE_AUTO_NEG_SPEED_GMII		(1 << 13)
#define DO_NOT_FORCE_LINK_FAIL			(1 << 10)
#define SERIAL_PORT_CONTROL_RESERVED		(1 << 9)
#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL		(1 << 3)
#define DISABLE_AUTO_NEG_FOR_DUPLEX		(1 << 2)
#define FORCE_LINK_PASS				(1 << 1)
#define SERIAL_PORT_ENABLE			(1 << 0)
173

174 175
#define DEFAULT_RX_QUEUE_SIZE		400
#define DEFAULT_TX_QUEUE_SIZE		800
176 177


178 179
/*
 * RX/TX descriptors.
180 181
 */
/*
 * NOTE(review): field order differs between the two endian variants,
 * presumably so the in-memory layout matches the controller's DMA
 * view on either CPU endianness — confirm against the datasheet.
 */
#if defined(__BIG_ENDIAN)
struct rx_desc {
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u16 buf_size;		/* Buffer size				*/
	u32 cmd_sts;		/* Descriptor command status		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
};

struct tx_desc {
	u16 byte_cnt;		/* buffer byte count			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u32 cmd_sts;		/* Command/status field			*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
};
#elif defined(__LITTLE_ENDIAN)
struct rx_desc {
	u32 cmd_sts;		/* Descriptor command status		*/
	u16 buf_size;		/* Buffer size				*/
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
};

struct tx_desc {
	u32 cmd_sts;		/* Command/status field			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u16 byte_cnt;		/* buffer byte count			*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
};
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif

217
/* RX & TX descriptor command */
218
#define BUFFER_OWNED_BY_DMA		0x80000000
219 220

/* RX & TX descriptor status */
221
#define ERROR_SUMMARY			0x00000001
222 223

/* RX descriptor status */
224 225 226 227
#define LAYER_4_CHECKSUM_OK		0x40000000
#define RX_ENABLE_INTERRUPT		0x20000000
#define RX_FIRST_DESC			0x08000000
#define RX_LAST_DESC			0x04000000
228 229

/* TX descriptor command */
230 231 232 233 234 235 236 237
#define TX_ENABLE_INTERRUPT		0x00800000
#define GEN_CRC				0x00400000
#define TX_FIRST_DESC			0x00200000
#define TX_LAST_DESC			0x00100000
#define ZERO_PADDING			0x00080000
#define GEN_IP_V4_CHECKSUM		0x00040000
#define GEN_TCP_UDP_CHECKSUM		0x00020000
#define UDP_FRAME			0x00010000
238 239
#define MAC_HDR_EXTRA_4_BYTES		0x00008000
#define MAC_HDR_EXTRA_8_BYTES		0x00000200
240

241
#define TX_IHL_SHIFT			11
242 243


244
/* global *******************************************************************/
245
/*
 * State shared between all ethernet ports on one MV643XX controller
 * instance.
 */
struct mv643xx_eth_shared_private {
	/*
	 * Ethernet controller base address.
	 */
	void __iomem *base;

	/*
	 * Protects access to SMI_REG, which is shared between ports.
	 */
	spinlock_t phy_lock;

	/*
	 * Per-port MBUS window access register value.
	 */
	u32 win_protect;

	/*
	 * Hardware-specific parameters.
	 */
	unsigned int t_clk;		/* system clock, used for rate limiting math */
	int extended_rx_coal_limit;
	int tx_bw_control_moved;	/* TX bandwidth regs live at moved offsets */
};


/* per-port *****************************************************************/
271
/*
 * Software shadow of the hardware MIB counter block; accumulated by
 * mib_counters_update().  The two octet counters are 64-bit because
 * the hardware splits them across two 32-bit registers.
 */
struct mib_counters {
	u64 good_octets_received;
	u32 bad_octets_received;
	u32 internal_mac_transmit_err;
	u32 good_frames_received;
	u32 bad_frames_received;
	u32 broadcast_frames_received;
	u32 multicast_frames_received;
	u32 frames_64_octets;
	u32 frames_65_to_127_octets;
	u32 frames_128_to_255_octets;
	u32 frames_256_to_511_octets;
	u32 frames_512_to_1023_octets;
	u32 frames_1024_to_max_octets;
	u64 good_octets_sent;
	u32 good_frames_sent;
	u32 excessive_collision;
	u32 multicast_frames_sent;
	u32 broadcast_frames_sent;
	u32 unrec_mac_control_received;
	u32 fc_sent;
	u32 good_fc_received;
	u32 bad_fc_received;
	u32 undersize_received;
	u32 fragments_received;
	u32 oversize_received;
	u32 jabber_received;
	u32 mac_receive_error;
	u32 bad_crc_event;
	u32 collision;
	u32 late_collision;
};

304
/* Per-RX-queue state. */
struct rx_queue {
	int index;			/* queue number within the port */

	int rx_ring_size;		/* number of descriptors in the ring */

	int rx_desc_count;		/* descriptors currently filled/owned by hw */
	int rx_curr_desc;		/* next descriptor to process */
	int rx_used_desc;		/* next descriptor slot to refill */

	struct rx_desc *rx_desc_area;	/* descriptor ring (CPU virtual address) */
	dma_addr_t rx_desc_dma;		/* descriptor ring (bus address) */
	int rx_desc_area_size;
	struct sk_buff **rx_skb;	/* skb attached to each descriptor slot */

	struct timer_list rx_oom;	/* refill retry timer (see rxq_refill) */
};

321
/* Per-TX-queue state. */
struct tx_queue {
	int index;			/* queue number within the port */

	int tx_ring_size;		/* number of descriptors in the ring */

	int tx_desc_count;		/* descriptors currently in use */
	int tx_curr_desc;		/* next descriptor slot to claim */
	int tx_used_desc;		/* next descriptor to reclaim */

	struct tx_desc *tx_desc_area;	/* descriptor ring (CPU virtual address) */
	dma_addr_t tx_desc_dma;		/* descriptor ring (bus address) */
	int tx_desc_area_size;
	struct sk_buff **tx_skb;	/* skb to free per descriptor slot */
};

/* Per-port driver state. */
struct mv643xx_eth_private {
	struct mv643xx_eth_shared_private *shared;
	int port_num;			/* port index within the controller */

	struct net_device *dev;

	struct mv643xx_eth_shared_private *shared_smi;
	int phy_addr;			/* PHY address on the SMI bus */

	spinlock_t lock;		/* protects ring state and wake/stop decisions */

	struct mib_counters mib_counters;
	struct work_struct tx_timeout_task;
	struct mii_if_info mii;

	/*
	 * RX state.
	 */
	int default_rx_ring_size;
	unsigned long rx_desc_sram_addr;
	int rx_desc_sram_size;
	u8 rxq_mask;			/* bitmask of enabled RX queues */
	int rxq_primary;
	struct napi_struct napi;
	struct rx_queue rxq[8];

	/*
	 * TX state.
	 */
	int default_tx_ring_size;
	unsigned long tx_desc_sram_addr;
	int tx_desc_sram_size;
	u8 txq_mask;			/* bitmask of enabled TX queues */
	int txq_primary;		/* queue tied to netdev flow control */
	struct tx_queue txq[8];
#ifdef MV643XX_ETH_TX_FAST_REFILL
	int tx_clean_threshold;		/* poll counter for periodic TX reclaim */
#endif
};
L
Linus Torvalds 已提交
375

376

377
/* port register accessors **************************************************/
378
static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
379
{
380
	return readl(mp->shared->base + offset);
381
}
382

383
/* Write 'data' to the 32-bit port register at 'offset' from the base. */
static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	void __iomem *reg = mp->shared->base + offset;

	writel(data, reg);
}
387 388


389
/* rxq/txq helper functions *************************************************/
390
static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
391
{
392
	return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
393
}
394

395 396
static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
{
397
	return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);
398 399
}

400
static void rxq_enable(struct rx_queue *rxq)
401
{
402
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
403
	wrl(mp, RXQ_COMMAND(mp->port_num), 1 << rxq->index);
404
}
L
Linus Torvalds 已提交
405

406 407 408
/*
 * Stop the RX DMA engine for this queue, then busy-wait until the
 * hardware clears the queue's enable bit (i.e. the queue has really
 * stopped running).
 */
static void rxq_disable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	u8 mask = 1 << rxq->index;

	/* Disable requests go in the upper byte of RXQ_COMMAND. */
	wrl(mp, RXQ_COMMAND(mp->port_num), mask << 8);
	while (rdl(mp, RXQ_COMMAND(mp->port_num)) & mask)
		udelay(10);
}

416 417 418 419 420 421 422 423 424 425 426
/*
 * Point the hardware's current-TX-descriptor register at the slot the
 * driver will fill next (txq->tx_curr_desc), expressed as a bus address
 * within the descriptor ring.
 */
static void txq_reset_hw_ptr(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int off = TXQ_CURRENT_DESC_PTR(mp->port_num, txq->index);
	u32 addr;

	addr = (u32)txq->tx_desc_dma;
	addr += txq->tx_curr_desc * sizeof(struct tx_desc);
	wrl(mp, off, addr);
}

427
static void txq_enable(struct tx_queue *txq)
L
Linus Torvalds 已提交
428
{
429
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
430
	wrl(mp, TXQ_COMMAND(mp->port_num), 1 << txq->index);
L
Linus Torvalds 已提交
431 432
}

433
/*
 * Stop the TX DMA engine for this queue, then busy-wait until the
 * hardware clears the queue's enable bit.
 */
static void txq_disable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	u8 mask = 1 << txq->index;

	/* Disable requests go in the upper byte of TXQ_COMMAND. */
	wrl(mp, TXQ_COMMAND(mp->port_num), mask << 8);
	while (rdl(mp, TXQ_COMMAND(mp->port_num)) & mask)
		udelay(10);
}

static void __txq_maybe_wake(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);

447 448 449 450 451 452
	/*
	 * netif_{stop,wake}_queue() flow control only applies to
	 * the primary queue.
	 */
	BUG_ON(txq->index != mp->txq_primary);

453 454
	if (txq->tx_ring_size - txq->tx_desc_count >= MAX_DESCS_PER_SKB)
		netif_wake_queue(mp->dev);
L
Linus Torvalds 已提交
455 456
}

457 458

/* rx ***********************************************************************/
459
static void txq_reclaim(struct tx_queue *txq, int force);
460

461
/*
 * Allocate fresh receive buffers and hand them to the hardware until
 * the RX ring is full.  If buffer allocation fails before the ring is
 * full, arm the OOM timer so the refill is retried in ~HZ/10 jiffies.
 * Runs under mp->lock with interrupts disabled.
 */
static void rxq_refill(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	unsigned long flags;

	spin_lock_irqsave(&mp->lock, flags);

	while (rxq->rx_desc_count < rxq->rx_ring_size) {
		int skb_size;
		struct sk_buff *skb;
		int unaligned;
		int rx;

		/*
		 * Reserve 2+14 bytes for an ethernet header (the
		 * hardware automatically prepends 2 bytes of dummy
		 * data to each received packet), 4 bytes for a VLAN
		 * header, and 4 bytes for the trailing FCS -- 24
		 * bytes total.
		 */
		skb_size = mp->dev->mtu + 24;

		/* Over-allocate so the data pointer can be aligned below. */
		skb = dev_alloc_skb(skb_size + dma_get_cache_alignment() - 1);
		if (skb == NULL)
			break;

		/* Align skb->data to a cache line boundary for DMA. */
		unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
		if (unaligned)
			skb_reserve(skb, dma_get_cache_alignment() - unaligned);

		rxq->rx_desc_count++;
		rx = rxq->rx_used_desc;
		rxq->rx_used_desc = (rx + 1) % rxq->rx_ring_size;

		rxq->rx_desc_area[rx].buf_ptr = dma_map_single(NULL, skb->data,
						skb_size, DMA_FROM_DEVICE);
		rxq->rx_desc_area[rx].buf_size = skb_size;
		rxq->rx_skb[rx] = skb;
		wmb();
		/*
		 * Hand ownership to the DMA engine only after the rest
		 * of the descriptor is visible to the hardware.
		 */
		rxq->rx_desc_area[rx].cmd_sts = BUFFER_OWNED_BY_DMA |
						RX_ENABLE_INTERRUPT;
		wmb();

		/*
		 * The hardware automatically prepends 2 bytes of
		 * dummy data to each received packet, so that the
		 * IP header ends up 16-byte aligned.
		 */
		skb_reserve(skb, 2);
	}

	if (rxq->rx_desc_count != rxq->rx_ring_size) {
		rxq->rx_oom.expires = jiffies + (HZ / 10);
		add_timer(&rxq->rx_oom);
	}

	spin_unlock_irqrestore(&mp->lock, flags);
}

520
/* OOM-timer callback: 'data' carries the rx_queue awaiting a refill retry. */
static inline void rxq_refill_timer_wrapper(unsigned long data)
{
	struct rx_queue *rxq = (struct rx_queue *)data;

	rxq_refill(rxq);
}

525
/*
 * Process up to 'budget' completed RX descriptors on this queue:
 * unmap each buffer, update statistics, drop errored or
 * multi-descriptor frames, and pass good frames up the stack.
 * Refills the ring before returning the number of frames processed.
 */
static int rxq_process(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	struct net_device_stats *stats = &mp->dev->stats;
	int rx;

	rx = 0;
	while (rx < budget) {
		struct rx_desc *rx_desc;
		unsigned int cmd_sts;
		struct sk_buff *skb;
		unsigned long flags;

		spin_lock_irqsave(&mp->lock, flags);

		rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];

		/* Stop at the first descriptor still owned by the hardware. */
		cmd_sts = rx_desc->cmd_sts;
		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
			spin_unlock_irqrestore(&mp->lock, flags);
			break;
		}
		rmb();

		skb = rxq->rx_skb[rxq->rx_curr_desc];
		rxq->rx_skb[rxq->rx_curr_desc] = NULL;

		rxq->rx_curr_desc = (rxq->rx_curr_desc + 1) % rxq->rx_ring_size;

		spin_unlock_irqrestore(&mp->lock, flags);

		/*
		 * NOTE(review): the unmap length is recomputed from the
		 * current MTU; this assumes the MTU has not changed since
		 * the buffer was mapped in rxq_refill() -- confirm.
		 */
		dma_unmap_single(NULL, rx_desc->buf_ptr + 2,
				 mp->dev->mtu + 24, DMA_FROM_DEVICE);
		rxq->rx_desc_count--;
		rx++;

		/*
		 * Update statistics.
		 *
		 * Note that the descriptor byte count includes 2 dummy
		 * bytes automatically inserted by the hardware at the
		 * start of the packet (which we don't count), and a 4
		 * byte CRC at the end of the packet (which we do count).
		 */
		stats->rx_packets++;
		stats->rx_bytes += rx_desc->byte_cnt - 2;

		/*
		 * In case we received a packet without first / last bits
		 * on, or the error summary bit is set, the packet needs
		 * to be dropped.
		 */
		if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
					(RX_FIRST_DESC | RX_LAST_DESC))
				|| (cmd_sts & ERROR_SUMMARY)) {
			stats->rx_dropped++;

			if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
				(RX_FIRST_DESC | RX_LAST_DESC)) {
				if (net_ratelimit())
					dev_printk(KERN_ERR, &mp->dev->dev,
						   "received packet spanning "
						   "multiple descriptors\n");
			}

			if (cmd_sts & ERROR_SUMMARY)
				stats->rx_errors++;

			dev_kfree_skb_irq(skb);
		} else {
			/*
			 * The -4 is for the CRC in the trailer of the
			 * received packet
			 */
			skb_put(skb, rx_desc->byte_cnt - 2 - 4);

			if (cmd_sts & LAYER_4_CHECKSUM_OK) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				skb->csum = htons(
					(cmd_sts & 0x0007fff8) >> 3);
			}
			skb->protocol = eth_type_trans(skb, mp->dev);
#ifdef MV643XX_ETH_NAPI
			netif_receive_skb(skb);
#else
			netif_rx(skb);
#endif
		}

		mp->dev->last_rx = jiffies;
	}

	rxq_refill(rxq);

	return rx;
}

622 623
#ifdef MV643XX_ETH_NAPI
static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
624
{
625 626
	struct mv643xx_eth_private *mp;
	int rx;
627
	int i;
628 629

	mp = container_of(napi, struct mv643xx_eth_private, napi);
630

631
#ifdef MV643XX_ETH_TX_FAST_REFILL
632 633
	if (++mp->tx_clean_threshold > 5) {
		mp->tx_clean_threshold = 0;
634 635 636
		for (i = 0; i < 8; i++)
			if (mp->txq_mask & (1 << i))
				txq_reclaim(mp->txq + i, 0);
637 638 639 640 641 642

		if (netif_carrier_ok(mp->dev)) {
			spin_lock(&mp->lock);
			__txq_maybe_wake(mp->txq + mp->txq_primary);
			spin_unlock(&mp->lock);
		}
643
	}
644
#endif
645

646 647 648 649
	rx = 0;
	for (i = 7; rx < budget && i >= 0; i--)
		if (mp->rxq_mask & (1 << i))
			rx += rxq_process(mp->rxq + i, budget - rx);
650

651 652
	if (rx < budget) {
		netif_rx_complete(mp->dev, napi);
653
		wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);
654
	}
655

656
	return rx;
657
}
658
#endif
659

660 661 662

/* tx ***********************************************************************/
static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
L
Linus Torvalds 已提交
663
{
664
	int frag;
L
Linus Torvalds 已提交
665

666
	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
667 668
		skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
		if (fragp->size <= 8 && fragp->page_offset & 7)
669
			return 1;
L
Linus Torvalds 已提交
670
	}
671

672 673
	return 0;
}
674

675
static int txq_alloc_desc_index(struct tx_queue *txq)
676 677
{
	int tx_desc_curr;
678

679
	BUG_ON(txq->tx_desc_count >= txq->tx_ring_size);
L
Linus Torvalds 已提交
680

681 682
	tx_desc_curr = txq->tx_curr_desc;
	txq->tx_curr_desc = (tx_desc_curr + 1) % txq->tx_ring_size;
683

684
	BUG_ON(txq->tx_curr_desc == txq->tx_used_desc);
685

686 687
	return tx_desc_curr;
}
688

689
/*
 * Fill in one TX descriptor per page fragment of 'skb'.  Only the
 * last fragment's descriptor gets TX_ENABLE_INTERRUPT and records the
 * skb for freeing on completion.  Caller holds mp->lock.
 */
static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int frag;

	for (frag = 0; frag < nr_frags; frag++) {
		skb_frag_t *this_frag;
		int tx_index;
		struct tx_desc *desc;

		this_frag = &skb_shinfo(skb)->frags[frag];
		tx_index = txq_alloc_desc_index(txq);
		desc = &txq->tx_desc_area[tx_index];

		/*
		 * The last fragment will generate an interrupt
		 * which will free the skb on TX completion.
		 */
		if (frag == nr_frags - 1) {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA |
					ZERO_PADDING | TX_LAST_DESC |
					TX_ENABLE_INTERRUPT;
			txq->tx_skb[tx_index] = skb;
		} else {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA;
			txq->tx_skb[tx_index] = NULL;
		}

		desc->l4i_chk = 0;
		desc->byte_cnt = this_frag->size;
		desc->buf_ptr = dma_map_page(NULL, this_frag->page,
						this_frag->page_offset,
						this_frag->size,
						DMA_TO_DEVICE);
	}
}

726 727 728 729
/* Reinterpret a 16-bit checksum as big-endian without byte swapping. */
static inline __be16 sum16_as_be(__sum16 sum)
{
	return (__force __be16)sum;
}
L
Linus Torvalds 已提交
730

731
/*
 * Fill in TX descriptors for 'skb' (head buffer plus any page
 * fragments), program hardware checksum offload when requested, and
 * kick the TX DMA engine.  Caller holds mp->lock and has verified
 * the ring has room for MAX_DESCS_PER_SKB descriptors.
 */
static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int tx_index;
	struct tx_desc *desc;
	u32 cmd_sts;
	int length;

	cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;

	tx_index = txq_alloc_desc_index(txq);
	desc = &txq->tx_desc_area[tx_index];

	if (nr_frags) {
		/* Fragment descriptors carry the interrupt + skb. */
		txq_submit_frag_skb(txq, skb);

		length = skb_headlen(skb);
		txq->tx_skb[tx_index] = NULL;
	} else {
		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
		length = skb->len;
		txq->tx_skb[tx_index] = skb;
	}

	desc->byte_cnt = length;
	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int mac_hdr_len;

		BUG_ON(skb->protocol != htons(ETH_P_IP) &&
		       skb->protocol != htons(ETH_P_8021Q));

		cmd_sts |= GEN_TCP_UDP_CHECKSUM |
			   GEN_IP_V4_CHECKSUM   |
			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;

		/*
		 * Tell the hardware about any extra bytes (e.g. VLAN
		 * tags) between the ethernet header and the IP header.
		 */
		mac_hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
		switch (mac_hdr_len - ETH_HLEN) {
		case 0:
			break;
		case 4:
			cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
			break;
		case 8:
			cmd_sts |= MAC_HDR_EXTRA_8_BYTES;
			break;
		case 12:
			cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
			cmd_sts |= MAC_HDR_EXTRA_8_BYTES;
			break;
		default:
			if (net_ratelimit())
				dev_printk(KERN_ERR, &txq_to_mp(txq)->dev->dev,
				   "mac header length is %d?!\n", mac_hdr_len);
			break;
		}

		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_UDP:
			cmd_sts |= UDP_FRAME;
			desc->l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
			break;
		case IPPROTO_TCP:
			desc->l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
			break;
		default:
			BUG();
		}
	} else {
		/* Errata BTS #50, IHL must be 5 if no HW checksum */
		cmd_sts |= 5 << TX_IHL_SHIFT;
		desc->l4i_chk = 0;
	}

	/* ensure all other descriptors are written before first cmd_sts */
	wmb();
	desc->cmd_sts = cmd_sts;

	/* clear TX_END interrupt status */
	wrl(mp, INT_CAUSE(mp->port_num), ~(INT_TX_END_0 << txq->index));
	rdl(mp, INT_CAUSE(mp->port_num));

	/* ensure all descriptors are written before poking hardware */
	wmb();
	txq_enable(txq);

	txq->tx_desc_count += nr_frags + 1;
}

L
Lennert Buytenhek 已提交
822
/*
 * netdev hard_start_xmit hook.  Linearizes skbs with fragments the
 * hardware cannot handle, drops the packet if the primary TX queue
 * has no room for a worst-case skb, otherwise queues it and stops
 * the netdev queue when the ring is close to full.
 */
static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct tx_queue *txq;
	unsigned long flags;

	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
		stats->tx_dropped++;
		dev_printk(KERN_DEBUG, &dev->dev,
			   "failed to linearize skb with tiny "
			   "unaligned fragment\n");
		return NETDEV_TX_BUSY;
	}

	spin_lock_irqsave(&mp->lock, flags);

	/* All locally-generated traffic goes out the primary queue. */
	txq = mp->txq + mp->txq_primary;

	if (txq->tx_ring_size - txq->tx_desc_count < MAX_DESCS_PER_SKB) {
		spin_unlock_irqrestore(&mp->lock, flags);
		if (txq->index == mp->txq_primary && net_ratelimit())
			dev_printk(KERN_ERR, &dev->dev,
				   "primary tx queue full?!\n");
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	txq_submit_skb(txq, skb);
	stats->tx_bytes += skb->len;
	stats->tx_packets++;
	dev->trans_start = jiffies;

	/* Stop the queue while it can't hold another worst-case skb. */
	if (txq->index == mp->txq_primary) {
		int entries_left;

		entries_left = txq->tx_ring_size - txq->tx_desc_count;
		if (entries_left < MAX_DESCS_PER_SKB)
			netif_stop_queue(dev);
	}

	spin_unlock_irqrestore(&mp->lock, flags);

	return NETDEV_TX_OK;
}

868

869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891
/* tx rate control **********************************************************/
/*
 * Set total maximum TX rate (shared by all TX queues for this port)
 * to 'rate' bits per second, with a maximum burst of 'burst' bytes.
 */
static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
{
	int token_rate;
	int mtu;
	int bucket_size;

	/* Scale the rate by the system clock; 10-bit register field. */
	token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	/* MTU field is in units of 256 bytes; 6-bit register field. */
	mtu = (mp->dev->mtu + 255) >> 8;
	if (mtu > 63)
		mtu = 63;

	/* Bucket size is in units of 256 bytes; 16-bit register field. */
	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	/* Some silicon variants have these registers at moved offsets. */
	if (mp->shared->tx_bw_control_moved) {
		wrl(mp, TX_BW_RATE_MOVED(mp->port_num), token_rate);
		wrl(mp, TX_BW_MTU_MOVED(mp->port_num), mtu);
		wrl(mp, TX_BW_BURST_MOVED(mp->port_num), bucket_size);
	} else {
		wrl(mp, TX_BW_RATE(mp->port_num), token_rate);
		wrl(mp, TX_BW_MTU(mp->port_num), mtu);
		wrl(mp, TX_BW_BURST(mp->port_num), bucket_size);
	}
}

/*
 * Set this individual TX queue's maximum rate to 'rate' bits per
 * second, with a maximum burst of 'burst' bytes.
 */
static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int token_rate;
	int bucket_size;

	/* Scale the rate by the system clock; 10-bit register field. */
	token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	/* Bucket size is in units of 256 bytes; 16-bit register field. */
	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	wrl(mp, TXQ_BW_TOKENS(mp->port_num, txq->index), token_rate << 14);
	wrl(mp, TXQ_BW_CONF(mp->port_num, txq->index),
			(bucket_size << 10) | token_rate);
}

/*
 * Put this TX queue into fixed-priority arbitration mode.
 */
static void txq_set_fixed_prio_mode(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int off;
	u32 val;

	/*
	 * Turn on fixed priority mode.
	 */
	if (mp->shared->tx_bw_control_moved)
		off = TXQ_FIX_PRIO_CONF_MOVED(mp->port_num);
	else
		off = TXQ_FIX_PRIO_CONF(mp->port_num);

	val = rdl(mp, off);
	val |= 1 << txq->index;
	wrl(mp, off, val);
}

/*
 * Put this TX queue into weighted-round-robin arbitration mode with
 * the given 8-bit weight.
 */
static void txq_set_wrr(struct tx_queue *txq, int weight)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int off;
	u32 val;

	/*
	 * Turn off fixed priority mode.
	 */
	if (mp->shared->tx_bw_control_moved)
		off = TXQ_FIX_PRIO_CONF_MOVED(mp->port_num);
	else
		off = TXQ_FIX_PRIO_CONF(mp->port_num);

	val = rdl(mp, off);
	val &= ~(1 << txq->index);
	wrl(mp, off, val);

	/*
	 * Configure WRR weight for this queue.
	 */
	off = TXQ_BW_WRR_CONF(mp->port_num, txq->index);

	val = rdl(mp, off);
	val = (val & ~0xff) | (weight & 0xff);
	wrl(mp, off, val);
}


970
/* mii management interface *************************************************/
L
Lennert Buytenhek 已提交
971 972 973 974
#define SMI_BUSY		0x10000000
#define SMI_READ_VALID		0x08000000
#define SMI_OPCODE_READ		0x04000000
#define SMI_OPCODE_WRITE	0x00000000
975

L
Lennert Buytenhek 已提交
976 977
/*
 * Read PHY register 'reg' of the PHY at address 'addr' via the SMI
 * interface, storing the result in *value.  On timeout, a message is
 * printed and *value is left untouched.  The SMI register is shared
 * between ports, so access is serialized by shared_smi->phy_lock.
 */
static void smi_reg_read(struct mv643xx_eth_private *mp, unsigned int addr,
			 unsigned int reg, unsigned int *value)
{
	void __iomem *smi_reg = mp->shared_smi->base + SMI_REG;
	unsigned long flags;
	int i;

	/* the SMI register is a shared resource */
	spin_lock_irqsave(&mp->shared_smi->phy_lock, flags);

	/* wait for the SMI register to become available */
	for (i = 0; readl(smi_reg) & SMI_BUSY; i++) {
		if (i == 1000) {
			printk("%s: PHY busy timeout\n", mp->dev->name);
			goto out;
		}
		udelay(10);
	}

	writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);

	/* now wait for the data to be valid */
	for (i = 0; !(readl(smi_reg) & SMI_READ_VALID); i++) {
		if (i == 1000) {
			printk("%s: PHY read timeout\n", mp->dev->name);
			goto out;
		}
		udelay(10);
	}

	*value = readl(smi_reg) & 0xffff;
out:
	spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags);
}

L
Lennert Buytenhek 已提交
1011 1012 1013
static void smi_reg_write(struct mv643xx_eth_private *mp,
			  unsigned int addr,
			  unsigned int reg, unsigned int value)
L
Linus Torvalds 已提交
1014
{
1015
	void __iomem *smi_reg = mp->shared_smi->base + SMI_REG;
1016
	unsigned long flags;
L
Linus Torvalds 已提交
1017 1018
	int i;

1019 1020 1021 1022
	/* the SMI register is a shared resource */
	spin_lock_irqsave(&mp->shared_smi->phy_lock, flags);

	/* wait for the SMI register to become available */
1023
	for (i = 0; readl(smi_reg) & SMI_BUSY; i++) {
1024
		if (i == 1000) {
1025 1026 1027
			printk("%s: PHY busy timeout\n", mp->dev->name);
			goto out;
		}
1028
		udelay(10);
L
Linus Torvalds 已提交
1029 1030
	}

L
Lennert Buytenhek 已提交
1031 1032
	writel(SMI_OPCODE_WRITE | (reg << 21) |
		(addr << 16) | (value & 0xffff), smi_reg);
1033 1034 1035
out:
	spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags);
}
L
Linus Torvalds 已提交
1036

1037 1038

/* mib counters *************************************************************/
L
Lennert Buytenhek 已提交
1039
static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
1040
{
L
Lennert Buytenhek 已提交
1041
	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
L
Linus Torvalds 已提交
1042 1043
}

L
Lennert Buytenhek 已提交
1044
/*
 * Walk the whole 0x80-byte MIB counter bank, reading each 32-bit
 * counter once and discarding the value.
 */
static void mib_counters_clear(struct mv643xx_eth_private *mp)
{
	int off;

	for (off = 0; off < 0x80; off += 4)
		(void)mib_read(mp, off);
}
1051

L
Lennert Buytenhek 已提交
1052
/*
 * Fold the current hardware MIB counter values into the driver's
 * software accumulators.  The two 64-bit octet counters are split
 * across a low/high register pair (rx: 0x00/0x04, tx: 0x38/0x3c);
 * all other counters are plain 32-bit registers.
 */
static void mib_counters_update(struct mv643xx_eth_private *mp)
{
	struct mib_counters *p = &mp->mib_counters;

	p->good_octets_received += mib_read(mp, 0x00);
	p->good_octets_received += (u64)mib_read(mp, 0x04) << 32;
	p->bad_octets_received += mib_read(mp, 0x08);
	p->internal_mac_transmit_err += mib_read(mp, 0x0c);
	p->good_frames_received += mib_read(mp, 0x10);
	p->bad_frames_received += mib_read(mp, 0x14);
	p->broadcast_frames_received += mib_read(mp, 0x18);
	p->multicast_frames_received += mib_read(mp, 0x1c);
	p->frames_64_octets += mib_read(mp, 0x20);
	p->frames_65_to_127_octets += mib_read(mp, 0x24);
	p->frames_128_to_255_octets += mib_read(mp, 0x28);
	p->frames_256_to_511_octets += mib_read(mp, 0x2c);
	p->frames_512_to_1023_octets += mib_read(mp, 0x30);
	p->frames_1024_to_max_octets += mib_read(mp, 0x34);
	p->good_octets_sent += mib_read(mp, 0x38);
	p->good_octets_sent += (u64)mib_read(mp, 0x3c) << 32;
	p->good_frames_sent += mib_read(mp, 0x40);
	p->excessive_collision += mib_read(mp, 0x44);
	p->multicast_frames_sent += mib_read(mp, 0x48);
	p->broadcast_frames_sent += mib_read(mp, 0x4c);
	p->unrec_mac_control_received += mib_read(mp, 0x50);
	p->fc_sent += mib_read(mp, 0x54);
	p->good_fc_received += mib_read(mp, 0x58);
	p->bad_fc_received += mib_read(mp, 0x5c);
	p->undersize_received += mib_read(mp, 0x60);
	p->fragments_received += mib_read(mp, 0x64);
	p->oversize_received += mib_read(mp, 0x68);
	p->jabber_received += mib_read(mp, 0x6c);
	p->mac_receive_error += mib_read(mp, 0x70);
	p->bad_crc_event += mib_read(mp, 0x74);
	p->collision += mib_read(mp, 0x78);
	p->late_collision += mib_read(mp, 0x7c);
}

1090 1091

/* ethtool ******************************************************************/
1092
/*
 * Descriptor for one ethtool statistic.  A statistic lives either in
 * struct net_device (netdev_off >= 0) or in the driver private area
 * (mp_off >= 0); the unused offset is -1.
 */
struct mv643xx_eth_stats {
	char stat_string[ETH_GSTRING_LEN];	/* name reported to ethtool */
	int sizeof_stat;			/* 4 or 8 bytes */
	int netdev_off;				/* offset into struct net_device, or -1 */
	int mp_off;				/* offset into mv643xx_eth_private, or -1 */
};

1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145
/* Statistic taken from the generic net_device_stats. */
#define SSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct net_device_stats, m),		\
	  offsetof(struct net_device, stats.m), -1 }

/* Statistic taken from the hardware MIB counter accumulators. */
#define MIBSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct mib_counters, m),		\
	  -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }

/* Table of all statistics exported via ethtool -S, in report order. */
static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
	SSTAT(rx_packets),
	SSTAT(tx_packets),
	SSTAT(rx_bytes),
	SSTAT(tx_bytes),
	SSTAT(rx_errors),
	SSTAT(tx_errors),
	SSTAT(rx_dropped),
	SSTAT(tx_dropped),
	MIBSTAT(good_octets_received),
	MIBSTAT(bad_octets_received),
	MIBSTAT(internal_mac_transmit_err),
	MIBSTAT(good_frames_received),
	MIBSTAT(bad_frames_received),
	MIBSTAT(broadcast_frames_received),
	MIBSTAT(multicast_frames_received),
	MIBSTAT(frames_64_octets),
	MIBSTAT(frames_65_to_127_octets),
	MIBSTAT(frames_128_to_255_octets),
	MIBSTAT(frames_256_to_511_octets),
	MIBSTAT(frames_512_to_1023_octets),
	MIBSTAT(frames_1024_to_max_octets),
	MIBSTAT(good_octets_sent),
	MIBSTAT(good_frames_sent),
	MIBSTAT(excessive_collision),
	MIBSTAT(multicast_frames_sent),
	MIBSTAT(broadcast_frames_sent),
	MIBSTAT(unrec_mac_control_received),
	MIBSTAT(fc_sent),
	MIBSTAT(good_fc_received),
	MIBSTAT(bad_fc_received),
	MIBSTAT(undersize_received),
	MIBSTAT(fragments_received),
	MIBSTAT(oversize_received),
	MIBSTAT(jabber_received),
	MIBSTAT(mac_receive_error),
	MIBSTAT(bad_crc_event),
	MIBSTAT(collision),
	MIBSTAT(late_collision),
};

1148
static int mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1149
{
1150
	struct mv643xx_eth_private *mp = netdev_priv(dev);
1151 1152 1153 1154 1155 1156
	int err;

	spin_lock_irq(&mp->lock);
	err = mii_ethtool_gset(&mp->mii, cmd);
	spin_unlock_irq(&mp->lock);

L
Lennert Buytenhek 已提交
1157 1158 1159
	/*
	 * The MAC does not support 1000baseT_Half.
	 */
1160 1161 1162 1163 1164 1165
	cmd->supported &= ~SUPPORTED_1000baseT_Half;
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	return err;
}

1166 1167
static int mv643xx_eth_get_settings_phyless(struct net_device *dev, struct ethtool_cmd *cmd)
{
1168 1169 1170 1171 1172
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 port_status;

	port_status = rdl(mp, PORT_STATUS(mp->port_num));

1173 1174
	cmd->supported = SUPPORTED_MII;
	cmd->advertising = ADVERTISED_MII;
1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189
	switch (port_status & PORT_SPEED_MASK) {
	case PORT_SPEED_10:
		cmd->speed = SPEED_10;
		break;
	case PORT_SPEED_100:
		cmd->speed = SPEED_100;
		break;
	case PORT_SPEED_1000:
		cmd->speed = SPEED_1000;
		break;
	default:
		cmd->speed = -1;
		break;
	}
	cmd->duplex = (port_status & FULL_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
1190 1191 1192 1193 1194 1195 1196 1197 1198 1199
	cmd->port = PORT_MII;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_DISABLE;
	cmd->maxtxpkt = 1;
	cmd->maxrxpkt = 1;

	return 0;
}

1200
static int mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
L
Linus Torvalds 已提交
1201
{
1202
	struct mv643xx_eth_private *mp = netdev_priv(dev);
1203 1204
	int err;

L
Lennert Buytenhek 已提交
1205 1206 1207 1208 1209
	/*
	 * The MAC does not support 1000baseT_Half.
	 */
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

1210 1211 1212
	spin_lock_irq(&mp->lock);
	err = mii_ethtool_sset(&mp->mii, cmd);
	spin_unlock_irq(&mp->lock);
1213

1214 1215
	return err;
}
L
Linus Torvalds 已提交
1216

1217 1218 1219 1220 1221
/* PHY-less ports have fixed link parameters; changing them is not possible. */
static int mv643xx_eth_set_settings_phyless(struct net_device *dev, struct ethtool_cmd *cmd)
{
	return -EINVAL;
}

L
Lennert Buytenhek 已提交
1222 1223
static void mv643xx_eth_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *drvinfo)
1224
{
1225 1226
	strncpy(drvinfo->driver,  mv643xx_eth_driver_name, 32);
	strncpy(drvinfo->version, mv643xx_eth_driver_version, 32);
1227
	strncpy(drvinfo->fw_version, "N/A", 32);
L
Lennert Buytenhek 已提交
1228
	strncpy(drvinfo->bus_info, "platform", 32);
1229
	drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
1230
}
L
Linus Torvalds 已提交
1231

L
Lennert Buytenhek 已提交
1232
static int mv643xx_eth_nway_reset(struct net_device *dev)
1233
{
1234
	struct mv643xx_eth_private *mp = netdev_priv(dev);
L
Linus Torvalds 已提交
1235

1236 1237
	return mii_nway_restart(&mp->mii);
}
L
Linus Torvalds 已提交
1238

1239 1240 1241 1242 1243
/* No PHY means no autonegotiation to restart. */
static int mv643xx_eth_nway_reset_phyless(struct net_device *dev)
{
	return -EINVAL;
}

1244 1245
static u32 mv643xx_eth_get_link(struct net_device *dev)
{
1246
	struct mv643xx_eth_private *mp = netdev_priv(dev);
L
Linus Torvalds 已提交
1247

1248 1249
	return mii_link_ok(&mp->mii);
}
L
Linus Torvalds 已提交
1250

1251 1252 1253 1254 1255
/* PHY-less ports always report link up. */
static u32 mv643xx_eth_get_link_phyless(struct net_device *dev)
{
	return 1;
}

L
Lennert Buytenhek 已提交
1256 1257
/*
 * ethtool get_strings: copy the statistic names, one ETH_GSTRING_LEN
 * slot per entry, in the same order as mv643xx_eth_stats[].
 */
static void mv643xx_eth_get_strings(struct net_device *dev,
				    uint32_t stringset, uint8_t *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++)
		memcpy(data + i * ETH_GSTRING_LEN,
		       mv643xx_eth_stats[i].stat_string, ETH_GSTRING_LEN);
}
L
Linus Torvalds 已提交
1269

L
Lennert Buytenhek 已提交
1270 1271 1272
static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
					  struct ethtool_stats *stats,
					  uint64_t *data)
1273
{
L
Lennert Buytenhek 已提交
1274
	struct mv643xx_eth_private *mp = dev->priv;
1275
	int i;
L
Linus Torvalds 已提交
1276

L
Lennert Buytenhek 已提交
1277
	mib_counters_update(mp);
L
Linus Torvalds 已提交
1278

1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291
	for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
		const struct mv643xx_eth_stats *stat;
		void *p;

		stat = mv643xx_eth_stats + i;

		if (stat->netdev_off >= 0)
			p = ((void *)mp->dev) + stat->netdev_off;
		else
			p = ((void *)mp) + stat->mp_off;

		data[i] = (stat->sizeof_stat == 8) ?
				*(uint64_t *)p : *(uint32_t *)p;
L
Linus Torvalds 已提交
1292
	}
1293
}
L
Linus Torvalds 已提交
1294

L
Lennert Buytenhek 已提交
1295
static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
1296
{
L
Lennert Buytenhek 已提交
1297
	if (sset == ETH_SS_STATS)
1298
		return ARRAY_SIZE(mv643xx_eth_stats);
L
Lennert Buytenhek 已提交
1299 1300

	return -EOPNOTSUPP;
1301
}
L
Linus Torvalds 已提交
1302

1303
/* ethtool operations for ports that have an attached PHY. */
static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
	.get_settings		= mv643xx_eth_get_settings,
	.set_settings		= mv643xx_eth_set_settings,
	.get_drvinfo		= mv643xx_eth_get_drvinfo,
	.nway_reset		= mv643xx_eth_nway_reset,
	.get_link		= mv643xx_eth_get_link,
	.set_sg			= ethtool_op_set_sg,
	.get_strings		= mv643xx_eth_get_strings,
	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
	.get_sset_count		= mv643xx_eth_get_sset_count,
};
L
Linus Torvalds 已提交
1314

1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326
/* ethtool operations for PHY-less ports (fixed link parameters). */
static const struct ethtool_ops mv643xx_eth_ethtool_ops_phyless = {
	.get_settings		= mv643xx_eth_get_settings_phyless,
	.set_settings		= mv643xx_eth_set_settings_phyless,
	.get_drvinfo		= mv643xx_eth_get_drvinfo,
	.nway_reset		= mv643xx_eth_nway_reset_phyless,
	.get_link		= mv643xx_eth_get_link_phyless,
	.set_sg			= ethtool_op_set_sg,
	.get_strings		= mv643xx_eth_get_strings,
	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
	.get_sset_count		= mv643xx_eth_get_sset_count,
};

1327

1328
/* address handling *********************************************************/
1329
static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
1330 1331 1332
{
	unsigned int mac_h;
	unsigned int mac_l;
L
Linus Torvalds 已提交
1333

L
Lennert Buytenhek 已提交
1334 1335
	mac_h = rdl(mp, MAC_ADDR_HIGH(mp->port_num));
	mac_l = rdl(mp, MAC_ADDR_LOW(mp->port_num));
L
Linus Torvalds 已提交
1336

1337 1338 1339 1340 1341 1342
	addr[0] = (mac_h >> 24) & 0xff;
	addr[1] = (mac_h >> 16) & 0xff;
	addr[2] = (mac_h >> 8) & 0xff;
	addr[3] = mac_h & 0xff;
	addr[4] = (mac_l >> 8) & 0xff;
	addr[5] = mac_l & 0xff;
1343
}
L
Linus Torvalds 已提交
1344

1345
static void init_mac_tables(struct mv643xx_eth_private *mp)
1346
{
L
Lennert Buytenhek 已提交
1347
	int i;
L
Linus Torvalds 已提交
1348

L
Lennert Buytenhek 已提交
1349 1350 1351
	for (i = 0; i < 0x100; i += 4) {
		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, 0);
		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, 0);
1352
	}
L
Lennert Buytenhek 已提交
1353 1354 1355

	for (i = 0; i < 0x10; i += 4)
		wrl(mp, UNICAST_TABLE(mp->port_num) + i, 0);
1356
}
1357

1358
/*
 * Mark one entry of an address filter table as "accept".  Each 32-bit
 * table word covers four entries (one byte per entry); bit 0 of the
 * entry's byte is the accept bit.
 */
static void set_filter_table_entry(struct mv643xx_eth_private *mp,
				   int table, unsigned char entry)
{
	unsigned int reg_off = table + (entry & 0xfc);
	unsigned int val;

	val = rdl(mp, reg_off);
	val |= 1 << ((entry & 3) * 8);
	wrl(mp, reg_off, val);
}

1369
static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
L
Linus Torvalds 已提交
1370
{
1371 1372 1373
	unsigned int mac_h;
	unsigned int mac_l;
	int table;
L
Linus Torvalds 已提交
1374

L
Lennert Buytenhek 已提交
1375 1376
	mac_l = (addr[4] << 8) | addr[5];
	mac_h = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
1377

L
Lennert Buytenhek 已提交
1378 1379
	wrl(mp, MAC_ADDR_LOW(mp->port_num), mac_l);
	wrl(mp, MAC_ADDR_HIGH(mp->port_num), mac_h);
L
Linus Torvalds 已提交
1380

L
Lennert Buytenhek 已提交
1381
	table = UNICAST_TABLE(mp->port_num);
1382
	set_filter_table_entry(mp, table, addr[5] & 0x0f);
L
Linus Torvalds 已提交
1383 1384
}

L
Lennert Buytenhek 已提交
1385
static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
L
Linus Torvalds 已提交
1386
{
1387
	struct mv643xx_eth_private *mp = netdev_priv(dev);
L
Linus Torvalds 已提交
1388

L
Lennert Buytenhek 已提交
1389 1390 1391
	/* +2 is for the offset of the HW addr type */
	memcpy(dev->dev_addr, addr + 2, 6);

1392 1393
	init_mac_tables(mp);
	uc_addr_set(mp, dev->dev_addr);
L
Linus Torvalds 已提交
1394 1395 1396 1397

	return 0;
}

1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415
/*
 * Compute the 8-bit CRC (polynomial x^8 + x^2 + x + 1, i.e. 0x107)
 * over a 6-byte MAC address.  The result indexes the "other" multicast
 * filter table.
 */
static int addr_crc(unsigned char *addr)
{
	int crc = 0;
	int i, j;

	for (i = 0; i < 6; i++) {
		crc = (crc ^ addr[i]) << 8;
		for (j = 7; j >= 0; j--)
			if (crc & (0x100 << j))
				crc ^= 0x107 << j;
	}

	return crc;
}

L
Lennert Buytenhek 已提交
1416
/*
 * net_device set_rx_mode hook: program unicast promiscuity and the two
 * multicast filter tables according to dev->flags and dev->mc_list.
 */
static void mv643xx_eth_set_rx_mode(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 port_config;
	struct dev_addr_list *addr;
	int i;

	/* Unicast promiscuity follows IFF_PROMISC directly. */
	port_config = rdl(mp, PORT_CONFIG(mp->port_num));
	if (dev->flags & IFF_PROMISC)
		port_config |= UNICAST_PROMISCUOUS_MODE;
	else
		port_config &= ~UNICAST_PROMISCUOUS_MODE;
	wrl(mp, PORT_CONFIG(mp->port_num), port_config);

	/*
	 * Promiscuous or all-multicast: set the accept bit (bit 0 of each
	 * entry byte, hence 0x01010101 per word) in every multicast table
	 * entry and stop here.
	 */
	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		int port_num = mp->port_num;
		u32 accept = 0x01010101;

		for (i = 0; i < 0x100; i += 4) {
			wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept);
			wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept);
		}
		return;
	}

	/* Otherwise start from empty tables... */
	for (i = 0; i < 0x100; i += 4) {
		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, 0);
		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, 0);
	}

	/* ...and open one entry per subscribed multicast address. */
	for (addr = dev->mc_list; addr != NULL; addr = addr->next) {
		u8 *a = addr->da_addr;
		int table;

		if (addr->da_addrlen != 6)
			continue;

		/*
		 * Addresses in the 01:00:5e:00:00:xx range index the
		 * "special" table by their last byte; all others index
		 * the "other" table by the CRC-8 of the address.
		 */
		if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
			table = SPECIAL_MCAST_TABLE(mp->port_num);
			set_filter_table_entry(mp, table, a[5]);
		} else {
			int crc = addr_crc(a);

			table = OTHER_MCAST_TABLE(mp->port_num);
			set_filter_table_entry(mp, table, crc);
		}
	}
}
1464 1465


1466
/* rx/tx queue initialisation ***********************************************/
1467
/*
 * Allocate and initialise RX queue 'index': descriptor ring (in SRAM
 * for the primary queue when it fits, otherwise coherent DMA memory),
 * the skb pointer array, and the refill OOM timer.
 *
 * Returns 0 on success, -ENOMEM on allocation failure (everything
 * acquired so far is released via the goto-cleanup path).
 */
static int rxq_init(struct mv643xx_eth_private *mp, int index)
{
	struct rx_queue *rxq = mp->rxq + index;
	struct rx_desc *rx_desc;
	int size;
	int i;

	rxq->index = index;

	rxq->rx_ring_size = mp->default_rx_ring_size;

	rxq->rx_desc_count = 0;
	rxq->rx_curr_desc = 0;
	rxq->rx_used_desc = 0;

	size = rxq->rx_ring_size * sizeof(struct rx_desc);

	/* Primary queue may use the on-chip descriptor SRAM if it fits. */
	if (index == mp->rxq_primary && size <= mp->rx_desc_sram_size) {
		rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
						mp->rx_desc_sram_size);
		rxq->rx_desc_dma = mp->rx_desc_sram_addr;
	} else {
		rxq->rx_desc_area = dma_alloc_coherent(NULL, size,
							&rxq->rx_desc_dma,
							GFP_KERNEL);
	}

	if (rxq->rx_desc_area == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate rx ring (%d bytes)\n", size);
		goto out;
	}
	memset(rxq->rx_desc_area, 0, size);

	rxq->rx_desc_area_size = size;
	rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb),
								GFP_KERNEL);
	if (rxq->rx_skb == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate rx skb ring\n");
		goto out_free;
	}

	/* Chain the descriptors into a circular list. */
	rx_desc = (struct rx_desc *)rxq->rx_desc_area;
	for (i = 0; i < rxq->rx_ring_size; i++) {
		int nexti = (i + 1) % rxq->rx_ring_size;
		rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
					nexti * sizeof(struct rx_desc);
	}

	/* Timer used to retry refilling the ring after an OOM condition. */
	init_timer(&rxq->rx_oom);
	rxq->rx_oom.data = (unsigned long)rxq;
	rxq->rx_oom.function = rxq_refill_timer_wrapper;

	return 0;


out_free:
	/* Free the ring via the same path it was allocated through. */
	if (index == mp->rxq_primary && size <= mp->rx_desc_sram_size)
		iounmap(rxq->rx_desc_area);
	else
		dma_free_coherent(NULL, size,
				  rxq->rx_desc_area,
				  rxq->rx_desc_dma);

out:
	return -ENOMEM;
}
1535

1536
/*
 * Tear down an RX queue: stop it, cancel the OOM refill timer, free
 * all skbs still parked in the ring, then release the descriptor ring
 * (iounmap for the SRAM-backed primary queue, dma_free_coherent
 * otherwise) and the skb pointer array.
 */
static void rxq_deinit(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	int i;

	rxq_disable(rxq);

	del_timer_sync(&rxq->rx_oom);

	for (i = 0; i < rxq->rx_ring_size; i++) {
		if (rxq->rx_skb[i]) {
			dev_kfree_skb(rxq->rx_skb[i]);
			rxq->rx_desc_count--;
		}
	}

	/* All descriptors should have been accounted for by now. */
	if (rxq->rx_desc_count) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "error freeing rx ring -- %d skbs stuck\n",
			   rxq->rx_desc_count);
	}

	if (rxq->index == mp->rxq_primary &&
	    rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
		iounmap(rxq->rx_desc_area);
	else
		dma_free_coherent(NULL, rxq->rx_desc_area_size,
				  rxq->rx_desc_area, rxq->rx_desc_dma);

	kfree(rxq->rx_skb);
}
L
Linus Torvalds 已提交
1567

1568
/*
 * Allocate and initialise TX queue 'index': descriptor ring (in SRAM
 * for the primary queue when it fits, otherwise coherent DMA memory)
 * and the skb pointer array; the descriptors are chained circularly.
 *
 * Returns 0 on success, -ENOMEM on allocation failure (goto cleanup
 * releases whatever was acquired).
 */
static int txq_init(struct mv643xx_eth_private *mp, int index)
{
	struct tx_queue *txq = mp->txq + index;
	struct tx_desc *tx_desc;
	int size;
	int i;

	txq->index = index;

	txq->tx_ring_size = mp->default_tx_ring_size;

	txq->tx_desc_count = 0;
	txq->tx_curr_desc = 0;
	txq->tx_used_desc = 0;

	size = txq->tx_ring_size * sizeof(struct tx_desc);

	/* Primary queue may use the on-chip descriptor SRAM if it fits. */
	if (index == mp->txq_primary && size <= mp->tx_desc_sram_size) {
		txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
						mp->tx_desc_sram_size);
		txq->tx_desc_dma = mp->tx_desc_sram_addr;
	} else {
		txq->tx_desc_area = dma_alloc_coherent(NULL, size,
							&txq->tx_desc_dma,
							GFP_KERNEL);
	}

	if (txq->tx_desc_area == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate tx ring (%d bytes)\n", size);
		goto out;
	}
	memset(txq->tx_desc_area, 0, size);

	txq->tx_desc_area_size = size;
	txq->tx_skb = kmalloc(txq->tx_ring_size * sizeof(*txq->tx_skb),
								GFP_KERNEL);
	if (txq->tx_skb == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate tx skb ring\n");
		goto out_free;
	}

	/* Chain the descriptors into a circular list, all owned by the CPU. */
	tx_desc = (struct tx_desc *)txq->tx_desc_area;
	for (i = 0; i < txq->tx_ring_size; i++) {
		struct tx_desc *txd = tx_desc + i;
		int nexti = (i + 1) % txq->tx_ring_size;

		txd->cmd_sts = 0;
		txd->next_desc_ptr = txq->tx_desc_dma +
					nexti * sizeof(struct tx_desc);
	}

	return 0;


out_free:
	/* Free the ring via the same path it was allocated through. */
	if (index == mp->txq_primary && size <= mp->tx_desc_sram_size)
		iounmap(txq->tx_desc_area);
	else
		dma_free_coherent(NULL, size,
				  txq->tx_desc_area,
				  txq->tx_desc_dma);

out:
	return -ENOMEM;
}
L
Linus Torvalds 已提交
1635

1636
/*
 * Release completed TX descriptors: unmap the DMA buffers and free the
 * associated skbs.  With 'force' set, descriptors still owned by the
 * DMA engine are reclaimed as well (used during queue teardown).
 *
 * mp->lock is held while walking the ring but dropped around each skb
 * free, so this must tolerate concurrent ring updates between
 * iterations.
 */
static void txq_reclaim(struct tx_queue *txq, int force)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	unsigned long flags;

	spin_lock_irqsave(&mp->lock, flags);
	while (txq->tx_desc_count > 0) {
		int tx_index;
		struct tx_desc *desc;
		u32 cmd_sts;
		struct sk_buff *skb;
		dma_addr_t addr;
		int count;

		tx_index = txq->tx_used_desc;
		desc = &txq->tx_desc_area[tx_index];
		cmd_sts = desc->cmd_sts;

		/* Stop at the first descriptor the hardware still owns,
		 * unless we are forcing a full reclaim. */
		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
			if (!force)
				break;
			desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
		}

		txq->tx_used_desc = (tx_index + 1) % txq->tx_ring_size;
		txq->tx_desc_count--;

		addr = desc->buf_ptr;
		count = desc->byte_cnt;
		skb = txq->tx_skb[tx_index];
		txq->tx_skb[tx_index] = NULL;

		if (cmd_sts & ERROR_SUMMARY) {
			dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n");
			mp->dev->stats.tx_errors++;
		}

		/*
		 * Drop mp->lock while we free the skb.
		 */
		spin_unlock_irqrestore(&mp->lock, flags);

		/* First descriptor of a frame was mapped with
		 * dma_map_single; fragments with dma_map_page. */
		if (cmd_sts & TX_FIRST_DESC)
			dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
		else
			dma_unmap_page(NULL, addr, count, DMA_TO_DEVICE);

		/* Only the last descriptor of a frame carries the skb. */
		if (skb)
			dev_kfree_skb_irq(skb);

		spin_lock_irqsave(&mp->lock, flags);
	}
	spin_unlock_irqrestore(&mp->lock, flags);
}
L
Linus Torvalds 已提交
1690

1691
/*
 * Tear down a TX queue: stop it, force-reclaim every descriptor, then
 * release the descriptor ring (iounmap for the SRAM-backed primary
 * queue, dma_free_coherent otherwise) and the skb pointer array.
 */
static void txq_deinit(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);

	txq_disable(txq);
	txq_reclaim(txq, 1);

	/* After a forced reclaim the ring must be fully drained. */
	BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);

	if (txq->index == mp->txq_primary &&
	    txq->tx_desc_area_size <= mp->tx_desc_sram_size)
		iounmap(txq->tx_desc_area);
	else
		dma_free_coherent(NULL, txq->tx_desc_area_size,
				  txq->tx_desc_area, txq->tx_desc_dma);

	kfree(txq->tx_skb);
}
L
Linus Torvalds 已提交
1709 1710


1711
/* netdev ops and related ***************************************************/
1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769
/*
 * React to a link status change read from the port status register.
 * On link loss: stop the interface and drain/reset every active TX
 * queue.  On link up: log the negotiated parameters and (re)start
 * the interface if it was carrier-off.
 */
static void handle_link_event(struct mv643xx_eth_private *mp)
{
	struct net_device *dev = mp->dev;
	u32 port_status;
	int speed;
	int duplex;
	int fc;

	port_status = rdl(mp, PORT_STATUS(mp->port_num));
	if (!(port_status & LINK_UP)) {
		if (netif_carrier_ok(dev)) {
			int i;

			printk(KERN_INFO "%s: link down\n", dev->name);

			netif_carrier_off(dev);
			netif_stop_queue(dev);

			/* Force-reclaim and rewind every active TX queue. */
			for (i = 0; i < 8; i++) {
				struct tx_queue *txq = mp->txq + i;

				if (mp->txq_mask & (1 << i)) {
					txq_reclaim(txq, 1);
					txq_reset_hw_ptr(txq);
				}
			}
		}
		return;
	}

	/* Decode the negotiated link speed from the status register. */
	switch (port_status & PORT_SPEED_MASK) {
	case PORT_SPEED_10:
		speed = 10;
		break;
	case PORT_SPEED_100:
		speed = 100;
		break;
	case PORT_SPEED_1000:
		speed = 1000;
		break;
	default:
		speed = -1;
		break;
	}
	duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
	fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;

	printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
			 "flow control %sabled\n", dev->name,
			 speed, duplex ? "full" : "half",
			 fc ? "en" : "dis");

	if (!netif_carrier_ok(dev)) {
		netif_carrier_on(dev);
		netif_wake_queue(dev);
	}
}

L
Lennert Buytenhek 已提交
1770
/*
 * Interrupt handler.  Reads and acknowledges the port's interrupt
 * cause registers, then dispatches: link events, RX work (NAPI
 * scheduling or direct processing), TX completion reclaim, and
 * TxEnd restart of queues the hardware stopped prematurely.
 */
static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 int_cause;
	u32 int_cause_ext;

	int_cause = rdl(mp, INT_CAUSE(mp->port_num)) &
			(INT_TX_END | INT_RX | INT_EXT);
	if (int_cause == 0)
		return IRQ_NONE;

	int_cause_ext = 0;
	if (int_cause & INT_EXT) {
		/* Read and ack the extended (link/PHY/TX) causes. */
		int_cause_ext = rdl(mp, INT_CAUSE_EXT(mp->port_num))
				& (INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX);
		wrl(mp, INT_CAUSE_EXT(mp->port_num), ~int_cause_ext);
	}

	if (int_cause_ext & (INT_EXT_PHY | INT_EXT_LINK))
		handle_link_event(mp);

	/*
	 * RxBuffer or RxError set for any of the 8 queues?
	 */
#ifdef MV643XX_ETH_NAPI
	if (int_cause & INT_RX) {
		/* Ack RX, mask all port interrupts and hand off to NAPI. */
		wrl(mp, INT_CAUSE(mp->port_num), ~(int_cause & INT_RX));
		wrl(mp, INT_MASK(mp->port_num), 0x00000000);
		rdl(mp, INT_MASK(mp->port_num));

		netif_rx_schedule(dev, &mp->napi);
	}
#else
	if (int_cause & INT_RX) {
		int i;

		/* No NAPI: drain every active RX queue right here. */
		for (i = 7; i >= 0; i--)
			if (mp->rxq_mask & (1 << i))
				rxq_process(mp->rxq + i, INT_MAX);
	}
#endif

	/*
	 * TxBuffer or TxError set for any of the 8 queues?
	 */
	if (int_cause_ext & INT_EXT_TX) {
		int i;

		for (i = 0; i < 8; i++)
			if (mp->txq_mask & (1 << i))
				txq_reclaim(mp->txq + i, 0);

		/*
		 * Enough space again in the primary TX queue for a
		 * full packet?
		 */
		if (netif_carrier_ok(dev)) {
			spin_lock(&mp->lock);
			__txq_maybe_wake(mp->txq + mp->txq_primary);
			spin_unlock(&mp->lock);
		}
	}

	/*
	 * Any TxEnd interrupts?
	 */
	if (int_cause & INT_TX_END) {
		int i;

		wrl(mp, INT_CAUSE(mp->port_num), ~(int_cause & INT_TX_END));

		spin_lock(&mp->lock);
		for (i = 0; i < 8; i++) {
			struct tx_queue *txq = mp->txq + i;
			u32 hw_desc_ptr;
			u32 expected_ptr;

			if ((int_cause & (INT_TX_END_0 << i)) == 0)
				continue;

			/*
			 * If the hardware stopped before reaching the
			 * driver's current descriptor, new descriptors
			 * were queued after it halted -- kick it again.
			 */
			hw_desc_ptr =
				rdl(mp, TXQ_CURRENT_DESC_PTR(mp->port_num, i));
			expected_ptr = (u32)txq->tx_desc_dma +
				txq->tx_curr_desc * sizeof(struct tx_desc);

			if (hw_desc_ptr != expected_ptr)
				txq_enable(txq);
		}
		spin_unlock(&mp->lock);
	}

	return IRQ_HANDLED;
}

1865
static void phy_reset(struct mv643xx_eth_private *mp)
L
Linus Torvalds 已提交
1866
{
L
Lennert Buytenhek 已提交
1867
	unsigned int data;
L
Linus Torvalds 已提交
1868

1869 1870 1871
	smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data);
	data |= BMCR_RESET;
	smi_reg_write(mp, mp->phy_addr, MII_BMCR, data);
L
Linus Torvalds 已提交
1872

1873 1874
	do {
		udelay(1);
1875 1876
		smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data);
	} while (data & BMCR_RESET);
L
Linus Torvalds 已提交
1877 1878
}

L
Lennert Buytenhek 已提交
1879
/*
 * Bring the port to an operational state: reset the PHY (restoring its
 * settings afterwards), enable the serial port, configure the TX
 * queues' rate limiting and priority, install the unicast address,
 * set RX defaults, and finally enable the active RX queues.
 */
static void port_start(struct mv643xx_eth_private *mp)
{
	u32 pscr;
	int i;

	/*
	 * Perform PHY reset, if there is a PHY.
	 */
	if (mp->phy_addr != -1) {
		struct ethtool_cmd cmd;

		/* Save and restore the link settings around the reset. */
		mv643xx_eth_get_settings(mp->dev, &cmd);
		phy_reset(mp);
		mv643xx_eth_set_settings(mp->dev, &cmd);
	}

	/*
	 * Configure basic link parameters.
	 */
	pscr = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));

	pscr |= SERIAL_PORT_ENABLE;
	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);

	pscr |= DO_NOT_FORCE_LINK_FAIL;
	/* Without a PHY, force the link up unconditionally. */
	if (mp->phy_addr == -1)
		pscr |= FORCE_LINK_PASS;
	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);

	wrl(mp, SDMA_CONFIG(mp->port_num), PORT_SDMA_CONFIG_DEFAULT_VALUE);

	/*
	 * Configure TX path and queues.
	 */
	tx_set_rate(mp, 1000000000, 16777216);
	for (i = 0; i < 8; i++) {
		struct tx_queue *txq = mp->txq + i;

		if ((mp->txq_mask & (1 << i)) == 0)
			continue;

		txq_reset_hw_ptr(txq);
		txq_set_rate(txq, 1000000000, 16777216);
		txq_set_fixed_prio_mode(txq);
	}

	/*
	 * Add configured unicast address to address filter table.
	 */
	uc_addr_set(mp, mp->dev->dev_addr);

	/*
	 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
	 * frames to RX queue #0.
	 */
	wrl(mp, PORT_CONFIG(mp->port_num), 0x00000000);

	/*
	 * Treat BPDUs as normal multicasts, and disable partition mode.
	 */
	wrl(mp, PORT_CONFIG_EXT(mp->port_num), 0x00000000);

	/*
	 * Enable the receive queues.
	 */
	for (i = 0; i < 8; i++) {
		struct rx_queue *rxq = mp->rxq + i;
		int off = RXQ_CURRENT_DESC_PTR(mp->port_num, i);
		u32 addr;

		if ((mp->rxq_mask & (1 << i)) == 0)
			continue;

		/* Point the hardware at the queue's current descriptor. */
		addr = (u32)rxq->rx_desc_dma;
		addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
		wrl(mp, off, addr);

		rxq_enable(rxq);
	}
}

1960
/*
 * Program the RX interrupt coalescing delay ('delay' in units derived
 * from the t_clk rate).  Newer silicon (extended_rx_coal_limit) keeps
 * a 16-bit value split across two fields of SDMA_CONFIG; older parts
 * have a single 14-bit field.  The value is clamped to the field width.
 */
static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
{
	unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;
	u32 val;

	val = rdl(mp, SDMA_CONFIG(mp->port_num));
	if (mp->shared->extended_rx_coal_limit) {
		if (coal > 0xffff)
			coal = 0xffff;
		val &= ~0x023fff80;
		val |= (coal & 0x8000) << 10;	/* bit 15 lands in bit 25 */
		val |= (coal & 0x7fff) << 7;	/* bits 14:0 land in 21:7 */
	} else {
		if (coal > 0x3fff)
			coal = 0x3fff;
		val &= ~0x003fff00;
		val |= (coal & 0x3fff) << 8;
	}
	wrl(mp, SDMA_CONFIG(mp->port_num), val);
}

1981
static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
L
Linus Torvalds 已提交
1982
{
1983
	unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;
L
Linus Torvalds 已提交
1984

L
Lennert Buytenhek 已提交
1985 1986 1987
	if (coal > 0x3fff)
		coal = 0x3fff;
	wrl(mp, TX_FIFO_URGENT_THRESHOLD(mp->port_num), (coal & 0x3fff) << 4);
1988 1989
}

1990
/*
 * net_device open hook: acknowledge stale interrupts, grab the IRQ,
 * clear the filter tables, set up every active RX and TX queue
 * (unwinding on partial failure), enable NAPI, start the port, and
 * finally unmask the port interrupts.
 *
 * Returns 0 on success or a negative errno.
 */
static int mv643xx_eth_open(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int err;
	int i;

	/* Clear any interrupt causes left over from before. */
	wrl(mp, INT_CAUSE(mp->port_num), 0);
	wrl(mp, INT_CAUSE_EXT(mp->port_num), 0);
	rdl(mp, INT_CAUSE_EXT(mp->port_num));

	err = request_irq(dev->irq, mv643xx_eth_irq,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM,
			  dev->name, dev);
	if (err) {
		dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
		return -EAGAIN;
	}

	init_mac_tables(mp);

	/* Set up and pre-fill every active RX queue. */
	for (i = 0; i < 8; i++) {
		if ((mp->rxq_mask & (1 << i)) == 0)
			continue;

		err = rxq_init(mp, i);
		if (err) {
			/* Unwind the RX queues initialised so far. */
			while (--i >= 0)
				if (mp->rxq_mask & (1 << i))
					rxq_deinit(mp->rxq + i);
			goto out;
		}

		rxq_refill(mp->rxq + i);
	}

	/* Set up every active TX queue. */
	for (i = 0; i < 8; i++) {
		if ((mp->txq_mask & (1 << i)) == 0)
			continue;

		err = txq_init(mp, i);
		if (err) {
			/* Unwind the TX queues initialised so far. */
			while (--i >= 0)
				if (mp->txq_mask & (1 << i))
					txq_deinit(mp->txq + i);
			goto out_free;
		}
	}

#ifdef MV643XX_ETH_NAPI
	napi_enable(&mp->napi);
#endif

	/* Stay quiesced until a link-up event arrives. */
	netif_carrier_off(dev);
	netif_stop_queue(dev);

	port_start(mp);

	set_rx_coal(mp, 0);
	set_tx_coal(mp, 0);

	wrl(mp, INT_MASK_EXT(mp->port_num),
	    INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX);

	wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);

	return 0;


out_free:
	for (i = 0; i < 8; i++)
		if (mp->rxq_mask & (1 << i))
			rxq_deinit(mp->rxq + i);
out:
	free_irq(dev->irq, dev);

	return err;
}

2068
/*
 * Quiesce and reset the ethernet port.
 *
 * Disables all enabled RX/TX queues, busy-waits until the transmitter
 * is idle with an empty FIFO, then clears the port enable and link
 * forcing bits in the serial control register.
 */
static void port_reset(struct mv643xx_eth_private *mp)
{
	unsigned int data;
	int i;

	for (i = 0; i < 8; i++) {
		if (mp->rxq_mask & (1 << i))
			rxq_disable(mp->rxq + i);
		if (mp->txq_mask & (1 << i))
			txq_disable(mp->txq + i);
	}

	/* Wait for TX to go idle: FIFO empty and no frame in progress. */
	while (1) {
		u32 ps = rdl(mp, PORT_STATUS(mp->port_num));

		if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY)
			break;
		udelay(10);
	}

	/* Reset the Enable bit in the Configuration Register */
	data = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
	data &= ~(SERIAL_PORT_ENABLE		|
		  DO_NOT_FORCE_LINK_FAIL	|
		  FORCE_LINK_PASS);
	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), data);
}

2096
/*
 * net_device stop() hook: take the port down.
 *
 * Masks interrupts first (the read-back flushes the posted write),
 * stops NAPI and the TX queue, releases the IRQ, resets the port,
 * snapshots the MIB counters, and frees all queue resources.
 * Always returns 0.
 */
static int mv643xx_eth_stop(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int i;

	wrl(mp, INT_MASK(mp->port_num), 0x00000000);
	rdl(mp, INT_MASK(mp->port_num));

#ifdef MV643XX_ETH_NAPI
	napi_disable(&mp->napi);
#endif
	netif_carrier_off(dev);
	netif_stop_queue(dev);

	free_irq(dev->irq, dev);

	port_reset(mp);
	/* Fold the hardware MIB counters into the software stats. */
	mib_counters_update(mp);

	for (i = 0; i < 8; i++) {
		if (mp->rxq_mask & (1 << i))
			rxq_deinit(mp->rxq + i);
		if (mp->txq_mask & (1 << i))
			txq_deinit(mp->txq + i);
	}

	return 0;
}

L
Lennert Buytenhek 已提交
2125
/*
 * net_device do_ioctl() hook.
 *
 * MII ioctls are forwarded to the generic MII layer when a PHY is
 * attached (phy_addr != -1); everything else is unsupported.
 */
static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (mp->phy_addr != -1)
		return generic_mii_ioctl(&mp->mii, if_mii(ifr), cmd, NULL);

	return -EOPNOTSUPP;
}

2135
/*
 * net_device change_mtu() hook.
 *
 * Accepts MTUs in [64, 9500] (the hardware supports jumbo frames),
 * reprograms the TX rate limiter, and if the interface is running,
 * bounces it so RX buffers get reallocated at the new size.
 */
static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (new_mtu < 64 || new_mtu > 9500)
		return -EINVAL;

	dev->mtu = new_mtu;
	tx_set_rate(mp, 1000000000, 16777216);

	if (!netif_running(dev))
		return 0;

	/*
	 * Stop and then re-open the interface. This will allocate RX
	 * skbs of the new MTU.
	 * There is a possible danger that the open will not succeed,
	 * due to memory being full.
	 */
	mv643xx_eth_stop(dev);
	if (mv643xx_eth_open(dev)) {
		dev_printk(KERN_ERR, &dev->dev,
			   "fatal error on re-opening device after "
			   "MTU change\n");
	}

	return 0;
}

L
Lennert Buytenhek 已提交
2164
/*
 * Deferred TX-timeout recovery, run from the shared workqueue
 * (process context, so the port reset's busy-wait is acceptable).
 * Resets and restarts the port, then wakes the primary TX queue.
 */
static void tx_timeout_task(struct work_struct *ugly)
{
	struct mv643xx_eth_private *mp;

	mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
	if (netif_running(mp->dev)) {
		netif_stop_queue(mp->dev);

		port_reset(mp);
		port_start(mp);

		__txq_maybe_wake(mp->txq + mp->txq_primary);
	}
}

/*
 * net_device tx_timeout() hook (called in atomic context by the
 * watchdog); defers the actual recovery to tx_timeout_task.
 */
static void mv643xx_eth_tx_timeout(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	dev_printk(KERN_INFO, &dev->dev, "tx timeout\n");

	schedule_work(&mp->tx_timeout_task);
}

2188
#ifdef CONFIG_NET_POLL_CONTROLLER
L
Lennert Buytenhek 已提交
2189
static void mv643xx_eth_netpoll(struct net_device *dev)
2190
{
L
Lennert Buytenhek 已提交
2191
	struct mv643xx_eth_private *mp = netdev_priv(dev);
2192

L
Lennert Buytenhek 已提交
2193 2194
	wrl(mp, INT_MASK(mp->port_num), 0x00000000);
	rdl(mp, INT_MASK(mp->port_num));
2195

L
Lennert Buytenhek 已提交
2196
	mv643xx_eth_irq(dev->irq, dev);
2197

2198
	wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);
2199
}
2200
#endif
2201

L
Lennert Buytenhek 已提交
2202
/*
 * mii_if_info mdio_read callback: read PHY register @reg at PHY
 * address @addr over the SMI bus and return its value.
 */
static int mv643xx_eth_mdio_read(struct net_device *dev, int addr, int reg)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int val;

	smi_reg_read(mp, addr, reg, &val);

	return val;
}

L
Lennert Buytenhek 已提交
2212
static void mv643xx_eth_mdio_write(struct net_device *dev, int addr, int reg, int val)
2213
{
2214
	struct mv643xx_eth_private *mp = netdev_priv(dev);
L
Lennert Buytenhek 已提交
2215
	smi_reg_write(mp, addr, reg, val);
2216
}
2217 2218


2219
/* platform glue ************************************************************/
2220 2221 2222
/*
 * Program the MBUS address-decode windows so the ethernet block can
 * reach DRAM.  All six windows are cleared first, then one window is
 * set up per DRAM chip select.  win_enable bits are active-low
 * (cleared bit = window enabled); win_protect grants full read/write
 * access (2 bits per window) and is saved for later per-port use.
 */
static void
mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
			      struct mbus_dram_target_info *dram)
{
	void __iomem *base = msp->base;
	u32 win_enable;
	u32 win_protect;
	int i;

	/* Start from a clean slate: disable all six windows. */
	for (i = 0; i < 6; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	win_enable = 0x3f;
	win_protect = 0;

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
			(cs->mbus_attr << 8) |
			dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable &= ~(1 << i);
		win_protect |= 3 << (2 * i);
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE);
	msp->win_protect = win_protect;
}

2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266
/*
 * Detect silicon-variant register layouts by writing test patterns
 * and checking whether they stick.
 */
static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
{
	/*
	 * Check whether we have a 14-bit coal limit field in bits
	 * [21:8], or a 16-bit coal limit in bits [25,21:7] of the
	 * SDMA config register.
	 */
	writel(0x02000000, msp->base + SDMA_CONFIG(0));
	if (readl(msp->base + SDMA_CONFIG(0)) & 0x02000000)
		msp->extended_rx_coal_limit = 1;
	else
		msp->extended_rx_coal_limit = 0;

	/*
	 * Check whether the TX rate control registers are in the
	 * old or the new place.
	 */
	writel(1, msp->base + TX_BW_MTU_MOVED(0));
	if (readl(msp->base + TX_BW_MTU_MOVED(0)) & 1)
		msp->tx_bw_control_moved = 1;
	else
		msp->tx_bw_control_moved = 0;
}

2279
static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2280
{
2281
	static int mv643xx_eth_version_printed = 0;
2282
	struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
2283
	struct mv643xx_eth_shared_private *msp;
2284 2285
	struct resource *res;
	int ret;
2286

2287
	if (!mv643xx_eth_version_printed++)
2288 2289
		printk(KERN_NOTICE "MV-643xx 10/100/1000 ethernet "
			"driver version %s\n", mv643xx_eth_driver_version);
2290

2291 2292 2293 2294
	ret = -EINVAL;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		goto out;
2295

2296 2297 2298 2299 2300 2301
	ret = -ENOMEM;
	msp = kmalloc(sizeof(*msp), GFP_KERNEL);
	if (msp == NULL)
		goto out;
	memset(msp, 0, sizeof(*msp));

2302 2303
	msp->base = ioremap(res->start, res->end - res->start + 1);
	if (msp->base == NULL)
2304 2305 2306 2307 2308 2309 2310 2311 2312 2313
		goto out_free;

	spin_lock_init(&msp->phy_lock);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (pd != NULL && pd->dram != NULL)
		mv643xx_eth_conf_mbus_windows(msp, pd->dram);

L
Lennert Buytenhek 已提交
2314 2315 2316 2317
	/*
	 * Detect hardware parameters.
	 */
	msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;
2318
	infer_hw_params(msp);
L
Lennert Buytenhek 已提交
2319 2320 2321

	platform_set_drvdata(pdev, msp);

2322 2323 2324 2325 2326 2327 2328 2329 2330 2331
	return 0;

out_free:
	kfree(msp);
out:
	return ret;
}

/*
 * Remove hook for the shared block: unmap the register window and
 * free the private structure allocated in probe.
 */
static int mv643xx_eth_shared_remove(struct platform_device *pdev)
{
	struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);

	iounmap(msp->base);
	kfree(msp);

	return 0;
}

2340
/* Platform driver for the shared (per-controller) register block. */
static struct platform_driver mv643xx_eth_shared_driver = {
	.probe		= mv643xx_eth_shared_probe,
	.remove		= mv643xx_eth_shared_remove,
	.driver = {
		.name	= MV643XX_ETH_SHARED_NAME,
		.owner	= THIS_MODULE,
	},
};

2349
/*
 * Store this port's PHY address in the controller's PHY_ADDR
 * register; each port occupies a 5-bit field at offset 5*port_num.
 */
static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
{
	int addr_shift = 5 * mp->port_num;
	u32 data;

	data = rdl(mp, PHY_ADDR);
	data &= ~(0x1f << addr_shift);
	data |= (phy_addr & 0x1f) << addr_shift;
	wrl(mp, PHY_ADDR, data);
}

2360
/*
 * Read this port's 5-bit PHY address back from the controller's
 * PHY_ADDR register (counterpart of phy_addr_set).
 */
static int phy_addr_get(struct mv643xx_eth_private *mp)
{
	unsigned int data;

	data = rdl(mp, PHY_ADDR);

	return (data >> (5 * mp->port_num)) & 0x1f;
}

/*
 * Copy per-port configuration from platform data into the private
 * struct: MAC address, SMI/PHY wiring, ring sizes, SRAM descriptor
 * placement, and the RX/TX queue masks (default: queue 0 only).
 */
static void set_params(struct mv643xx_eth_private *mp,
		       struct mv643xx_eth_platform_data *pd)
{
	struct net_device *dev = mp->dev;

	/* Prefer a platform-supplied MAC; else read it from hardware. */
	if (is_valid_ether_addr(pd->mac_addr))
		memcpy(dev->dev_addr, pd->mac_addr, 6);
	else
		uc_addr_get(mp, dev->dev_addr);

	if (pd->phy_addr == -1) {
		/* PHY-less operation (e.g. fixed link). */
		mp->shared_smi = NULL;
		mp->phy_addr = -1;
	} else {
		/* SMI may be routed through a different controller. */
		mp->shared_smi = mp->shared;
		if (pd->shared_smi != NULL)
			mp->shared_smi = platform_get_drvdata(pd->shared_smi);

		if (pd->force_phy_addr || pd->phy_addr) {
			mp->phy_addr = pd->phy_addr & 0x3f;
			phy_addr_set(mp, mp->phy_addr);
		} else {
			mp->phy_addr = phy_addr_get(mp);
		}
	}

	mp->default_rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
	if (pd->rx_queue_size)
		mp->default_rx_ring_size = pd->rx_queue_size;
	mp->rx_desc_sram_addr = pd->rx_sram_addr;
	mp->rx_desc_sram_size = pd->rx_sram_size;

	if (pd->rx_queue_mask)
		mp->rxq_mask = pd->rx_queue_mask;
	else
		mp->rxq_mask = 0x01;
	/* Primary queue is the highest-numbered enabled queue. */
	mp->rxq_primary = fls(mp->rxq_mask) - 1;

	mp->default_tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
	if (pd->tx_queue_size)
		mp->default_tx_ring_size = pd->tx_queue_size;
	mp->tx_desc_sram_addr = pd->tx_sram_addr;
	mp->tx_desc_sram_size = pd->tx_sram_size;

	if (pd->tx_queue_mask)
		mp->txq_mask = pd->tx_queue_mask;
	else
		mp->txq_mask = 0x01;
	mp->txq_primary = fls(mp->txq_mask) - 1;
}

2420
/*
 * Check whether a PHY responds at mp->phy_addr by toggling the
 * auto-negotiation enable bit in BMCR and reading it back: a real
 * PHY reflects the flip, an absent device does not.  The original
 * BMCR value is restored afterwards.
 *
 * Returns 0 if a PHY was found, -ENODEV otherwise.
 */
static int phy_detect(struct mv643xx_eth_private *mp)
{
	unsigned int data;
	unsigned int data2;

	smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data);
	smi_reg_write(mp, mp->phy_addr, MII_BMCR, data ^ BMCR_ANENABLE);

	smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data2);
	if (((data ^ data2) & BMCR_ANENABLE) == 0)
		return -ENODEV;

	/* Restore the original BMCR contents. */
	smi_reg_write(mp, mp->phy_addr, MII_BMCR, data);

	return 0;
}

L
Lennert Buytenhek 已提交
2437 2438
/*
 * Detect and configure the attached PHY.
 *
 * Probes for the PHY, resets it, wires up the generic MII helpers,
 * and applies link settings: autonegotiation when pd->speed == 0
 * (advertising 10/100, plus 1000 if the PHY is gigabit-capable),
 * otherwise the fixed speed/duplex from platform data.
 *
 * Returns 0 on success or the error from phy_detect().
 */
static int phy_init(struct mv643xx_eth_private *mp,
		    struct mv643xx_eth_platform_data *pd)
{
	struct ethtool_cmd cmd;
	int err;

	err = phy_detect(mp);
	if (err) {
		dev_printk(KERN_INFO, &mp->dev->dev,
			   "no PHY detected at addr %d\n", mp->phy_addr);
		return err;
	}
	phy_reset(mp);

	mp->mii.phy_id = mp->phy_addr;
	mp->mii.phy_id_mask = 0x3f;
	mp->mii.reg_num_mask = 0x1f;
	mp->mii.dev = mp->dev;
	mp->mii.mdio_read = mv643xx_eth_mdio_read;
	mp->mii.mdio_write = mv643xx_eth_mdio_write;

	mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii);

	memset(&cmd, 0, sizeof(cmd));

	cmd.port = PORT_MII;
	cmd.transceiver = XCVR_INTERNAL;
	cmd.phy_address = mp->phy_addr;
	if (pd->speed == 0) {
		cmd.autoneg = AUTONEG_ENABLE;
		cmd.speed = SPEED_100;
		cmd.advertising = ADVERTISED_10baseT_Half  |
				  ADVERTISED_10baseT_Full  |
				  ADVERTISED_100baseT_Half |
				  ADVERTISED_100baseT_Full;
		if (mp->mii.supports_gmii)
			cmd.advertising |= ADVERTISED_1000baseT_Full;
	} else {
		cmd.autoneg = AUTONEG_DISABLE;
		cmd.speed = pd->speed;
		cmd.duplex = pd->duplex;
	}

	mv643xx_eth_set_settings(mp->dev, &cmd);

	return 0;
}

2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512
/*
 * Initialise the Port Serial Control Register.
 *
 * The port is first disabled if it was left enabled (the PSCR must
 * not be reconfigured while enabled).  In PHY-less mode, speed,
 * duplex and flow control are forced from @speed/@duplex instead of
 * being auto-negotiated.
 */
static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
{
	u32 pscr;

	pscr = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
	if (pscr & SERIAL_PORT_ENABLE) {
		pscr &= ~SERIAL_PORT_ENABLE;
		wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
	}

	pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
	if (mp->phy_addr == -1) {
		pscr |= DISABLE_AUTO_NEG_SPEED_GMII;
		if (speed == SPEED_1000)
			pscr |= SET_GMII_SPEED_TO_1000;
		else if (speed == SPEED_100)
			pscr |= SET_MII_SPEED_TO_100;

		pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL;

		pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX;
		if (duplex == DUPLEX_FULL)
			pscr |= SET_FULL_DUPLEX_MODE;
	}

	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
}

2513
/*
 * Probe for one ethernet port.
 *
 * Validates platform data, allocates the net_device, links it to the
 * shared block's drvdata, applies configuration (set_params), sets up
 * the PHY (or phyless ethtool ops), programs the serial control
 * register, fills in the net_device operation hooks and features,
 * and registers the device.
 *
 * Returns 0 on success or a negative errno (the net_device is freed
 * on any failure path).
 */
static int mv643xx_eth_probe(struct platform_device *pdev)
{
	struct mv643xx_eth_platform_data *pd;
	struct mv643xx_eth_private *mp;
	struct net_device *dev;
	struct resource *res;
	DECLARE_MAC_BUF(mac);
	int err;

	pd = pdev->dev.platform_data;
	if (pd == NULL) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "no mv643xx_eth_platform_data\n");
		return -ENODEV;
	}

	/* Each port must reference its controller's shared block. */
	if (pd->shared == NULL) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "no mv643xx_eth_platform_data->shared\n");
		return -ENODEV;
	}

	dev = alloc_etherdev(sizeof(struct mv643xx_eth_private));
	if (!dev)
		return -ENOMEM;

	mp = netdev_priv(dev);
	platform_set_drvdata(pdev, mp);

	mp->shared = platform_get_drvdata(pd->shared);
	mp->port_num = pd->port_number;

	mp->dev = dev;
#ifdef MV643XX_ETH_NAPI
	netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 64);
#endif

	set_params(mp, pd);

	spin_lock_init(&mp->lock);

	mib_counters_clear(mp);
	INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);

	/* PHY-less ports get a reduced ethtool op set. */
	if (mp->phy_addr != -1) {
		err = phy_init(mp, pd);
		if (err)
			goto out;

		SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);
	} else {
		SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops_phyless);
	}
	init_pscr(mp, pd->speed, pd->duplex);


	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	BUG_ON(!res);
	dev->irq = res->start;

	dev->hard_start_xmit = mv643xx_eth_xmit;
	dev->open = mv643xx_eth_open;
	dev->stop = mv643xx_eth_stop;
	dev->set_multicast_list = mv643xx_eth_set_rx_mode;
	dev->set_mac_address = mv643xx_eth_set_mac_address;
	dev->do_ioctl = mv643xx_eth_ioctl;
	dev->change_mtu = mv643xx_eth_change_mtu;
	dev->tx_timeout = mv643xx_eth_tx_timeout;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = mv643xx_eth_netpoll;
#endif
	dev->watchdog_timeo = 2 * HZ;
	dev->base_addr = 0;

#ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX
	/*
	 * Zero copy can only work if we use Discovery II memory. Else, we will
	 * have to map the buffers to ISA memory which is only 16 MB
	 */
	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM;
#endif

	SET_NETDEV_DEV(dev, &pdev->dev);

	/* Apply MBUS window protection computed by the shared probe. */
	if (mp->shared->win_protect)
		wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);

	err = register_netdev(dev);
	if (err)
		goto out;

	dev_printk(KERN_NOTICE, &dev->dev, "port %d with MAC address %s\n",
		   mp->port_num, print_mac(mac, dev->dev_addr));

	if (dev->features & NETIF_F_SG)
		dev_printk(KERN_NOTICE, &dev->dev, "scatter/gather enabled\n");

	if (dev->features & NETIF_F_IP_CSUM)
		dev_printk(KERN_NOTICE, &dev->dev, "tx checksum offload\n");

#ifdef MV643XX_ETH_NAPI
	dev_printk(KERN_NOTICE, &dev->dev, "napi enabled\n");
#endif

	if (mp->tx_desc_sram_size > 0)
		dev_printk(KERN_NOTICE, &dev->dev, "configured with sram\n");

	return 0;

out:
	free_netdev(dev);

	return err;
}

2629
/*
 * Remove one ethernet port: unregister the net_device, flush any
 * pending tx_timeout work (it references mp), and free the device.
 */
static int mv643xx_eth_remove(struct platform_device *pdev)
{
	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);

	unregister_netdev(mp->dev);
	flush_scheduled_work();
	free_netdev(mp->dev);

	platform_set_drvdata(pdev, NULL);

	return 0;
}

2642
/*
 * Shutdown hook: silence the port for reboot/kexec by masking its
 * interrupts and, if it is running, resetting it.
 */
static void mv643xx_eth_shutdown(struct platform_device *pdev)
{
	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);

	/* Mask all interrupts on ethernet port */
	wrl(mp, INT_MASK(mp->port_num), 0);
	rdl(mp, INT_MASK(mp->port_num));

	if (netif_running(mp->dev))
		port_reset(mp);
}

2654
/* Platform driver for the individual ethernet ports. */
static struct platform_driver mv643xx_eth_driver = {
	.probe		= mv643xx_eth_probe,
	.remove		= mv643xx_eth_remove,
	.shutdown	= mv643xx_eth_shutdown,
	.driver = {
		.name	= MV643XX_ETH_NAME,
		.owner	= THIS_MODULE,
	},
};

2664
/*
 * Module init: register the shared-block driver first (ports depend
 * on it), then the per-port driver; unregister the shared driver
 * again if the port driver fails to register.
 */
static int __init mv643xx_eth_init_module(void)
{
	int rc;

	rc = platform_driver_register(&mv643xx_eth_shared_driver);
	if (!rc) {
		rc = platform_driver_register(&mv643xx_eth_driver);
		if (rc)
			platform_driver_unregister(&mv643xx_eth_shared_driver);
	}

	return rc;
}
module_init(mv643xx_eth_init_module);

2679
/* Module exit: unregister drivers in reverse registration order. */
static void __exit mv643xx_eth_cleanup_module(void)
{
	platform_driver_unregister(&mv643xx_eth_driver);
	platform_driver_unregister(&mv643xx_eth_shared_driver);
}
module_exit(mv643xx_eth_cleanup_module);
L
Linus Torvalds 已提交
2685

2686 2687
MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, "
	      "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek");
2688
MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
L
Lennert Buytenhek 已提交
2689
MODULE_LICENSE("GPL");
2690
MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);
L
Lennert Buytenhek 已提交
2691
MODULE_ALIAS("platform:" MV643XX_ETH_NAME);