mv643xx_eth.c 64.5 KB
Newer Older
L
Linus Torvalds 已提交
1
/*
2
 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
L
Linus Torvalds 已提交
3 4 5
 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
 *
 * Based on the 64360 driver from:
6 7
 * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
 *		      Rabeeh Khoury <rabeeh@marvell.com>
L
Linus Torvalds 已提交
8 9
 *
 * Copyright (C) 2003 PMC-Sierra, Inc.,
10
 *	written by Manish Lachwani
L
Linus Torvalds 已提交
11 12 13
 *
 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
 *
14
 * Copyright (C) 2004-2006 MontaVista Software, Inc.
L
Linus Torvalds 已提交
15 16 17 18 19
 *			   Dale Farnsworth <dale@farnsworth.org>
 *
 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
 *				     <sjhill@realitydiluted.com>
 *
20 21 22
 * Copyright (C) 2007-2008 Marvell Semiconductor
 *			   Lennert Buytenhek <buytenh@marvell.com>
 *
L
Linus Torvalds 已提交
23 24 25 26 27 28 29 30 31 32 33 34 35 36
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
37

L
Linus Torvalds 已提交
38 39
#include <linux/init.h>
#include <linux/dma-mapping.h>
40
#include <linux/in.h>
L
Linus Torvalds 已提交
41 42 43 44 45
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
46
#include <linux/platform_device.h>
47 48 49 50 51 52
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
#include <linux/mv643xx_eth.h>
L
Linus Torvalds 已提交
53 54 55
#include <asm/io.h>
#include <asm/types.h>
#include <asm/system.h>
56

57
/* Driver identification strings (name and version). */
static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.2";
59

60 61 62
#define MV643XX_ETH_CHECKSUM_OFFLOAD_TX
#define MV643XX_ETH_NAPI
#define MV643XX_ETH_TX_FAST_REFILL
63

64
#ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX
65 66 67 68 69 70 71 72
#define MAX_DESCS_PER_SKB	(MAX_SKB_FRAGS + 1)
#else
#define MAX_DESCS_PER_SKB	1
#endif

/*
 * Registers shared between all ports.
 */
73 74 75 76 77 78 79
#define PHY_ADDR			0x0000
#define SMI_REG				0x0004
#define WINDOW_BASE(w)			(0x0200 + ((w) << 3))
#define WINDOW_SIZE(w)			(0x0204 + ((w) << 3))
#define WINDOW_REMAP_HIGH(w)		(0x0280 + ((w) << 2))
#define WINDOW_BAR_ENABLE		0x0290
#define WINDOW_PROTECT(w)		(0x0294 + ((w) << 4))
80 81 82 83

/*
 * Per-port registers.
 */
84
#define PORT_CONFIG(p)			(0x0400 + ((p) << 10))
85
#define  UNICAST_PROMISCUOUS_MODE	0x00000001
86 87 88 89 90 91
#define PORT_CONFIG_EXT(p)		(0x0404 + ((p) << 10))
#define MAC_ADDR_LOW(p)			(0x0414 + ((p) << 10))
#define MAC_ADDR_HIGH(p)		(0x0418 + ((p) << 10))
#define SDMA_CONFIG(p)			(0x041c + ((p) << 10))
#define PORT_SERIAL_CONTROL(p)		(0x043c + ((p) << 10))
#define PORT_STATUS(p)			(0x0444 + ((p) << 10))
92
#define  TX_FIFO_EMPTY			0x00000400
93
#define  TX_IN_PROGRESS			0x00000080
94 95 96 97 98 99
#define  PORT_SPEED_MASK		0x00000030
#define  PORT_SPEED_1000		0x00000010
#define  PORT_SPEED_100			0x00000020
#define  PORT_SPEED_10			0x00000000
#define  FLOW_CONTROL_ENABLED		0x00000008
#define  FULL_DUPLEX			0x00000004
100
#define  LINK_UP			0x00000002
101
#define TXQ_COMMAND(p)			(0x0448 + ((p) << 10))
102 103
#define TXQ_FIX_PRIO_CONF(p)		(0x044c + ((p) << 10))
#define TX_BW_RATE(p)			(0x0450 + ((p) << 10))
104
#define TX_BW_MTU(p)			(0x0458 + ((p) << 10))
105
#define TX_BW_BURST(p)			(0x045c + ((p) << 10))
106
#define INT_CAUSE(p)			(0x0460 + ((p) << 10))
107
#define  INT_TX_END_0			0x00080000
108
#define  INT_TX_END			0x07f80000
109
#define  INT_RX				0x0007fbfc
110
#define  INT_EXT			0x00000002
111
#define INT_CAUSE_EXT(p)		(0x0464 + ((p) << 10))
112 113 114 115
#define  INT_EXT_LINK			0x00100000
#define  INT_EXT_PHY			0x00010000
#define  INT_EXT_TX_ERROR_0		0x00000100
#define  INT_EXT_TX_0			0x00000001
116
#define  INT_EXT_TX			0x0000ffff
117 118 119
#define INT_MASK(p)			(0x0468 + ((p) << 10))
#define INT_MASK_EXT(p)			(0x046c + ((p) << 10))
#define TX_FIFO_URGENT_THRESHOLD(p)	(0x0474 + ((p) << 10))
120 121 122 123
#define TXQ_FIX_PRIO_CONF_MOVED(p)	(0x04dc + ((p) << 10))
#define TX_BW_RATE_MOVED(p)		(0x04e0 + ((p) << 10))
#define TX_BW_MTU_MOVED(p)		(0x04e8 + ((p) << 10))
#define TX_BW_BURST_MOVED(p)		(0x04ec + ((p) << 10))
124
#define RXQ_CURRENT_DESC_PTR(p, q)	(0x060c + ((p) << 10) + ((q) << 4))
125
#define RXQ_COMMAND(p)			(0x0680 + ((p) << 10))
126 127 128 129
#define TXQ_CURRENT_DESC_PTR(p, q)	(0x06c0 + ((p) << 10) + ((q) << 2))
#define TXQ_BW_TOKENS(p, q)		(0x0700 + ((p) << 10) + ((q) << 4))
#define TXQ_BW_CONF(p, q)		(0x0704 + ((p) << 10) + ((q) << 4))
#define TXQ_BW_WRR_CONF(p, q)		(0x0708 + ((p) << 10) + ((q) << 4))
130 131 132 133
#define MIB_COUNTERS(p)			(0x1000 + ((p) << 7))
#define SPECIAL_MCAST_TABLE(p)		(0x1400 + ((p) << 10))
#define OTHER_MCAST_TABLE(p)		(0x1500 + ((p) << 10))
#define UNICAST_TABLE(p)		(0x1600 + ((p) << 10))
134

135 136 137 138

/*
 * SDMA configuration register.
 */
139
#define RX_BURST_SIZE_16_64BIT		(4 << 1)
140 141
#define BLM_RX_NO_SWAP			(1 << 4)
#define BLM_TX_NO_SWAP			(1 << 5)
142
#define TX_BURST_SIZE_16_64BIT		(4 << 22)
143 144 145

#if defined(__BIG_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
146 147
		RX_BURST_SIZE_16_64BIT	|	\
		TX_BURST_SIZE_16_64BIT
148 149
#elif defined(__LITTLE_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
150
		RX_BURST_SIZE_16_64BIT	|	\
151 152
		BLM_RX_NO_SWAP		|	\
		BLM_TX_NO_SWAP		|	\
153
		TX_BURST_SIZE_16_64BIT
154 155 156 157
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif

158 159 160 161 162 163 164

/*
 * Port serial control register.
 */
#define SET_MII_SPEED_TO_100			(1 << 24)
#define SET_GMII_SPEED_TO_1000			(1 << 23)
#define SET_FULL_DUPLEX_MODE			(1 << 21)
165
#define MAX_RX_PACKET_9700BYTE			(5 << 17)
166 167 168 169 170 171 172
#define DISABLE_AUTO_NEG_SPEED_GMII		(1 << 13)
#define DO_NOT_FORCE_LINK_FAIL			(1 << 10)
#define SERIAL_PORT_CONTROL_RESERVED		(1 << 9)
#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL		(1 << 3)
#define DISABLE_AUTO_NEG_FOR_DUPLEX		(1 << 2)
#define FORCE_LINK_PASS				(1 << 1)
#define SERIAL_PORT_ENABLE			(1 << 0)
173

174 175
#define DEFAULT_RX_QUEUE_SIZE		400
#define DEFAULT_TX_QUEUE_SIZE		800
176 177


178 179
/*
 * RX/TX descriptors.
180 181
 */
/*
 * Hardware DMA descriptor layouts.  The field order within each
 * 32-bit word depends on host endianness, hence the two variants.
 */
#if defined(__BIG_ENDIAN)
struct rx_desc {
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u16 buf_size;		/* Buffer size				*/
	u32 cmd_sts;		/* Descriptor command status		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
};

struct tx_desc {
	u16 byte_cnt;		/* buffer byte count			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u32 cmd_sts;		/* Command/status field			*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
};
#elif defined(__LITTLE_ENDIAN)
struct rx_desc {
	u32 cmd_sts;		/* Descriptor command status		*/
	u16 buf_size;		/* Buffer size				*/
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
};

struct tx_desc {
	u32 cmd_sts;		/* Command/status field			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u16 byte_cnt;		/* buffer byte count			*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
};
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif

217
/* RX & TX descriptor command */
218
#define BUFFER_OWNED_BY_DMA		0x80000000
219 220

/* RX & TX descriptor status */
221
#define ERROR_SUMMARY			0x00000001
222 223

/* RX descriptor status */
224 225 226 227
#define LAYER_4_CHECKSUM_OK		0x40000000
#define RX_ENABLE_INTERRUPT		0x20000000
#define RX_FIRST_DESC			0x08000000
#define RX_LAST_DESC			0x04000000
228 229

/* TX descriptor command */
230 231 232 233 234 235 236 237
#define TX_ENABLE_INTERRUPT		0x00800000
#define GEN_CRC				0x00400000
#define TX_FIRST_DESC			0x00200000
#define TX_LAST_DESC			0x00100000
#define ZERO_PADDING			0x00080000
#define GEN_IP_V4_CHECKSUM		0x00040000
#define GEN_TCP_UDP_CHECKSUM		0x00020000
#define UDP_FRAME			0x00010000
238 239
#define MAC_HDR_EXTRA_4_BYTES		0x00008000
#define MAC_HDR_EXTRA_8_BYTES		0x00000200
240

241
#define TX_IHL_SHIFT			11
242 243


244
/* global *******************************************************************/
245
/*
 * State shared by all ports on one MV643XX/Orion controller.
 */
struct mv643xx_eth_shared_private {
	/*
	 * Ethernet controller base address.
	 */
	void __iomem *base;

	/*
	 * Protects access to SMI_REG, which is shared between ports.
	 */
	spinlock_t phy_lock;

	/*
	 * Per-port MBUS window access register value.
	 */
	u32 win_protect;

	/*
	 * Hardware-specific parameters.
	 */
	unsigned int t_clk;		/* clock used for TX bandwidth token computation */
	int extended_rx_coal_limit;
	int tx_bw_control_moved;	/* nonzero: use the *_MOVED bandwidth registers */
};


/* per-port *****************************************************************/
271
/*
 * Software shadow of the hardware MIB counter block; accumulated by
 * reading the (clear-on-read) per-port MIB counter registers.
 */
struct mib_counters {
	u64 good_octets_received;
	u32 bad_octets_received;
	u32 internal_mac_transmit_err;
	u32 good_frames_received;
	u32 bad_frames_received;
	u32 broadcast_frames_received;
	u32 multicast_frames_received;
	u32 frames_64_octets;
	u32 frames_65_to_127_octets;
	u32 frames_128_to_255_octets;
	u32 frames_256_to_511_octets;
	u32 frames_512_to_1023_octets;
	u32 frames_1024_to_max_octets;
	u64 good_octets_sent;
	u32 good_frames_sent;
	u32 excessive_collision;
	u32 multicast_frames_sent;
	u32 broadcast_frames_sent;
	u32 unrec_mac_control_received;
	u32 fc_sent;
	u32 good_fc_received;
	u32 bad_fc_received;
	u32 undersize_received;
	u32 fragments_received;
	u32 oversize_received;
	u32 jabber_received;
	u32 mac_receive_error;
	u32 bad_crc_event;
	u32 collision;
	u32 late_collision;
};

304
/*
 * Per-port receive queue state.  Ring bookkeeping is protected by
 * the owning port's mp->lock.
 */
struct rx_queue {
	int index;			/* queue number within the port */

	int rx_ring_size;		/* number of descriptors in the ring */

	int rx_desc_count;		/* descriptors currently filled with a buffer */
	int rx_curr_desc;		/* next descriptor to process on receive */
	int rx_used_desc;		/* next descriptor to refill */

	struct rx_desc *rx_desc_area;	/* the descriptor ring itself */
	dma_addr_t rx_desc_dma;		/* bus address of the descriptor ring */
	int rx_desc_area_size;		/* ring size in bytes */
	struct sk_buff **rx_skb;	/* skb attached to each descriptor */

	struct timer_list rx_oom;	/* retry timer armed when refill runs out of memory */
};

321
/*
 * Per-port transmit queue state.  Ring bookkeeping is protected by
 * the owning port's mp->lock.
 */
struct tx_queue {
	int index;			/* queue number within the port */

	int tx_ring_size;		/* number of descriptors in the ring */

	int tx_desc_count;		/* descriptors currently in flight */
	int tx_curr_desc;		/* next descriptor to hand to hardware */
	int tx_used_desc;		/* next descriptor to reclaim */

	struct tx_desc *tx_desc_area;	/* the descriptor ring itself */
	dma_addr_t tx_desc_dma;		/* bus address of the descriptor ring */
	int tx_desc_area_size;		/* ring size in bytes */
	struct sk_buff **tx_skb;	/* skb to free when each descriptor completes */
};

/*
 * Per-port driver private state.
 */
struct mv643xx_eth_private {
	struct mv643xx_eth_shared_private *shared;
	int port_num;			/* index of this port on the controller */

	struct net_device *dev;

	struct mv643xx_eth_shared_private *shared_smi;
	int phy_addr;			/* PHY address for SMI accesses */

	spinlock_t lock;		/* protects RX/TX ring state */

	struct mib_counters mib_counters;
	struct work_struct tx_timeout_task;
	struct mii_if_info mii;

	/*
	 * RX state.
	 */
	int default_rx_ring_size;
	unsigned long rx_desc_sram_addr;
	int rx_desc_sram_size;
	u8 rxq_mask;			/* bitmask of RX queues in use */
	int rxq_primary;
	struct napi_struct napi;
	struct rx_queue rxq[8];

	/*
	 * TX state.
	 */
	int default_tx_ring_size;
	unsigned long tx_desc_sram_addr;
	int tx_desc_sram_size;
	u8 txq_mask;			/* bitmask of TX queues in use */
	int txq_primary;		/* queue subject to netif flow control */
	struct tx_queue txq[8];
#ifdef MV643XX_ETH_TX_FAST_REFILL
	int tx_clean_threshold;		/* poll counter gating periodic TX reclaim */
#endif
};
L
Linus Torvalds 已提交
375

376

377
/* port register accessors **************************************************/
378
/* Read a 32-bit controller register at 'offset' from the shared base. */
static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->shared->base + offset);
}
382

383
/* Write a 32-bit controller register at 'offset' from the shared base. */
static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->shared->base + offset);
}
387 388


389
/* rxq/txq helper functions *************************************************/
390
/*
 * Map an RX queue back to its owning port.  Works because 'rxq'
 * points at element rxq->index of the port's rxq[] array.
 */
static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
{
	return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
}
394

395 396
/*
 * Map a TX queue back to its owning port.  Works because 'txq'
 * points at element txq->index of the port's txq[] array.
 */
static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
{
	return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);
}

400
/* Set this queue's enable bit in the port's RXQ command register. */
static void rxq_enable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	wrl(mp, RXQ_COMMAND(mp->port_num), 1 << rxq->index);
}
L
Linus Torvalds 已提交
405

406 407 408
static void rxq_disable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
409
	u8 mask = 1 << rxq->index;
L
Linus Torvalds 已提交
410

411 412 413
	wrl(mp, RXQ_COMMAND(mp->port_num), mask << 8);
	while (rdl(mp, RXQ_COMMAND(mp->port_num)) & mask)
		udelay(10);
414 415
}

416 417 418 419 420 421 422 423 424 425 426
/*
 * Point the hardware's current-TX-descriptor register at the
 * software's current descriptor for this queue.
 */
static void txq_reset_hw_ptr(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	u32 desc_addr;

	desc_addr = (u32)txq->tx_desc_dma +
		    txq->tx_curr_desc * sizeof(struct tx_desc);

	wrl(mp, TXQ_CURRENT_DESC_PTR(mp->port_num, txq->index), desc_addr);
}

427
/* Set this queue's enable bit in the port's TXQ command register. */
static void txq_enable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	wrl(mp, TXQ_COMMAND(mp->port_num), 1 << txq->index);
}

433
static void txq_disable(struct tx_queue *txq)
L
Linus Torvalds 已提交
434
{
435
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
436
	u8 mask = 1 << txq->index;
437

438 439 440 441 442 443 444 445 446
	wrl(mp, TXQ_COMMAND(mp->port_num), mask << 8);
	while (rdl(mp, TXQ_COMMAND(mp->port_num)) & mask)
		udelay(10);
}

static void __txq_maybe_wake(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);

447 448 449 450 451 452
	/*
	 * netif_{stop,wake}_queue() flow control only applies to
	 * the primary queue.
	 */
	BUG_ON(txq->index != mp->txq_primary);

453 454
	if (txq->tx_ring_size - txq->tx_desc_count >= MAX_DESCS_PER_SKB)
		netif_wake_queue(mp->dev);
L
Linus Torvalds 已提交
455 456
}

457 458

/* rx ***********************************************************************/
459
static void txq_reclaim(struct tx_queue *txq, int force);
460

461
/*
 * Refill the RX ring with freshly allocated receive buffers until it
 * is full.  Runs under mp->lock.  If allocation fails before the ring
 * is full, the rx_oom timer is armed to retry later.
 */
static void rxq_refill(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	unsigned long flags;

	spin_lock_irqsave(&mp->lock, flags);

	while (rxq->rx_desc_count < rxq->rx_ring_size) {
		int skb_size;
		struct sk_buff *skb;
		int unaligned;
		int rx;

		/*
		 * Reserve 2+14 bytes for an ethernet header (the
		 * hardware automatically prepends 2 bytes of dummy
		 * data to each received packet), 4 bytes for a VLAN
		 * header, and 4 bytes for the trailing FCS -- 24
		 * bytes total.
		 */
		skb_size = mp->dev->mtu + 24;

		/* Over-allocate so the data pointer can be cache-aligned. */
		skb = dev_alloc_skb(skb_size + dma_get_cache_alignment() - 1);
		if (skb == NULL)
			break;

		unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
		if (unaligned)
			skb_reserve(skb, dma_get_cache_alignment() - unaligned);

		rxq->rx_desc_count++;
		rx = rxq->rx_used_desc;
		rxq->rx_used_desc = (rx + 1) % rxq->rx_ring_size;

		rxq->rx_desc_area[rx].buf_ptr = dma_map_single(NULL, skb->data,
						skb_size, DMA_FROM_DEVICE);
		rxq->rx_desc_area[rx].buf_size = skb_size;
		rxq->rx_skb[rx] = skb;
		wmb();
		/* Hand ownership to the DMA engine only after the buffer
		 * pointer and size writes above are visible. */
		rxq->rx_desc_area[rx].cmd_sts = BUFFER_OWNED_BY_DMA |
						RX_ENABLE_INTERRUPT;
		wmb();

		/*
		 * The hardware automatically prepends 2 bytes of
		 * dummy data to each received packet, so that the
		 * IP header ends up 16-byte aligned.
		 */
		skb_reserve(skb, 2);
	}

	/* Ring still not full: allocation failed -- retry in ~100ms. */
	if (rxq->rx_desc_count != rxq->rx_ring_size)
		mod_timer(&rxq->rx_oom, jiffies + (HZ / 10));

	spin_unlock_irqrestore(&mp->lock, flags);
}

518
/* rx_oom timer callback: retry refilling the queue passed in 'data'. */
static inline void rxq_refill_timer_wrapper(unsigned long data)
{
	struct rx_queue *rxq = (struct rx_queue *)data;

	rxq_refill(rxq);
}

523
/*
 * Process up to 'budget' received packets from 'rxq': pass good frames
 * up the stack, drop errored or multi-descriptor frames, then refill
 * the ring.  Returns the number of descriptors processed.
 */
static int rxq_process(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	struct net_device_stats *stats = &mp->dev->stats;
	int rx;

	rx = 0;
	while (rx < budget && rxq->rx_desc_count) {
		struct rx_desc *rx_desc;
		unsigned int cmd_sts;
		struct sk_buff *skb;
		unsigned long flags;

		spin_lock_irqsave(&mp->lock, flags);

		rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];

		/* Stop at the first descriptor still owned by the DMA engine. */
		cmd_sts = rx_desc->cmd_sts;
		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
			spin_unlock_irqrestore(&mp->lock, flags);
			break;
		}
		rmb();

		skb = rxq->rx_skb[rxq->rx_curr_desc];
		rxq->rx_skb[rxq->rx_curr_desc] = NULL;

		rxq->rx_curr_desc = (rxq->rx_curr_desc + 1) % rxq->rx_ring_size;

		spin_unlock_irqrestore(&mp->lock, flags);

		dma_unmap_single(NULL, rx_desc->buf_ptr + 2,
				 mp->dev->mtu + 24, DMA_FROM_DEVICE);
		rxq->rx_desc_count--;
		rx++;

		/*
		 * Update statistics.
		 *
		 * Note that the descriptor byte count includes 2 dummy
		 * bytes automatically inserted by the hardware at the
		 * start of the packet (which we don't count), and a 4
		 * byte CRC at the end of the packet (which we do count).
		 */
		stats->rx_packets++;
		stats->rx_bytes += rx_desc->byte_cnt - 2;

		/*
		 * In case we received a packet without first / last bits
		 * on, or the error summary bit is set, the packet needs
		 * to be dropped.
		 */
		if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
					(RX_FIRST_DESC | RX_LAST_DESC))
				|| (cmd_sts & ERROR_SUMMARY)) {
			stats->rx_dropped++;

			if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
				(RX_FIRST_DESC | RX_LAST_DESC)) {
				if (net_ratelimit())
					dev_printk(KERN_ERR, &mp->dev->dev,
						   "received packet spanning "
						   "multiple descriptors\n");
			}

			if (cmd_sts & ERROR_SUMMARY)
				stats->rx_errors++;

			dev_kfree_skb_irq(skb);
		} else {
			/*
			 * The -4 is for the CRC in the trailer of the
			 * received packet
			 */
			skb_put(skb, rx_desc->byte_cnt - 2 - 4);

			if (cmd_sts & LAYER_4_CHECKSUM_OK) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				skb->csum = htons(
					(cmd_sts & 0x0007fff8) >> 3);
			}
			skb->protocol = eth_type_trans(skb, mp->dev);
#ifdef MV643XX_ETH_NAPI
			netif_receive_skb(skb);
#else
			netif_rx(skb);
#endif
		}

		mp->dev->last_rx = jiffies;
	}

	rxq_refill(rxq);

	return rx;
}

620 621
#ifdef MV643XX_ETH_NAPI
/*
 * NAPI poll handler.  Every few invocations, reclaim completed TX
 * descriptors across all enabled TX queues; then process received
 * packets from all enabled RX queues (highest-numbered first) until
 * 'budget' is exhausted.  If the budget is not used up, complete the
 * poll and re-enable the port's interrupts.
 */
static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
{
	struct mv643xx_eth_private *mp;
	int rx;
	int i;

	mp = container_of(napi, struct mv643xx_eth_private, napi);

#ifdef MV643XX_ETH_TX_FAST_REFILL
	/* Piggy-back TX reclaim on every sixth poll. */
	if (++mp->tx_clean_threshold > 5) {
		mp->tx_clean_threshold = 0;
		for (i = 0; i < 8; i++)
			if (mp->txq_mask & (1 << i))
				txq_reclaim(mp->txq + i, 0);

		if (netif_carrier_ok(mp->dev)) {
			spin_lock_irq(&mp->lock);
			__txq_maybe_wake(mp->txq + mp->txq_primary);
			spin_unlock_irq(&mp->lock);
		}
	}
#endif

	rx = 0;
	for (i = 7; rx < budget && i >= 0; i--)
		if (mp->rxq_mask & (1 << i))
			rx += rxq_process(mp->rxq + i, budget - rx);

	if (rx < budget) {
		netif_rx_complete(mp->dev, napi);
		wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);
	}

	return rx;
}
656
#endif
657

658 659 660

/* tx ***********************************************************************/
static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
L
Linus Torvalds 已提交
661
{
662
	int frag;
L
Linus Torvalds 已提交
663

664
	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
665 666
		skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
		if (fragp->size <= 8 && fragp->page_offset & 7)
667
			return 1;
L
Linus Torvalds 已提交
668
	}
669

670 671
	return 0;
}
672

673
static int txq_alloc_desc_index(struct tx_queue *txq)
674 675
{
	int tx_desc_curr;
676

677
	BUG_ON(txq->tx_desc_count >= txq->tx_ring_size);
L
Linus Torvalds 已提交
678

679 680
	tx_desc_curr = txq->tx_curr_desc;
	txq->tx_curr_desc = (tx_desc_curr + 1) % txq->tx_ring_size;
681

682
	BUG_ON(txq->tx_curr_desc == txq->tx_used_desc);
683

684 685
	return tx_desc_curr;
}
686

687
/*
 * Fill in TX descriptors for all page fragments of 'skb'.  Only the
 * last fragment's descriptor gets TX_LAST_DESC/TX_ENABLE_INTERRUPT
 * and keeps the skb pointer, so the skb is freed exactly once on TX
 * completion.
 */
static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int frag;

	for (frag = 0; frag < nr_frags; frag++) {
		skb_frag_t *this_frag;
		int tx_index;
		struct tx_desc *desc;

		this_frag = &skb_shinfo(skb)->frags[frag];
		tx_index = txq_alloc_desc_index(txq);
		desc = &txq->tx_desc_area[tx_index];

		/*
		 * The last fragment will generate an interrupt
		 * which will free the skb on TX completion.
		 */
		if (frag == nr_frags - 1) {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA |
					ZERO_PADDING | TX_LAST_DESC |
					TX_ENABLE_INTERRUPT;
			txq->tx_skb[tx_index] = skb;
		} else {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA;
			txq->tx_skb[tx_index] = NULL;
		}

		desc->l4i_chk = 0;
		desc->byte_cnt = this_frag->size;
		desc->buf_ptr = dma_map_page(NULL, this_frag->page,
						this_frag->page_offset,
						this_frag->size,
						DMA_TO_DEVICE);
	}
}

724 725 726 727
/*
 * Reinterpret a 16-bit checksum as a big-endian value without byte
 * swapping; the __force cast only silences sparse's endianness check.
 */
static inline __be16 sum16_as_be(__sum16 sum)
{
	return (__force __be16)sum;
}
L
Linus Torvalds 已提交
728

729
/*
 * Build and hand over TX descriptors for 'skb' (head plus any page
 * fragments).  The head descriptor's cmd_sts -- carrying the DMA
 * ownership bit -- is written last, after a wmb(), so the hardware
 * never sees a partially-built chain.  Called under mp->lock with
 * enough free descriptors guaranteed by the caller.
 */
static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int tx_index;
	struct tx_desc *desc;
	u32 cmd_sts;
	int length;

	cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;

	tx_index = txq_alloc_desc_index(txq);
	desc = &txq->tx_desc_area[tx_index];

	if (nr_frags) {
		txq_submit_frag_skb(txq, skb);

		length = skb_headlen(skb);
		txq->tx_skb[tx_index] = NULL;
	} else {
		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
		length = skb->len;
		txq->tx_skb[tx_index] = skb;
	}

	desc->byte_cnt = length;
	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int mac_hdr_len;

		BUG_ON(skb->protocol != htons(ETH_P_IP) &&
		       skb->protocol != htons(ETH_P_8021Q));

		cmd_sts |= GEN_TCP_UDP_CHECKSUM |
			   GEN_IP_V4_CHECKSUM   |
			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;

		/* Tell the hardware how far the IP header is shifted
		 * beyond the standard 14-byte ethernet header. */
		mac_hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
		switch (mac_hdr_len - ETH_HLEN) {
		case 0:
			break;
		case 4:
			cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
			break;
		case 8:
			cmd_sts |= MAC_HDR_EXTRA_8_BYTES;
			break;
		case 12:
			cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
			cmd_sts |= MAC_HDR_EXTRA_8_BYTES;
			break;
		default:
			if (net_ratelimit())
				dev_printk(KERN_ERR, &txq_to_mp(txq)->dev->dev,
				   "mac header length is %d?!\n", mac_hdr_len);
			break;
		}

		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_UDP:
			cmd_sts |= UDP_FRAME;
			desc->l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
			break;
		case IPPROTO_TCP:
			desc->l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
			break;
		default:
			BUG();
		}
	} else {
		/* Errata BTS #50, IHL must be 5 if no HW checksum */
		cmd_sts |= 5 << TX_IHL_SHIFT;
		desc->l4i_chk = 0;
	}

	/* ensure all other descriptors are written before first cmd_sts */
	wmb();
	desc->cmd_sts = cmd_sts;

	/* clear TX_END interrupt status */
	wrl(mp, INT_CAUSE(mp->port_num), ~(INT_TX_END_0 << txq->index));
	rdl(mp, INT_CAUSE(mp->port_num));

	/* ensure all descriptors are written before poking hardware */
	wmb();
	txq_enable(txq);

	txq->tx_desc_count += nr_frags + 1;
}

L
Lennert Buytenhek 已提交
820
/*
 * hard_start_xmit handler: transmit 'skb' on the primary TX queue.
 * Linearizes skbs with tiny unaligned fragments first, drops the
 * packet if the ring is unexpectedly full, and stops the netdev
 * queue when too few descriptors remain for another worst-case skb.
 */
static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct tx_queue *txq;
	unsigned long flags;

	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
		stats->tx_dropped++;
		dev_printk(KERN_DEBUG, &dev->dev,
			   "failed to linearize skb with tiny "
			   "unaligned fragment\n");
		return NETDEV_TX_BUSY;
	}

	spin_lock_irqsave(&mp->lock, flags);

	txq = mp->txq + mp->txq_primary;

	if (txq->tx_ring_size - txq->tx_desc_count < MAX_DESCS_PER_SKB) {
		spin_unlock_irqrestore(&mp->lock, flags);
		/* Should not happen: the queue is stopped before it fills. */
		if (txq->index == mp->txq_primary && net_ratelimit())
			dev_printk(KERN_ERR, &dev->dev,
				   "primary tx queue full?!\n");
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	txq_submit_skb(txq, skb);
	stats->tx_bytes += skb->len;
	stats->tx_packets++;
	dev->trans_start = jiffies;

	if (txq->index == mp->txq_primary) {
		int entries_left;

		entries_left = txq->tx_ring_size - txq->tx_desc_count;
		if (entries_left < MAX_DESCS_PER_SKB)
			netif_stop_queue(dev);
	}

	spin_unlock_irqrestore(&mp->lock, flags);

	return NETDEV_TX_OK;
}

866

867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889
/* tx rate control **********************************************************/
/*
 * Set total maximum TX rate (shared by all TX queues for this port)
 * to 'rate' bits per second, with a maximum burst of 'burst' bytes.
 */
static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
{
	int token_rate;
	int mtu;
	int bucket_size;

	token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	mtu = (mp->dev->mtu + 255) >> 8;
	if (mtu > 63)
		mtu = 63;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

890 891 892 893 894 895 896 897 898
	if (mp->shared->tx_bw_control_moved) {
		wrl(mp, TX_BW_RATE_MOVED(mp->port_num), token_rate);
		wrl(mp, TX_BW_MTU_MOVED(mp->port_num), mtu);
		wrl(mp, TX_BW_BURST_MOVED(mp->port_num), bucket_size);
	} else {
		wrl(mp, TX_BW_RATE(mp->port_num), token_rate);
		wrl(mp, TX_BW_MTU(mp->port_num), mtu);
		wrl(mp, TX_BW_BURST(mp->port_num), bucket_size);
	}
899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914
}

/*
 * Program the per-queue TX rate limit ('rate' in bits per second,
 * 'burst' in bytes).
 */
static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int tokens;
	int bucket;

	/* 64 tokens per t_clk tick; the rate field saturates at 1023. */
	tokens = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
	if (tokens > 1023)
		tokens = 1023;

	/* Burst bucket is in units of 256 bytes, max field value 65535. */
	bucket = (burst + 255) >> 8;
	if (bucket > 65535)
		bucket = 65535;

	wrl(mp, TXQ_BW_TOKENS(mp->port_num, txq->index), tokens << 14);
	wrl(mp, TXQ_BW_CONF(mp->port_num, txq->index),
			(bucket << 10) | tokens);
}

static void txq_set_fixed_prio_mode(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int off;
	u32 val;

	/*
	 * Turn on fixed priority mode.
	 */
929 930 931 932
	if (mp->shared->tx_bw_control_moved)
		off = TXQ_FIX_PRIO_CONF_MOVED(mp->port_num);
	else
		off = TXQ_FIX_PRIO_CONF(mp->port_num);
933 934

	val = rdl(mp, off);
935
	val |= 1 << txq->index;
936 937 938 939 940 941 942 943 944 945 946 947
	wrl(mp, off, val);
}

static void txq_set_wrr(struct tx_queue *txq, int weight)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int off;
	u32 val;

	/*
	 * Turn off fixed priority mode.
	 */
948 949 950 951
	if (mp->shared->tx_bw_control_moved)
		off = TXQ_FIX_PRIO_CONF_MOVED(mp->port_num);
	else
		off = TXQ_FIX_PRIO_CONF(mp->port_num);
952 953

	val = rdl(mp, off);
954
	val &= ~(1 << txq->index);
955 956 957 958 959
	wrl(mp, off, val);

	/*
	 * Configure WRR weight for this queue.
	 */
960
	off = TXQ_BW_WRR_CONF(mp->port_num, txq->index);
961 962 963 964 965 966 967

	val = rdl(mp, off);
	val = (val & ~0xff) | (weight & 0xff);
	wrl(mp, off, val);
}


968
/* mii management interface *************************************************/
L
Lennert Buytenhek 已提交
969 970 971 972
#define SMI_BUSY		0x10000000
#define SMI_READ_VALID		0x08000000
#define SMI_OPCODE_READ		0x04000000
#define SMI_OPCODE_WRITE	0x00000000
973

L
Lennert Buytenhek 已提交
974 975
static void smi_reg_read(struct mv643xx_eth_private *mp, unsigned int addr,
			 unsigned int reg, unsigned int *value)
L
Linus Torvalds 已提交
976
{
977
	void __iomem *smi_reg = mp->shared_smi->base + SMI_REG;
978
	unsigned long flags;
L
Linus Torvalds 已提交
979 980
	int i;

981 982 983 984
	/* the SMI register is a shared resource */
	spin_lock_irqsave(&mp->shared_smi->phy_lock, flags);

	/* wait for the SMI register to become available */
985
	for (i = 0; readl(smi_reg) & SMI_BUSY; i++) {
986
		if (i == 1000) {
987 988 989
			printk("%s: PHY busy timeout\n", mp->dev->name);
			goto out;
		}
990
		udelay(10);
L
Linus Torvalds 已提交
991 992
	}

L
Lennert Buytenhek 已提交
993
	writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);
L
Linus Torvalds 已提交
994

995
	/* now wait for the data to be valid */
996
	for (i = 0; !(readl(smi_reg) & SMI_READ_VALID); i++) {
997
		if (i == 1000) {
998 999 1000
			printk("%s: PHY read timeout\n", mp->dev->name);
			goto out;
		}
1001
		udelay(10);
1002 1003 1004 1005 1006
	}

	*value = readl(smi_reg) & 0xffff;
out:
	spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags);
L
Linus Torvalds 已提交
1007 1008
}

L
Lennert Buytenhek 已提交
1009 1010 1011
static void smi_reg_write(struct mv643xx_eth_private *mp,
			  unsigned int addr,
			  unsigned int reg, unsigned int value)
L
Linus Torvalds 已提交
1012
{
1013
	void __iomem *smi_reg = mp->shared_smi->base + SMI_REG;
1014
	unsigned long flags;
L
Linus Torvalds 已提交
1015 1016
	int i;

1017 1018 1019 1020
	/* the SMI register is a shared resource */
	spin_lock_irqsave(&mp->shared_smi->phy_lock, flags);

	/* wait for the SMI register to become available */
1021
	for (i = 0; readl(smi_reg) & SMI_BUSY; i++) {
1022
		if (i == 1000) {
1023 1024 1025
			printk("%s: PHY busy timeout\n", mp->dev->name);
			goto out;
		}
1026
		udelay(10);
L
Linus Torvalds 已提交
1027 1028
	}

L
Lennert Buytenhek 已提交
1029 1030
	writel(SMI_OPCODE_WRITE | (reg << 21) |
		(addr << 16) | (value & 0xffff), smi_reg);
1031 1032 1033
out:
	spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags);
}
L
Linus Torvalds 已提交
1034

1035 1036

/* mib counters *************************************************************/
L
Lennert Buytenhek 已提交
1037
/* Read one 32-bit MIB counter register at byte offset 'offset'. */
static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
{
	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
}

L
Lennert Buytenhek 已提交
1042
/*
 * Clear the hardware MIB counters by reading every register in the
 * 0x80-byte counter block (the counters are presumably clear-on-read,
 * as mib_counters_update() accumulates deltas -- TODO confirm against
 * the datasheet).
 */
static void mib_counters_clear(struct mv643xx_eth_private *mp)
{
	int i;

	for (i = 0; i < 0x80; i += 4)
		mib_read(mp, i);
}
1049

L
Lennert Buytenhek 已提交
1050
/*
 * Accumulate the hardware MIB counters into the software copy in
 * mp->mib_counters.  The two octet counters are 64-bit and are read
 * as a low/high register pair.
 */
static void mib_counters_update(struct mv643xx_eth_private *mp)
{
	struct mib_counters *p = &mp->mib_counters;

	p->good_octets_received += mib_read(mp, 0x00);
	p->good_octets_received += (u64)mib_read(mp, 0x04) << 32;
	p->bad_octets_received += mib_read(mp, 0x08);
	p->internal_mac_transmit_err += mib_read(mp, 0x0c);
	p->good_frames_received += mib_read(mp, 0x10);
	p->bad_frames_received += mib_read(mp, 0x14);
	p->broadcast_frames_received += mib_read(mp, 0x18);
	p->multicast_frames_received += mib_read(mp, 0x1c);
	p->frames_64_octets += mib_read(mp, 0x20);
	p->frames_65_to_127_octets += mib_read(mp, 0x24);
	p->frames_128_to_255_octets += mib_read(mp, 0x28);
	p->frames_256_to_511_octets += mib_read(mp, 0x2c);
	p->frames_512_to_1023_octets += mib_read(mp, 0x30);
	p->frames_1024_to_max_octets += mib_read(mp, 0x34);
	p->good_octets_sent += mib_read(mp, 0x38);
	p->good_octets_sent += (u64)mib_read(mp, 0x3c) << 32;
	p->good_frames_sent += mib_read(mp, 0x40);
	p->excessive_collision += mib_read(mp, 0x44);
	p->multicast_frames_sent += mib_read(mp, 0x48);
	p->broadcast_frames_sent += mib_read(mp, 0x4c);
	p->unrec_mac_control_received += mib_read(mp, 0x50);
	p->fc_sent += mib_read(mp, 0x54);
	p->good_fc_received += mib_read(mp, 0x58);
	p->bad_fc_received += mib_read(mp, 0x5c);
	p->undersize_received += mib_read(mp, 0x60);
	p->fragments_received += mib_read(mp, 0x64);
	p->oversize_received += mib_read(mp, 0x68);
	p->jabber_received += mib_read(mp, 0x6c);
	p->mac_receive_error += mib_read(mp, 0x70);
	p->bad_crc_event += mib_read(mp, 0x74);
	p->collision += mib_read(mp, 0x78);
	p->late_collision += mib_read(mp, 0x7c);
}

1088 1089

/* ethtool ******************************************************************/
1090
/*
 * Description of one ethtool statistic: its name, size, and where to
 * find it -- either inside struct net_device (netdev_off >= 0) or
 * inside struct mv643xx_eth_private (mp_off >= 0).  Exactly one of
 * the two offsets is valid; the other is -1.
 */
struct mv643xx_eth_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int netdev_off;
	int mp_off;
};

/* Statistic sourced from the generic net_device stats. */
#define SSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct net_device_stats, m),		\
	  offsetof(struct net_device, stats.m), -1 }

/* Statistic sourced from the hardware MIB counter mirror. */
#define MIBSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct mib_counters, m),		\
	  -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }

static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
	SSTAT(rx_packets),
	SSTAT(tx_packets),
	SSTAT(rx_bytes),
	SSTAT(tx_bytes),
	SSTAT(rx_errors),
	SSTAT(tx_errors),
	SSTAT(rx_dropped),
	SSTAT(tx_dropped),
	MIBSTAT(good_octets_received),
	MIBSTAT(bad_octets_received),
	MIBSTAT(internal_mac_transmit_err),
	MIBSTAT(good_frames_received),
	MIBSTAT(bad_frames_received),
	MIBSTAT(broadcast_frames_received),
	MIBSTAT(multicast_frames_received),
	MIBSTAT(frames_64_octets),
	MIBSTAT(frames_65_to_127_octets),
	MIBSTAT(frames_128_to_255_octets),
	MIBSTAT(frames_256_to_511_octets),
	MIBSTAT(frames_512_to_1023_octets),
	MIBSTAT(frames_1024_to_max_octets),
	MIBSTAT(good_octets_sent),
	MIBSTAT(good_frames_sent),
	MIBSTAT(excessive_collision),
	MIBSTAT(multicast_frames_sent),
	MIBSTAT(broadcast_frames_sent),
	MIBSTAT(unrec_mac_control_received),
	MIBSTAT(fc_sent),
	MIBSTAT(good_fc_received),
	MIBSTAT(bad_fc_received),
	MIBSTAT(undersize_received),
	MIBSTAT(fragments_received),
	MIBSTAT(oversize_received),
	MIBSTAT(jabber_received),
	MIBSTAT(mac_receive_error),
	MIBSTAT(bad_crc_event),
	MIBSTAT(collision),
	MIBSTAT(late_collision),
};

1146
/* ethtool get_settings for ports with an attached PHY (via mii lib). */
static int mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int err;

	spin_lock_irq(&mp->lock);
	err = mii_ethtool_gset(&mp->mii, cmd);
	spin_unlock_irq(&mp->lock);

	/*
	 * The MAC does not support 1000baseT_Half.
	 */
	cmd->supported &= ~SUPPORTED_1000baseT_Half;
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	return err;
}

1164 1165
/*
 * ethtool get_settings for PHY-less ports: report the link parameters
 * straight from the port status register.  Speed/duplex are fixed by
 * the hardware configuration, so autoneg is reported as disabled.
 */
static int mv643xx_eth_get_settings_phyless(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 port_status;

	port_status = rdl(mp, PORT_STATUS(mp->port_num));

	cmd->supported = SUPPORTED_MII;
	cmd->advertising = ADVERTISED_MII;
	switch (port_status & PORT_SPEED_MASK) {
	case PORT_SPEED_10:
		cmd->speed = SPEED_10;
		break;
	case PORT_SPEED_100:
		cmd->speed = SPEED_100;
		break;
	case PORT_SPEED_1000:
		cmd->speed = SPEED_1000;
		break;
	default:
		cmd->speed = -1;	/* unknown speed encoding */
		break;
	}
	cmd->duplex = (port_status & FULL_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
	cmd->port = PORT_MII;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_DISABLE;
	cmd->maxtxpkt = 1;
	cmd->maxrxpkt = 1;

	return 0;
}

1198
/* ethtool set_settings for ports with an attached PHY (via mii lib). */
static int mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int err;

	/*
	 * The MAC does not support 1000baseT_Half.
	 */
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	spin_lock_irq(&mp->lock);
	err = mii_ethtool_sset(&mp->mii, cmd);
	spin_unlock_irq(&mp->lock);

	return err;
}
L
Linus Torvalds 已提交
1214

1215 1216 1217 1218 1219
/* PHY-less ports have fixed link parameters; nothing can be changed. */
static int mv643xx_eth_set_settings_phyless(struct net_device *dev, struct ethtool_cmd *cmd)
{
	return -EINVAL;
}

L
Lennert Buytenhek 已提交
1220 1221
/* ethtool get_drvinfo: fill in driver name/version/bus identification. */
static void mv643xx_eth_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *drvinfo)
{
	strncpy(drvinfo->driver,  mv643xx_eth_driver_name, 32);
	strncpy(drvinfo->version, mv643xx_eth_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, "platform", 32);
	drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
}
L
Linus Torvalds 已提交
1229

L
Lennert Buytenhek 已提交
1230
/* ethtool nway_reset: restart PHY autonegotiation via the mii library. */
static int mv643xx_eth_nway_reset(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	return mii_nway_restart(&mp->mii);
}
L
Linus Torvalds 已提交
1236

1237 1238 1239 1240 1241
/* No PHY, so there is no autonegotiation to restart. */
static int mv643xx_eth_nway_reset_phyless(struct net_device *dev)
{
	return -EINVAL;
}

1242 1243
/* ethtool get_link: query link state from the PHY via the mii library. */
static u32 mv643xx_eth_get_link(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	return mii_link_ok(&mp->mii);
}
L
Linus Torvalds 已提交
1248

1249 1250 1251 1252 1253
/* PHY-less ports are treated as always having link. */
static u32 mv643xx_eth_get_link_phyless(struct net_device *dev)
{
	return 1;
}

L
Lennert Buytenhek 已提交
1254 1255
/* ethtool get_strings: copy out the statistic names for ETH_SS_STATS. */
static void mv643xx_eth_get_strings(struct net_device *dev,
				    uint32_t stringset, uint8_t *data)
{
	int i;

	if (stringset == ETH_SS_STATS) {
		for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
				mv643xx_eth_stats[i].stat_string,
				ETH_GSTRING_LEN);
		}
	}
}
L
Linus Torvalds 已提交
1267

L
Lennert Buytenhek 已提交
1268 1269 1270
/*
 * ethtool get_ethtool_stats: refresh the MIB counter mirror, then copy
 * each statistic out of either the net_device stats or the private MIB
 * mirror, according to the offsets in mv643xx_eth_stats[].
 */
static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
					  struct ethtool_stats *stats,
					  uint64_t *data)
{
	/*
	 * fix: use netdev_priv() like every other function in this file
	 * instead of dereferencing dev->priv directly.
	 */
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int i;

	mib_counters_update(mp);

	for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
		const struct mv643xx_eth_stats *stat;
		void *p;

		stat = mv643xx_eth_stats + i;

		/* exactly one of the two offsets is valid (other is -1) */
		if (stat->netdev_off >= 0)
			p = ((void *)mp->dev) + stat->netdev_off;
		else
			p = ((void *)mp) + stat->mp_off;

		data[i] = (stat->sizeof_stat == 8) ?
				*(uint64_t *)p : *(uint32_t *)p;
	}
}
L
Linus Torvalds 已提交
1292

L
Lennert Buytenhek 已提交
1293
/* ethtool get_sset_count: only the statistics string set is supported. */
static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(mv643xx_eth_stats);

	return -EOPNOTSUPP;
}
L
Linus Torvalds 已提交
1300

1301
/* ethtool operations for ports that have a PHY. */
static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
	.get_settings		= mv643xx_eth_get_settings,
	.set_settings		= mv643xx_eth_set_settings,
	.get_drvinfo		= mv643xx_eth_get_drvinfo,
	.nway_reset		= mv643xx_eth_nway_reset,
	.get_link		= mv643xx_eth_get_link,
	.set_sg			= ethtool_op_set_sg,
	.get_strings		= mv643xx_eth_get_strings,
	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
	.get_sset_count		= mv643xx_eth_get_sset_count,
};
L
Linus Torvalds 已提交
1312

1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324
/* ethtool operations for PHY-less ports (fixed link parameters). */
static const struct ethtool_ops mv643xx_eth_ethtool_ops_phyless = {
	.get_settings		= mv643xx_eth_get_settings_phyless,
	.set_settings		= mv643xx_eth_set_settings_phyless,
	.get_drvinfo		= mv643xx_eth_get_drvinfo,
	.nway_reset		= mv643xx_eth_nway_reset_phyless,
	.get_link		= mv643xx_eth_get_link_phyless,
	.set_sg			= ethtool_op_set_sg,
	.get_strings		= mv643xx_eth_get_strings,
	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
	.get_sset_count		= mv643xx_eth_get_sset_count,
};

1325

1326
/* address handling *********************************************************/
1327
/*
 * Read the port's programmed unicast MAC address out of the
 * MAC_ADDR_HIGH/LOW register pair into addr[0..5] (network order:
 * addr[0] is the most significant byte).
 */
static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
{
	unsigned int mac_h;
	unsigned int mac_l;

	mac_h = rdl(mp, MAC_ADDR_HIGH(mp->port_num));
	mac_l = rdl(mp, MAC_ADDR_LOW(mp->port_num));

	addr[0] = (mac_h >> 24) & 0xff;
	addr[1] = (mac_h >> 16) & 0xff;
	addr[2] = (mac_h >> 8) & 0xff;
	addr[3] = mac_h & 0xff;
	addr[4] = (mac_l >> 8) & 0xff;
	addr[5] = mac_l & 0xff;
}
L
Linus Torvalds 已提交
1342

1343
/*
 * Clear all three hardware address filter tables for this port:
 * the special/other multicast tables (0x100 bytes each) and the
 * unicast table (0x10 bytes).
 */
static void init_mac_tables(struct mv643xx_eth_private *mp)
{
	int i;

	for (i = 0; i < 0x100; i += 4) {
		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, 0);
		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, 0);
	}

	for (i = 0; i < 0x10; i += 4)
		wrl(mp, UNICAST_TABLE(mp->port_num) + i, 0);
}
1355

1356
/*
 * Set the "accept frame" bit for one entry of an address filter table.
 * Each 32-bit table register holds four byte-wide entries; bit 0 of
 * the entry's byte is the accept bit.
 */
static void set_filter_table_entry(struct mv643xx_eth_private *mp,
				   int table, unsigned char entry)
{
	unsigned int table_reg;

	/* Set "accepts frame bit" at specified table entry */
	table_reg = rdl(mp, table + (entry & 0xfc));
	table_reg |= 0x01 << (8 * (entry & 3));
	wrl(mp, table + (entry & 0xfc), table_reg);
}

1367
/*
 * Program the port's unicast MAC address registers and whitelist the
 * address in the unicast filter table (which is indexed by the low
 * nibble of the last address byte).
 */
static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
{
	unsigned int mac_h;
	unsigned int mac_l;
	int table;

	mac_l = (addr[4] << 8) | addr[5];
	mac_h = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];

	wrl(mp, MAC_ADDR_LOW(mp->port_num), mac_l);
	wrl(mp, MAC_ADDR_HIGH(mp->port_num), mac_h);

	table = UNICAST_TABLE(mp->port_num);
	set_filter_table_entry(mp, table, addr[5] & 0x0f);
}

L
Lennert Buytenhek 已提交
1383
/*
 * net_device set_mac_address hook: store the new address, wipe the
 * filter tables and re-program the unicast address/filter.
 * 'addr' is a struct sockaddr; the MAC bytes start at offset 2.
 */
static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	/* +2 is for the offset of the HW addr type */
	memcpy(dev->dev_addr, addr + 2, 6);

	init_mac_tables(mp);
	uc_addr_set(mp, dev->dev_addr);

	return 0;
}

1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413
/*
 * Compute the 8-bit CRC (polynomial x^8 + x^2 + x + 1, i.e. 0x107)
 * over a 6-byte MAC address; used to index the "other" multicast
 * filter table.
 */
static int addr_crc(unsigned char *addr)
{
	int rem = 0;
	int byte;

	for (byte = 0; byte < 6; byte++) {
		int bit;

		/* fold in the next address byte, then reduce mod 0x107 */
		rem = (rem ^ addr[byte]) << 8;
		for (bit = 7; bit >= 0; bit--) {
			if (rem & (0x100 << bit))
				rem ^= 0x107 << bit;
		}
	}

	return rem;
}

L
Lennert Buytenhek 已提交
1414
/*
 * net_device set_rx_mode hook: program unicast promiscuity and the
 * multicast filter tables according to dev->flags and dev->mc_list.
 */
static void mv643xx_eth_set_rx_mode(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 port_config;
	struct dev_addr_list *addr;
	int i;

	port_config = rdl(mp, PORT_CONFIG(mp->port_num));
	if (dev->flags & IFF_PROMISC)
		port_config |= UNICAST_PROMISCUOUS_MODE;
	else
		port_config &= ~UNICAST_PROMISCUOUS_MODE;
	wrl(mp, PORT_CONFIG(mp->port_num), port_config);

	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		int port_num = mp->port_num;
		u32 accept = 0x01010101;	/* accept bit for all 4 entries/reg */

		/* accept every multicast address */
		for (i = 0; i < 0x100; i += 4) {
			wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept);
			wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept);
		}
		return;
	}

	/* start from a clean slate, then whitelist each list entry */
	for (i = 0; i < 0x100; i += 4) {
		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, 0);
		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, 0);
	}

	for (addr = dev->mc_list; addr != NULL; addr = addr->next) {
		u8 *a = addr->da_addr;
		int table;

		if (addr->da_addrlen != 6)
			continue;

		/* 01:00:5e:00:00:xx goes in the "special" table, keyed
		 * directly by the last byte; everything else goes in the
		 * "other" table, keyed by an 8-bit CRC of the address. */
		if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
			table = SPECIAL_MCAST_TABLE(mp->port_num);
			set_filter_table_entry(mp, table, a[5]);
		} else {
			int crc = addr_crc(a);

			table = OTHER_MCAST_TABLE(mp->port_num);
			set_filter_table_entry(mp, table, crc);
		}
	}
}
1462 1463


1464
/* rx/tx queue initialisation ***********************************************/
1465
/*
 * Allocate and initialise RX queue 'index': descriptor ring (from SRAM
 * for the primary queue when it fits, otherwise coherent DMA memory),
 * the skb pointer array, the circular next-descriptor links, and the
 * out-of-memory refill timer.  Returns 0 or -ENOMEM.
 */
static int rxq_init(struct mv643xx_eth_private *mp, int index)
{
	struct rx_queue *rxq = mp->rxq + index;
	struct rx_desc *rx_desc;
	int size;
	int i;

	rxq->index = index;

	rxq->rx_ring_size = mp->default_rx_ring_size;

	rxq->rx_desc_count = 0;
	rxq->rx_curr_desc = 0;
	rxq->rx_used_desc = 0;

	size = rxq->rx_ring_size * sizeof(struct rx_desc);

	/* primary queue may live in on-chip SRAM if it fits */
	if (index == mp->rxq_primary && size <= mp->rx_desc_sram_size) {
		rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
						mp->rx_desc_sram_size);
		rxq->rx_desc_dma = mp->rx_desc_sram_addr;
	} else {
		rxq->rx_desc_area = dma_alloc_coherent(NULL, size,
							&rxq->rx_desc_dma,
							GFP_KERNEL);
	}

	if (rxq->rx_desc_area == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate rx ring (%d bytes)\n", size);
		goto out;
	}
	memset(rxq->rx_desc_area, 0, size);

	rxq->rx_desc_area_size = size;
	rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb),
								GFP_KERNEL);
	if (rxq->rx_skb == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate rx skb ring\n");
		goto out_free;
	}

	/* chain the descriptors into a circular list */
	rx_desc = (struct rx_desc *)rxq->rx_desc_area;
	for (i = 0; i < rxq->rx_ring_size; i++) {
		int nexti = (i + 1) % rxq->rx_ring_size;
		rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
					nexti * sizeof(struct rx_desc);
	}

	/* timer used to retry skb allocation after an OOM in refill */
	init_timer(&rxq->rx_oom);
	rxq->rx_oom.data = (unsigned long)rxq;
	rxq->rx_oom.function = rxq_refill_timer_wrapper;

	return 0;


out_free:
	if (index == mp->rxq_primary && size <= mp->rx_desc_sram_size)
		iounmap(rxq->rx_desc_area);
	else
		dma_free_coherent(NULL, size,
				  rxq->rx_desc_area,
				  rxq->rx_desc_dma);

out:
	return -ENOMEM;
}
1533

1534
/*
 * Tear down an RX queue: stop it, cancel the refill timer, free every
 * buffered skb, then release the descriptor ring (iounmap for SRAM,
 * dma_free_coherent otherwise) and the skb pointer array.
 */
static void rxq_deinit(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	int i;

	rxq_disable(rxq);

	del_timer_sync(&rxq->rx_oom);

	for (i = 0; i < rxq->rx_ring_size; i++) {
		if (rxq->rx_skb[i]) {
			dev_kfree_skb(rxq->rx_skb[i]);
			rxq->rx_desc_count--;
		}
	}

	/* descriptor count should now be zero; anything else is a leak */
	if (rxq->rx_desc_count) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "error freeing rx ring -- %d skbs stuck\n",
			   rxq->rx_desc_count);
	}

	if (rxq->index == mp->rxq_primary &&
	    rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
		iounmap(rxq->rx_desc_area);
	else
		dma_free_coherent(NULL, rxq->rx_desc_area_size,
				  rxq->rx_desc_area, rxq->rx_desc_dma);

	kfree(rxq->rx_skb);
}
L
Linus Torvalds 已提交
1565

1566
/*
 * Allocate and initialise TX queue 'index': descriptor ring (from SRAM
 * for the primary queue when it fits, otherwise coherent DMA memory),
 * the skb pointer array, and the circular next-descriptor links.
 * Returns 0 or -ENOMEM.
 */
static int txq_init(struct mv643xx_eth_private *mp, int index)
{
	struct tx_queue *txq = mp->txq + index;
	struct tx_desc *tx_desc;
	int size;
	int i;

	txq->index = index;

	txq->tx_ring_size = mp->default_tx_ring_size;

	txq->tx_desc_count = 0;
	txq->tx_curr_desc = 0;
	txq->tx_used_desc = 0;

	size = txq->tx_ring_size * sizeof(struct tx_desc);

	/* primary queue may live in on-chip SRAM if it fits */
	if (index == mp->txq_primary && size <= mp->tx_desc_sram_size) {
		txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
						mp->tx_desc_sram_size);
		txq->tx_desc_dma = mp->tx_desc_sram_addr;
	} else {
		txq->tx_desc_area = dma_alloc_coherent(NULL, size,
							&txq->tx_desc_dma,
							GFP_KERNEL);
	}

	if (txq->tx_desc_area == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate tx ring (%d bytes)\n", size);
		goto out;
	}
	memset(txq->tx_desc_area, 0, size);

	txq->tx_desc_area_size = size;
	txq->tx_skb = kmalloc(txq->tx_ring_size * sizeof(*txq->tx_skb),
								GFP_KERNEL);
	if (txq->tx_skb == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate tx skb ring\n");
		goto out_free;
	}

	/* chain the descriptors into a circular list */
	tx_desc = (struct tx_desc *)txq->tx_desc_area;
	for (i = 0; i < txq->tx_ring_size; i++) {
		struct tx_desc *txd = tx_desc + i;
		int nexti = (i + 1) % txq->tx_ring_size;

		txd->cmd_sts = 0;
		txd->next_desc_ptr = txq->tx_desc_dma +
					nexti * sizeof(struct tx_desc);
	}

	return 0;


out_free:
	if (index == mp->txq_primary && size <= mp->tx_desc_sram_size)
		iounmap(txq->tx_desc_area);
	else
		dma_free_coherent(NULL, size,
				  txq->tx_desc_area,
				  txq->tx_desc_dma);

out:
	return -ENOMEM;
}
L
Linus Torvalds 已提交
1633

1634
/*
 * Reclaim completed TX descriptors: unmap the DMA buffer and free the
 * skb for each descriptor the hardware has finished with.  With
 * 'force' set, descriptors still owned by the DMA engine are forcibly
 * taken back (used during queue teardown).  Runs under mp->lock, which
 * is dropped around the unmap/free of each buffer.
 */
static void txq_reclaim(struct tx_queue *txq, int force)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	unsigned long flags;

	spin_lock_irqsave(&mp->lock, flags);
	while (txq->tx_desc_count > 0) {
		int tx_index;
		struct tx_desc *desc;
		u32 cmd_sts;
		struct sk_buff *skb;
		dma_addr_t addr;
		int count;

		tx_index = txq->tx_used_desc;
		desc = &txq->tx_desc_area[tx_index];
		cmd_sts = desc->cmd_sts;

		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
			if (!force)
				break;
			/* forcibly take the descriptor back from the DMA engine */
			desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
		}

		txq->tx_used_desc = (tx_index + 1) % txq->tx_ring_size;
		txq->tx_desc_count--;

		addr = desc->buf_ptr;
		count = desc->byte_cnt;
		skb = txq->tx_skb[tx_index];
		txq->tx_skb[tx_index] = NULL;

		if (cmd_sts & ERROR_SUMMARY) {
			dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n");
			mp->dev->stats.tx_errors++;
		}

		/*
		 * Drop mp->lock while we free the skb.
		 */
		spin_unlock_irqrestore(&mp->lock, flags);

		/* first descriptor of a packet was dma_map_single()d;
		 * fragment descriptors were dma_map_page()d */
		if (cmd_sts & TX_FIRST_DESC)
			dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
		else
			dma_unmap_page(NULL, addr, count, DMA_TO_DEVICE);

		if (skb)
			dev_kfree_skb_irq(skb);

		spin_lock_irqsave(&mp->lock, flags);
	}
	spin_unlock_irqrestore(&mp->lock, flags);
}
L
Linus Torvalds 已提交
1688

1689
/*
 * Tear down a TX queue: stop it, forcibly reclaim every descriptor,
 * then release the descriptor ring (iounmap for SRAM, otherwise
 * dma_free_coherent) and the skb pointer array.
 */
static void txq_deinit(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);

	txq_disable(txq);
	txq_reclaim(txq, 1);

	/* forced reclaim must have consumed every outstanding descriptor */
	BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);

	if (txq->index == mp->txq_primary &&
	    txq->tx_desc_area_size <= mp->tx_desc_sram_size)
		iounmap(txq->tx_desc_area);
	else
		dma_free_coherent(NULL, txq->tx_desc_area_size,
				  txq->tx_desc_area, txq->tx_desc_dma);

	kfree(txq->tx_skb);
}
L
Linus Torvalds 已提交
1707 1708


1709
/* netdev ops and related ***************************************************/
1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767
/*
 * React to a link status change reported by the port status register.
 * On link down: stop the queue, reclaim and reset every active TX
 * queue.  On link up: log the negotiated parameters and restart the
 * transmit queue.
 */
static void handle_link_event(struct mv643xx_eth_private *mp)
{
	struct net_device *dev = mp->dev;
	u32 port_status;
	int speed;
	int duplex;
	int fc;

	port_status = rdl(mp, PORT_STATUS(mp->port_num));
	if (!(port_status & LINK_UP)) {
		if (netif_carrier_ok(dev)) {
			int i;

			printk(KERN_INFO "%s: link down\n", dev->name);

			netif_carrier_off(dev);
			netif_stop_queue(dev);

			/* flush and rewind all active TX queues */
			for (i = 0; i < 8; i++) {
				struct tx_queue *txq = mp->txq + i;

				if (mp->txq_mask & (1 << i)) {
					txq_reclaim(txq, 1);
					txq_reset_hw_ptr(txq);
				}
			}
		}
		return;
	}

	switch (port_status & PORT_SPEED_MASK) {
	case PORT_SPEED_10:
		speed = 10;
		break;
	case PORT_SPEED_100:
		speed = 100;
		break;
	case PORT_SPEED_1000:
		speed = 1000;
		break;
	default:
		speed = -1;	/* unknown speed encoding */
		break;
	}
	duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
	fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;

	printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
			 "flow control %sabled\n", dev->name,
			 speed, duplex ? "full" : "half",
			 fc ? "en" : "dis");

	if (!netif_carrier_ok(dev)) {
		netif_carrier_on(dev);
		netif_wake_queue(dev);
	}
}

L
Lennert Buytenhek 已提交
1768
/*
 * Port interrupt handler.  Demultiplexes link, RX, TX-done and TX-end
 * causes.  RX is handed to NAPI when MV643XX_ETH_NAPI is configured,
 * otherwise processed inline.  The TX-end path re-kicks any queue
 * whose hardware descriptor pointer stopped short of the software one
 * (the DMA engine raced with new descriptors being queued).
 */
static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 int_cause;
	u32 int_cause_ext;

	int_cause = rdl(mp, INT_CAUSE(mp->port_num)) &
			(INT_TX_END | INT_RX | INT_EXT);
	if (int_cause == 0)
		return IRQ_NONE;	/* shared IRQ; not ours */

	int_cause_ext = 0;
	if (int_cause & INT_EXT) {
		int_cause_ext = rdl(mp, INT_CAUSE_EXT(mp->port_num))
				& (INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX);
		/* ack the extended causes we are about to handle */
		wrl(mp, INT_CAUSE_EXT(mp->port_num), ~int_cause_ext);
	}

	if (int_cause_ext & (INT_EXT_PHY | INT_EXT_LINK))
		handle_link_event(mp);

	/*
	 * RxBuffer or RxError set for any of the 8 queues?
	 */
#ifdef MV643XX_ETH_NAPI
	if (int_cause & INT_RX) {
		/* ack RX and mask everything until the poll runs */
		wrl(mp, INT_CAUSE(mp->port_num), ~(int_cause & INT_RX));
		wrl(mp, INT_MASK(mp->port_num), 0x00000000);
		rdl(mp, INT_MASK(mp->port_num));

		netif_rx_schedule(dev, &mp->napi);
	}
#else
	if (int_cause & INT_RX) {
		int i;

		for (i = 7; i >= 0; i--)
			if (mp->rxq_mask & (1 << i))
				rxq_process(mp->rxq + i, INT_MAX);
	}
#endif

	/*
	 * TxBuffer or TxError set for any of the 8 queues?
	 */
	if (int_cause_ext & INT_EXT_TX) {
		int i;

		for (i = 0; i < 8; i++)
			if (mp->txq_mask & (1 << i))
				txq_reclaim(mp->txq + i, 0);

		/*
		 * Enough space again in the primary TX queue for a
		 * full packet?
		 */
		if (netif_carrier_ok(dev)) {
			spin_lock(&mp->lock);
			__txq_maybe_wake(mp->txq + mp->txq_primary);
			spin_unlock(&mp->lock);
		}
	}

	/*
	 * Any TxEnd interrupts?
	 */
	if (int_cause & INT_TX_END) {
		int i;

		wrl(mp, INT_CAUSE(mp->port_num), ~(int_cause & INT_TX_END));

		spin_lock(&mp->lock);
		for (i = 0; i < 8; i++) {
			struct tx_queue *txq = mp->txq + i;
			u32 hw_desc_ptr;
			u32 expected_ptr;

			if ((int_cause & (INT_TX_END_0 << i)) == 0)
				continue;

			/* if the hardware stopped before reaching the
			 * newest queued descriptor, restart the queue */
			hw_desc_ptr =
				rdl(mp, TXQ_CURRENT_DESC_PTR(mp->port_num, i));
			expected_ptr = (u32)txq->tx_desc_dma +
				txq->tx_curr_desc * sizeof(struct tx_desc);

			if (hw_desc_ptr != expected_ptr)
				txq_enable(txq);
		}
		spin_unlock(&mp->lock);
	}

	return IRQ_HANDLED;
}

1863
/*
 * Reset the attached PHY by setting BMCR_RESET and busy-waiting until
 * the PHY clears it.  NOTE(review): no upper bound on the wait -- a
 * hung PHY would spin here forever; confirm this is acceptable on all
 * supported boards.
 */
static void phy_reset(struct mv643xx_eth_private *mp)
{
	unsigned int data;

	smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data);
	data |= BMCR_RESET;
	smi_reg_write(mp, mp->phy_addr, MII_BMCR, data);

	do {
		udelay(1);
		smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data);
	} while (data & BMCR_RESET);
}

L
Lennert Buytenhek 已提交
1877
/*
 * Bring the port hardware up: reset the PHY (preserving its settings),
 * enable the serial port, program SDMA, set up TX bandwidth/priority
 * for each active queue, install the unicast address, and finally
 * point each active RX queue at its first descriptor and enable it.
 */
static void port_start(struct mv643xx_eth_private *mp)
{
	u32 pscr;
	int i;

	/*
	 * Perform PHY reset, if there is a PHY.
	 */
	if (mp->phy_addr != -1) {
		struct ethtool_cmd cmd;

		/* save/restore link settings across the reset */
		mv643xx_eth_get_settings(mp->dev, &cmd);
		phy_reset(mp);
		mv643xx_eth_set_settings(mp->dev, &cmd);
	}

	/*
	 * Configure basic link parameters.
	 */
	pscr = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));

	pscr |= SERIAL_PORT_ENABLE;
	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);

	pscr |= DO_NOT_FORCE_LINK_FAIL;
	if (mp->phy_addr == -1)
		pscr |= FORCE_LINK_PASS;	/* no PHY to report link state */
	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);

	wrl(mp, SDMA_CONFIG(mp->port_num), PORT_SDMA_CONFIG_DEFAULT_VALUE);

	/*
	 * Configure TX path and queues.
	 */
	tx_set_rate(mp, 1000000000, 16777216);
	for (i = 0; i < 8; i++) {
		struct tx_queue *txq = mp->txq + i;

		if ((mp->txq_mask & (1 << i)) == 0)
			continue;

		txq_reset_hw_ptr(txq);
		txq_set_rate(txq, 1000000000, 16777216);
		txq_set_fixed_prio_mode(txq);
	}

	/*
	 * Add configured unicast address to address filter table.
	 */
	uc_addr_set(mp, mp->dev->dev_addr);

	/*
	 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
	 * frames to RX queue #0.
	 */
	wrl(mp, PORT_CONFIG(mp->port_num), 0x00000000);

	/*
	 * Treat BPDUs as normal multicasts, and disable partition mode.
	 */
	wrl(mp, PORT_CONFIG_EXT(mp->port_num), 0x00000000);

	/*
	 * Enable the receive queues.
	 */
	for (i = 0; i < 8; i++) {
		struct rx_queue *rxq = mp->rxq + i;
		int off = RXQ_CURRENT_DESC_PTR(mp->port_num, i);
		u32 addr;

		if ((mp->rxq_mask & (1 << i)) == 0)
			continue;

		/* point the hardware at the current descriptor */
		addr = (u32)rxq->rx_desc_dma;
		addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
		wrl(mp, off, addr);

		rxq_enable(rxq);
	}
}

1958
/*
 * Program the RX interrupt coalescing delay (in usec) into the SDMA
 * config register.  Newer silicon ("extended" layout) carries a 16-bit
 * count split across two fields; older silicon has a single 14-bit
 * field.  The count is in units of 64 t_clk cycles.
 */
static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
{
	unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;
	u32 val;

	val = rdl(mp, SDMA_CONFIG(mp->port_num));
	if (mp->shared->extended_rx_coal_limit) {
		if (coal > 0xffff)
			coal = 0xffff;
		val &= ~0x023fff80;
		val |= (coal & 0x8000) << 10;	/* top bit lives at bit 25 */
		val |= (coal & 0x7fff) << 7;
	} else {
		if (coal > 0x3fff)
			coal = 0x3fff;
		val &= ~0x003fff00;
		val |= (coal & 0x3fff) << 8;
	}
	wrl(mp, SDMA_CONFIG(mp->port_num), val);
}

1979
/*
 * Program the TX interrupt coalescing delay (in usec, converted to
 * units of 64 t_clk cycles, clamped to the 14-bit hardware field).
 */
static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
{
	unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;

	if (coal > 0x3fff)
		coal = 0x3fff;
	wrl(mp, TX_FIFO_URGENT_THRESHOLD(mp->port_num), (coal & 0x3fff) << 4);
}

1988
/*
 * net_device open hook: clear stale interrupt causes, grab the IRQ,
 * reset the filter tables, set up every enabled RX and TX queue
 * (unwinding on failure), start the port and unmask interrupts.
 */
static int mv643xx_eth_open(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int err;
	int i;

	/* clear any latched interrupt causes before enabling the IRQ */
	wrl(mp, INT_CAUSE(mp->port_num), 0);
	wrl(mp, INT_CAUSE_EXT(mp->port_num), 0);
	rdl(mp, INT_CAUSE_EXT(mp->port_num));

	err = request_irq(dev->irq, mv643xx_eth_irq,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM,
			  dev->name, dev);
	if (err) {
		dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
		return -EAGAIN;
	}

	init_mac_tables(mp);

	/* set up each RX queue enabled in rxq_mask; unwind on error */
	for (i = 0; i < 8; i++) {
		if ((mp->rxq_mask & (1 << i)) == 0)
			continue;

		err = rxq_init(mp, i);
		if (err) {
			while (--i >= 0)
				if (mp->rxq_mask & (1 << i))
					rxq_deinit(mp->rxq + i);
			goto out;
		}

		rxq_refill(mp->rxq + i);
	}

	/* set up each TX queue enabled in txq_mask; unwind on error */
	for (i = 0; i < 8; i++) {
		if ((mp->txq_mask & (1 << i)) == 0)
			continue;

		err = txq_init(mp, i);
		if (err) {
			while (--i >= 0)
				if (mp->txq_mask & (1 << i))
					txq_deinit(mp->txq + i);
			goto out_free;
		}
	}

#ifdef MV643XX_ETH_NAPI
	napi_enable(&mp->napi);
#endif

	/* stay quiescent until the first link-up event */
	netif_carrier_off(dev);
	netif_stop_queue(dev);

	port_start(mp);

	set_rx_coal(mp, 0);
	set_tx_coal(mp, 0);

	wrl(mp, INT_MASK_EXT(mp->port_num),
	    INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX);

	wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);

	return 0;


out_free:
	for (i = 0; i < 8; i++)
		if (mp->rxq_mask & (1 << i))
			rxq_deinit(mp->rxq + i);
out:
	free_irq(dev->irq, dev);

	return err;
}

2066
static void port_reset(struct mv643xx_eth_private *mp)
L
Linus Torvalds 已提交
2067
{
L
Lennert Buytenhek 已提交
2068
	unsigned int data;
2069
	int i;
L
Linus Torvalds 已提交
2070

2071 2072 2073
	for (i = 0; i < 8; i++) {
		if (mp->rxq_mask & (1 << i))
			rxq_disable(mp->rxq + i);
2074 2075
		if (mp->txq_mask & (1 << i))
			txq_disable(mp->txq + i);
2076
	}
2077 2078 2079 2080 2081 2082

	while (1) {
		u32 ps = rdl(mp, PORT_STATUS(mp->port_num));

		if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY)
			break;
2083
		udelay(10);
2084
	}
L
Linus Torvalds 已提交
2085

2086
	/* Reset the Enable bit in the Configuration Register */
L
Lennert Buytenhek 已提交
2087 2088 2089 2090 2091
	data = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
	data &= ~(SERIAL_PORT_ENABLE		|
		  DO_NOT_FORCE_LINK_FAIL	|
		  FORCE_LINK_PASS);
	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), data);
L
Linus Torvalds 已提交
2092 2093
}

2094
static int mv643xx_eth_stop(struct net_device *dev)
L
Linus Torvalds 已提交
2095
{
2096
	struct mv643xx_eth_private *mp = netdev_priv(dev);
2097
	int i;
L
Linus Torvalds 已提交
2098

L
Lennert Buytenhek 已提交
2099 2100
	wrl(mp, INT_MASK(mp->port_num), 0x00000000);
	rdl(mp, INT_MASK(mp->port_num));
L
Linus Torvalds 已提交
2101

2102
#ifdef MV643XX_ETH_NAPI
2103 2104 2105 2106
	napi_disable(&mp->napi);
#endif
	netif_carrier_off(dev);
	netif_stop_queue(dev);
L
Linus Torvalds 已提交
2107

L
Lennert Buytenhek 已提交
2108 2109
	free_irq(dev->irq, dev);

2110
	port_reset(mp);
L
Lennert Buytenhek 已提交
2111
	mib_counters_update(mp);
L
Linus Torvalds 已提交
2112

2113 2114 2115
	for (i = 0; i < 8; i++) {
		if (mp->rxq_mask & (1 << i))
			rxq_deinit(mp->rxq + i);
2116 2117
		if (mp->txq_mask & (1 << i))
			txq_deinit(mp->txq + i);
2118
	}
L
Linus Torvalds 已提交
2119

2120
	return 0;
L
Linus Torvalds 已提交
2121 2122
}

L
Lennert Buytenhek 已提交
2123
static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
L
Linus Torvalds 已提交
2124
{
2125
	struct mv643xx_eth_private *mp = netdev_priv(dev);
L
Linus Torvalds 已提交
2126

2127 2128 2129 2130
	if (mp->phy_addr != -1)
		return generic_mii_ioctl(&mp->mii, if_mii(ifr), cmd, NULL);

	return -EOPNOTSUPP;
L
Linus Torvalds 已提交
2131 2132
}

2133
static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
L
Linus Torvalds 已提交
2134
{
2135 2136
	struct mv643xx_eth_private *mp = netdev_priv(dev);

L
Lennert Buytenhek 已提交
2137
	if (new_mtu < 64 || new_mtu > 9500)
2138
		return -EINVAL;
L
Linus Torvalds 已提交
2139

2140
	dev->mtu = new_mtu;
2141 2142
	tx_set_rate(mp, 1000000000, 16777216);

2143 2144
	if (!netif_running(dev))
		return 0;
L
Linus Torvalds 已提交
2145

2146 2147 2148 2149
	/*
	 * Stop and then re-open the interface. This will allocate RX
	 * skbs of the new MTU.
	 * There is a possible danger that the open will not succeed,
L
Lennert Buytenhek 已提交
2150
	 * due to memory being full.
2151 2152 2153
	 */
	mv643xx_eth_stop(dev);
	if (mv643xx_eth_open(dev)) {
L
Lennert Buytenhek 已提交
2154 2155 2156
		dev_printk(KERN_ERR, &dev->dev,
			   "fatal error on re-opening device after "
			   "MTU change\n");
2157 2158 2159
	}

	return 0;
L
Linus Torvalds 已提交
2160 2161
}

L
Lennert Buytenhek 已提交
2162
static void tx_timeout_task(struct work_struct *ugly)
L
Linus Torvalds 已提交
2163
{
L
Lennert Buytenhek 已提交
2164
	struct mv643xx_eth_private *mp;
L
Linus Torvalds 已提交
2165

L
Lennert Buytenhek 已提交
2166 2167 2168
	mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
	if (netif_running(mp->dev)) {
		netif_stop_queue(mp->dev);
2169

L
Lennert Buytenhek 已提交
2170 2171
		port_reset(mp);
		port_start(mp);
2172

2173
		__txq_maybe_wake(mp->txq + mp->txq_primary);
L
Lennert Buytenhek 已提交
2174
	}
2175 2176 2177
}

static void mv643xx_eth_tx_timeout(struct net_device *dev)
L
Linus Torvalds 已提交
2178
{
2179
	struct mv643xx_eth_private *mp = netdev_priv(dev);
L
Linus Torvalds 已提交
2180

L
Lennert Buytenhek 已提交
2181
	dev_printk(KERN_INFO, &dev->dev, "tx timeout\n");
2182

2183
	schedule_work(&mp->tx_timeout_task);
L
Linus Torvalds 已提交
2184 2185
}

2186
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polled operation for netconsole and friends: mask the port's
 * interrupts, invoke the interrupt handler synchronously, then
 * restore the normal interrupt mask.
 */
static void mv643xx_eth_netpoll(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	wrl(mp, INT_MASK(mp->port_num), 0x00000000);
	rdl(mp, INT_MASK(mp->port_num));

	mv643xx_eth_irq(dev->irq, dev);

	wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);
}
#endif
2199

L
Lennert Buytenhek 已提交
2200
/* MII library hook: read PHY register 'reg' of PHY at address 'addr'. */
static int mv643xx_eth_mdio_read(struct net_device *dev, int addr, int reg)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int val;

	smi_reg_read(mp, addr, reg, &val);

	return val;
}

L
Lennert Buytenhek 已提交
2210
/* MII library hook: write 'val' to PHY register 'reg' of PHY 'addr'. */
static void mv643xx_eth_mdio_write(struct net_device *dev, int addr, int reg, int val)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	smi_reg_write(mp, addr, reg, val);
}
2215 2216


2217
/* platform glue ************************************************************/
2218 2219 2220
static void
mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
			      struct mbus_dram_target_info *dram)
2221
{
2222
	void __iomem *base = msp->base;
2223 2224 2225
	u32 win_enable;
	u32 win_protect;
	int i;
2226

2227 2228 2229 2230 2231
	for (i = 0; i < 6; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
2232 2233
	}

2234 2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250
	win_enable = 0x3f;
	win_protect = 0;

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
			(cs->mbus_attr << 8) |
			dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable &= ~(1 << i);
		win_protect |= 3 << (2 * i);
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE);
	msp->win_protect = win_protect;
2251 2252
}

2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264
/*
 * Probe silicon-revision differences by writing test patterns to
 * port-0 registers and seeing which bits stick.
 */
static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
{
	/*
	 * Check whether we have a 14-bit coal limit field in bits
	 * [21:8], or a 16-bit coal limit in bits [25,21:7] of the
	 * SDMA config register.
	 */
	writel(0x02000000, msp->base + SDMA_CONFIG(0));
	msp->extended_rx_coal_limit =
		(readl(msp->base + SDMA_CONFIG(0)) & 0x02000000) ? 1 : 0;

	/*
	 * Check whether the TX rate control registers are in the
	 * old or the new place.
	 */
	writel(1, msp->base + TX_BW_MTU_MOVED(0));
	msp->tx_bw_control_moved =
		(readl(msp->base + TX_BW_MTU_MOVED(0)) & 1) ? 1 : 0;
}

2277
static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2278
{
2279
	static int mv643xx_eth_version_printed = 0;
2280
	struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
2281
	struct mv643xx_eth_shared_private *msp;
2282 2283
	struct resource *res;
	int ret;
2284

2285
	if (!mv643xx_eth_version_printed++)
2286 2287
		printk(KERN_NOTICE "MV-643xx 10/100/1000 ethernet "
			"driver version %s\n", mv643xx_eth_driver_version);
2288

2289 2290 2291 2292
	ret = -EINVAL;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		goto out;
2293

2294 2295 2296 2297 2298 2299
	ret = -ENOMEM;
	msp = kmalloc(sizeof(*msp), GFP_KERNEL);
	if (msp == NULL)
		goto out;
	memset(msp, 0, sizeof(*msp));

2300 2301
	msp->base = ioremap(res->start, res->end - res->start + 1);
	if (msp->base == NULL)
2302 2303 2304 2305 2306 2307 2308 2309 2310 2311
		goto out_free;

	spin_lock_init(&msp->phy_lock);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (pd != NULL && pd->dram != NULL)
		mv643xx_eth_conf_mbus_windows(msp, pd->dram);

L
Lennert Buytenhek 已提交
2312 2313 2314 2315
	/*
	 * Detect hardware parameters.
	 */
	msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;
2316
	infer_hw_params(msp);
L
Lennert Buytenhek 已提交
2317 2318 2319

	platform_set_drvdata(pdev, msp);

2320 2321 2322 2323 2324 2325 2326 2327 2328 2329
	return 0;

out_free:
	kfree(msp);
out:
	return ret;
}

static int mv643xx_eth_shared_remove(struct platform_device *pdev)
{
2330
	struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
2331

2332
	iounmap(msp->base);
2333 2334 2335
	kfree(msp);

	return 0;
2336 2337
}

2338
static struct platform_driver mv643xx_eth_shared_driver = {
L
Lennert Buytenhek 已提交
2339 2340
	.probe		= mv643xx_eth_shared_probe,
	.remove		= mv643xx_eth_shared_remove,
2341
	.driver = {
L
Lennert Buytenhek 已提交
2342
		.name	= MV643XX_ETH_SHARED_NAME,
2343 2344 2345 2346
		.owner	= THIS_MODULE,
	},
};

2347
static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
L
Linus Torvalds 已提交
2348
{
2349
	int addr_shift = 5 * mp->port_num;
L
Lennert Buytenhek 已提交
2350
	u32 data;
L
Linus Torvalds 已提交
2351

L
Lennert Buytenhek 已提交
2352 2353 2354 2355
	data = rdl(mp, PHY_ADDR);
	data &= ~(0x1f << addr_shift);
	data |= (phy_addr & 0x1f) << addr_shift;
	wrl(mp, PHY_ADDR, data);
L
Linus Torvalds 已提交
2356 2357
}

2358
static int phy_addr_get(struct mv643xx_eth_private *mp)
L
Linus Torvalds 已提交
2359
{
L
Lennert Buytenhek 已提交
2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391
	unsigned int data;

	data = rdl(mp, PHY_ADDR);

	return (data >> (5 * mp->port_num)) & 0x1f;
}

static void set_params(struct mv643xx_eth_private *mp,
		       struct mv643xx_eth_platform_data *pd)
{
	struct net_device *dev = mp->dev;

	if (is_valid_ether_addr(pd->mac_addr))
		memcpy(dev->dev_addr, pd->mac_addr, 6);
	else
		uc_addr_get(mp, dev->dev_addr);

	if (pd->phy_addr == -1) {
		mp->shared_smi = NULL;
		mp->phy_addr = -1;
	} else {
		mp->shared_smi = mp->shared;
		if (pd->shared_smi != NULL)
			mp->shared_smi = platform_get_drvdata(pd->shared_smi);

		if (pd->force_phy_addr || pd->phy_addr) {
			mp->phy_addr = pd->phy_addr & 0x3f;
			phy_addr_set(mp, mp->phy_addr);
		} else {
			mp->phy_addr = phy_addr_get(mp);
		}
	}
L
Linus Torvalds 已提交
2392

L
Lennert Buytenhek 已提交
2393 2394 2395 2396 2397
	mp->default_rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
	if (pd->rx_queue_size)
		mp->default_rx_ring_size = pd->rx_queue_size;
	mp->rx_desc_sram_addr = pd->rx_sram_addr;
	mp->rx_desc_sram_size = pd->rx_sram_size;
L
Linus Torvalds 已提交
2398

2399 2400 2401 2402 2403 2404
	if (pd->rx_queue_mask)
		mp->rxq_mask = pd->rx_queue_mask;
	else
		mp->rxq_mask = 0x01;
	mp->rxq_primary = fls(mp->rxq_mask) - 1;

L
Lennert Buytenhek 已提交
2405 2406 2407 2408 2409
	mp->default_tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
	if (pd->tx_queue_size)
		mp->default_tx_ring_size = pd->tx_queue_size;
	mp->tx_desc_sram_addr = pd->tx_sram_addr;
	mp->tx_desc_sram_size = pd->tx_sram_size;
2410 2411 2412 2413 2414 2415

	if (pd->tx_queue_mask)
		mp->txq_mask = pd->tx_queue_mask;
	else
		mp->txq_mask = 0x01;
	mp->txq_primary = fls(mp->txq_mask) - 1;
L
Linus Torvalds 已提交
2416 2417
}

2418
static int phy_detect(struct mv643xx_eth_private *mp)
L
Linus Torvalds 已提交
2419
{
L
Lennert Buytenhek 已提交
2420 2421 2422
	unsigned int data;
	unsigned int data2;

2423 2424
	smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data);
	smi_reg_write(mp, mp->phy_addr, MII_BMCR, data ^ BMCR_ANENABLE);
L
Linus Torvalds 已提交
2425

2426 2427
	smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data2);
	if (((data ^ data2) & BMCR_ANENABLE) == 0)
L
Lennert Buytenhek 已提交
2428
		return -ENODEV;
L
Linus Torvalds 已提交
2429

2430
	smi_reg_write(mp, mp->phy_addr, MII_BMCR, data);
L
Linus Torvalds 已提交
2431

2432
	return 0;
L
Linus Torvalds 已提交
2433 2434
}

L
Lennert Buytenhek 已提交
2435 2436
static int phy_init(struct mv643xx_eth_private *mp,
		    struct mv643xx_eth_platform_data *pd)
2437
{
L
Lennert Buytenhek 已提交
2438 2439
	struct ethtool_cmd cmd;
	int err;
2440

L
Lennert Buytenhek 已提交
2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454
	err = phy_detect(mp);
	if (err) {
		dev_printk(KERN_INFO, &mp->dev->dev,
			   "no PHY detected at addr %d\n", mp->phy_addr);
		return err;
	}
	phy_reset(mp);

	mp->mii.phy_id = mp->phy_addr;
	mp->mii.phy_id_mask = 0x3f;
	mp->mii.reg_num_mask = 0x1f;
	mp->mii.dev = mp->dev;
	mp->mii.mdio_read = mv643xx_eth_mdio_read;
	mp->mii.mdio_write = mv643xx_eth_mdio_write;
2455

L
Lennert Buytenhek 已提交
2456
	mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii);
2457

L
Lennert Buytenhek 已提交
2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469
	memset(&cmd, 0, sizeof(cmd));

	cmd.port = PORT_MII;
	cmd.transceiver = XCVR_INTERNAL;
	cmd.phy_address = mp->phy_addr;
	if (pd->speed == 0) {
		cmd.autoneg = AUTONEG_ENABLE;
		cmd.speed = SPEED_100;
		cmd.advertising = ADVERTISED_10baseT_Half  |
				  ADVERTISED_10baseT_Full  |
				  ADVERTISED_100baseT_Half |
				  ADVERTISED_100baseT_Full;
2470
		if (mp->mii.supports_gmii)
L
Lennert Buytenhek 已提交
2471
			cmd.advertising |= ADVERTISED_1000baseT_Full;
2472
	} else {
L
Lennert Buytenhek 已提交
2473 2474 2475
		cmd.autoneg = AUTONEG_DISABLE;
		cmd.speed = pd->speed;
		cmd.duplex = pd->duplex;
2476
	}
L
Lennert Buytenhek 已提交
2477 2478 2479 2480

	mv643xx_eth_set_settings(mp->dev, &cmd);

	return 0;
2481 2482
}

2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510
/*
 * Initialise the port serial control register.  The port is disabled
 * first if it was enabled; for PHY-less ports, autonegotiation is
 * turned off and the given speed/duplex are forced.
 */
static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
{
	u32 pscr;

	pscr = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
	if (pscr & SERIAL_PORT_ENABLE) {
		pscr &= ~SERIAL_PORT_ENABLE;
		wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
	}

	pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
	if (mp->phy_addr == -1) {
		pscr |= DISABLE_AUTO_NEG_SPEED_GMII;
		if (speed == SPEED_1000)
			pscr |= SET_GMII_SPEED_TO_1000;
		else if (speed == SPEED_100)
			pscr |= SET_MII_SPEED_TO_100;

		pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL;

		pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX;
		if (duplex == DUPLEX_FULL)
			pscr |= SET_FULL_DUPLEX_MODE;
	}

	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
}

2511
static int mv643xx_eth_probe(struct platform_device *pdev)
L
Linus Torvalds 已提交
2512
{
2513
	struct mv643xx_eth_platform_data *pd;
2514
	struct mv643xx_eth_private *mp;
2515 2516 2517
	struct net_device *dev;
	struct resource *res;
	DECLARE_MAC_BUF(mac);
L
Lennert Buytenhek 已提交
2518
	int err;
L
Linus Torvalds 已提交
2519

2520 2521
	pd = pdev->dev.platform_data;
	if (pd == NULL) {
L
Lennert Buytenhek 已提交
2522 2523
		dev_printk(KERN_ERR, &pdev->dev,
			   "no mv643xx_eth_platform_data\n");
2524 2525
		return -ENODEV;
	}
L
Linus Torvalds 已提交
2526

2527
	if (pd->shared == NULL) {
L
Lennert Buytenhek 已提交
2528 2529
		dev_printk(KERN_ERR, &pdev->dev,
			   "no mv643xx_eth_platform_data->shared\n");
2530 2531
		return -ENODEV;
	}
2532

2533
	dev = alloc_etherdev(sizeof(struct mv643xx_eth_private));
2534 2535
	if (!dev)
		return -ENOMEM;
L
Linus Torvalds 已提交
2536

2537
	mp = netdev_priv(dev);
L
Lennert Buytenhek 已提交
2538 2539 2540 2541 2542
	platform_set_drvdata(pdev, mp);

	mp->shared = platform_get_drvdata(pd->shared);
	mp->port_num = pd->port_number;

2543
	mp->dev = dev;
2544 2545
#ifdef MV643XX_ETH_NAPI
	netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 64);
2546
#endif
L
Linus Torvalds 已提交
2547

L
Lennert Buytenhek 已提交
2548 2549 2550 2551 2552 2553 2554
	set_params(mp, pd);

	spin_lock_init(&mp->lock);

	mib_counters_clear(mp);
	INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);

2555 2556 2557 2558 2559 2560 2561 2562 2563
	if (mp->phy_addr != -1) {
		err = phy_init(mp, pd);
		if (err)
			goto out;

		SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);
	} else {
		SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops_phyless);
	}
2564
	init_pscr(mp, pd->speed, pd->duplex);
L
Lennert Buytenhek 已提交
2565 2566


2567 2568 2569
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	BUG_ON(!res);
	dev->irq = res->start;
L
Linus Torvalds 已提交
2570

L
Lennert Buytenhek 已提交
2571
	dev->hard_start_xmit = mv643xx_eth_xmit;
2572 2573 2574
	dev->open = mv643xx_eth_open;
	dev->stop = mv643xx_eth_stop;
	dev->set_multicast_list = mv643xx_eth_set_rx_mode;
L
Lennert Buytenhek 已提交
2575 2576 2577
	dev->set_mac_address = mv643xx_eth_set_mac_address;
	dev->do_ioctl = mv643xx_eth_ioctl;
	dev->change_mtu = mv643xx_eth_change_mtu;
2578 2579
	dev->tx_timeout = mv643xx_eth_tx_timeout;
#ifdef CONFIG_NET_POLL_CONTROLLER
2580
	dev->poll_controller = mv643xx_eth_netpoll;
2581 2582 2583
#endif
	dev->watchdog_timeo = 2 * HZ;
	dev->base_addr = 0;
L
Linus Torvalds 已提交
2584

2585
#ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX
2586
	/*
2587 2588
	 * Zero copy can only work if we use Discovery II memory. Else, we will
	 * have to map the buffers to ISA memory which is only 16 MB
2589
	 */
2590
	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
2591
	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM;
2592
#endif
L
Linus Torvalds 已提交
2593

L
Lennert Buytenhek 已提交
2594
	SET_NETDEV_DEV(dev, &pdev->dev);
2595

2596
	if (mp->shared->win_protect)
L
Lennert Buytenhek 已提交
2597
		wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);
L
Linus Torvalds 已提交
2598

2599 2600 2601
	err = register_netdev(dev);
	if (err)
		goto out;
L
Linus Torvalds 已提交
2602

L
Lennert Buytenhek 已提交
2603 2604
	dev_printk(KERN_NOTICE, &dev->dev, "port %d with MAC address %s\n",
		   mp->port_num, print_mac(mac, dev->dev_addr));
L
Linus Torvalds 已提交
2605

2606
	if (dev->features & NETIF_F_SG)
L
Lennert Buytenhek 已提交
2607
		dev_printk(KERN_NOTICE, &dev->dev, "scatter/gather enabled\n");
L
Linus Torvalds 已提交
2608

2609
	if (dev->features & NETIF_F_IP_CSUM)
L
Lennert Buytenhek 已提交
2610
		dev_printk(KERN_NOTICE, &dev->dev, "tx checksum offload\n");
L
Linus Torvalds 已提交
2611

2612
#ifdef MV643XX_ETH_NAPI
L
Lennert Buytenhek 已提交
2613
	dev_printk(KERN_NOTICE, &dev->dev, "napi enabled\n");
2614
#endif
L
Linus Torvalds 已提交
2615

2616
	if (mp->tx_desc_sram_size > 0)
L
Lennert Buytenhek 已提交
2617
		dev_printk(KERN_NOTICE, &dev->dev, "configured with sram\n");
L
Linus Torvalds 已提交
2618

2619
	return 0;
L
Linus Torvalds 已提交
2620

2621 2622
out:
	free_netdev(dev);
L
Linus Torvalds 已提交
2623

2624
	return err;
L
Linus Torvalds 已提交
2625 2626
}

2627
static int mv643xx_eth_remove(struct platform_device *pdev)
L
Linus Torvalds 已提交
2628
{
L
Lennert Buytenhek 已提交
2629
	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
L
Linus Torvalds 已提交
2630

L
Lennert Buytenhek 已提交
2631
	unregister_netdev(mp->dev);
2632
	flush_scheduled_work();
L
Lennert Buytenhek 已提交
2633
	free_netdev(mp->dev);
2634 2635

	platform_set_drvdata(pdev, NULL);
L
Lennert Buytenhek 已提交
2636

2637
	return 0;
L
Linus Torvalds 已提交
2638 2639
}

2640
static void mv643xx_eth_shutdown(struct platform_device *pdev)
2641
{
L
Lennert Buytenhek 已提交
2642
	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
2643

2644
	/* Mask all interrupts on ethernet port */
L
Lennert Buytenhek 已提交
2645 2646
	wrl(mp, INT_MASK(mp->port_num), 0);
	rdl(mp, INT_MASK(mp->port_num));
2647

L
Lennert Buytenhek 已提交
2648 2649
	if (netif_running(mp->dev))
		port_reset(mp);
2650 2651
}

2652
static struct platform_driver mv643xx_eth_driver = {
L
Lennert Buytenhek 已提交
2653 2654 2655
	.probe		= mv643xx_eth_probe,
	.remove		= mv643xx_eth_remove,
	.shutdown	= mv643xx_eth_shutdown,
2656
	.driver = {
L
Lennert Buytenhek 已提交
2657
		.name	= MV643XX_ETH_NAME,
2658 2659 2660 2661
		.owner	= THIS_MODULE,
	},
};

2662
/*
 * Module entry point.  The shared driver must be registered before the
 * per-port driver; if the latter fails, unwind the former.
 */
static int __init mv643xx_eth_init_module(void)
{
	int rc;

	rc = platform_driver_register(&mv643xx_eth_shared_driver);
	if (rc)
		return rc;

	rc = platform_driver_register(&mv643xx_eth_driver);
	if (rc)
		platform_driver_unregister(&mv643xx_eth_shared_driver);

	return rc;
}
module_init(mv643xx_eth_init_module);
2676

2677
/* Module exit point: unregister drivers in reverse registration order. */
static void __exit mv643xx_eth_cleanup_module(void)
{
	platform_driver_unregister(&mv643xx_eth_driver);
	platform_driver_unregister(&mv643xx_eth_shared_driver);
}
module_exit(mv643xx_eth_cleanup_module);
L
Linus Torvalds 已提交
2683

2684 2685
MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, "
	      "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek");
MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);
MODULE_ALIAS("platform:" MV643XX_ETH_NAME);