mv643xx_eth.c 65.9 KB
Newer Older
L
Linus Torvalds 已提交
1
/*
2
 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
L
Linus Torvalds 已提交
3 4 5
 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
 *
 * Based on the 64360 driver from:
6 7
 * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
 *		      Rabeeh Khoury <rabeeh@marvell.com>
L
Linus Torvalds 已提交
8 9
 *
 * Copyright (C) 2003 PMC-Sierra, Inc.,
10
 *	written by Manish Lachwani
L
Linus Torvalds 已提交
11 12 13
 *
 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
 *
14
 * Copyright (C) 2004-2006 MontaVista Software, Inc.
L
Linus Torvalds 已提交
15 16 17 18 19
 *			   Dale Farnsworth <dale@farnsworth.org>
 *
 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
 *				     <sjhill@realitydiluted.com>
 *
20 21 22
 * Copyright (C) 2007-2008 Marvell Semiconductor
 *			   Lennert Buytenhek <buytenh@marvell.com>
 *
L
Linus Torvalds 已提交
23 24 25 26 27 28 29 30 31 32 33 34 35 36
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
37

L
Linus Torvalds 已提交
38 39
#include <linux/init.h>
#include <linux/dma-mapping.h>
40
#include <linux/in.h>
L
Linus Torvalds 已提交
41 42 43 44 45
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
46
#include <linux/platform_device.h>
47 48 49 50
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
51
#include <linux/phy.h>
52
#include <linux/mv643xx_eth.h>
L
Linus Torvalds 已提交
53 54 55
#include <asm/io.h>
#include <asm/types.h>
#include <asm/system.h>
56

57
static char mv643xx_eth_driver_name[] = "mv643xx_eth";
58
static char mv643xx_eth_driver_version[] = "1.4";
59

60 61 62 63

/*
 * Registers shared between all ports.
 */
64 65
#define PHY_ADDR			0x0000
#define SMI_REG				0x0004
66 67 68 69 70 71 72
#define  SMI_BUSY			0x10000000
#define  SMI_READ_VALID			0x08000000
#define  SMI_OPCODE_READ		0x04000000
#define  SMI_OPCODE_WRITE		0x00000000
#define ERR_INT_CAUSE			0x0080
#define  ERR_INT_SMI_DONE		0x00000010
#define ERR_INT_MASK			0x0084
73 74 75 76 77
#define WINDOW_BASE(w)			(0x0200 + ((w) << 3))
#define WINDOW_SIZE(w)			(0x0204 + ((w) << 3))
#define WINDOW_REMAP_HIGH(w)		(0x0280 + ((w) << 2))
#define WINDOW_BAR_ENABLE		0x0290
#define WINDOW_PROTECT(w)		(0x0294 + ((w) << 4))
78 79 80 81

/*
 * Per-port registers.
 */
82
#define PORT_CONFIG(p)			(0x0400 + ((p) << 10))
83
#define  UNICAST_PROMISCUOUS_MODE	0x00000001
84 85 86 87 88 89
#define PORT_CONFIG_EXT(p)		(0x0404 + ((p) << 10))
#define MAC_ADDR_LOW(p)			(0x0414 + ((p) << 10))
#define MAC_ADDR_HIGH(p)		(0x0418 + ((p) << 10))
#define SDMA_CONFIG(p)			(0x041c + ((p) << 10))
#define PORT_SERIAL_CONTROL(p)		(0x043c + ((p) << 10))
#define PORT_STATUS(p)			(0x0444 + ((p) << 10))
90
#define  TX_FIFO_EMPTY			0x00000400
91
#define  TX_IN_PROGRESS			0x00000080
92 93 94 95 96 97
#define  PORT_SPEED_MASK		0x00000030
#define  PORT_SPEED_1000		0x00000010
#define  PORT_SPEED_100			0x00000020
#define  PORT_SPEED_10			0x00000000
#define  FLOW_CONTROL_ENABLED		0x00000008
#define  FULL_DUPLEX			0x00000004
98
#define  LINK_UP			0x00000002
99
#define TXQ_COMMAND(p)			(0x0448 + ((p) << 10))
100 101
#define TXQ_FIX_PRIO_CONF(p)		(0x044c + ((p) << 10))
#define TX_BW_RATE(p)			(0x0450 + ((p) << 10))
102
#define TX_BW_MTU(p)			(0x0458 + ((p) << 10))
103
#define TX_BW_BURST(p)			(0x045c + ((p) << 10))
104
#define INT_CAUSE(p)			(0x0460 + ((p) << 10))
105
#define  INT_TX_END			0x07f80000
106
#define  INT_RX				0x000003fc
107
#define  INT_EXT			0x00000002
108
#define INT_CAUSE_EXT(p)		(0x0464 + ((p) << 10))
109 110
#define  INT_EXT_LINK_PHY		0x00110000
#define  INT_EXT_TX			0x000000ff
111 112 113
#define INT_MASK(p)			(0x0468 + ((p) << 10))
#define INT_MASK_EXT(p)			(0x046c + ((p) << 10))
#define TX_FIFO_URGENT_THRESHOLD(p)	(0x0474 + ((p) << 10))
114 115 116 117
#define TXQ_FIX_PRIO_CONF_MOVED(p)	(0x04dc + ((p) << 10))
#define TX_BW_RATE_MOVED(p)		(0x04e0 + ((p) << 10))
#define TX_BW_MTU_MOVED(p)		(0x04e8 + ((p) << 10))
#define TX_BW_BURST_MOVED(p)		(0x04ec + ((p) << 10))
118
#define RXQ_CURRENT_DESC_PTR(p, q)	(0x060c + ((p) << 10) + ((q) << 4))
119
#define RXQ_COMMAND(p)			(0x0680 + ((p) << 10))
120 121 122 123
#define TXQ_CURRENT_DESC_PTR(p, q)	(0x06c0 + ((p) << 10) + ((q) << 2))
#define TXQ_BW_TOKENS(p, q)		(0x0700 + ((p) << 10) + ((q) << 4))
#define TXQ_BW_CONF(p, q)		(0x0704 + ((p) << 10) + ((q) << 4))
#define TXQ_BW_WRR_CONF(p, q)		(0x0708 + ((p) << 10) + ((q) << 4))
124 125 126 127
#define MIB_COUNTERS(p)			(0x1000 + ((p) << 7))
#define SPECIAL_MCAST_TABLE(p)		(0x1400 + ((p) << 10))
#define OTHER_MCAST_TABLE(p)		(0x1500 + ((p) << 10))
#define UNICAST_TABLE(p)		(0x1600 + ((p) << 10))
128

129 130 131 132

/*
 * SDMA configuration register.
 */
133
#define RX_BURST_SIZE_16_64BIT		(4 << 1)
134 135
#define BLM_RX_NO_SWAP			(1 << 4)
#define BLM_TX_NO_SWAP			(1 << 5)
136
#define TX_BURST_SIZE_16_64BIT		(4 << 22)
137 138 139

#if defined(__BIG_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
140 141
		RX_BURST_SIZE_16_64BIT	|	\
		TX_BURST_SIZE_16_64BIT
142 143
#elif defined(__LITTLE_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
144
		RX_BURST_SIZE_16_64BIT	|	\
145 146
		BLM_RX_NO_SWAP		|	\
		BLM_TX_NO_SWAP		|	\
147
		TX_BURST_SIZE_16_64BIT
148 149 150 151
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif

152 153 154 155 156 157 158

/*
 * Port serial control register.
 */
#define SET_MII_SPEED_TO_100			(1 << 24)
#define SET_GMII_SPEED_TO_1000			(1 << 23)
#define SET_FULL_DUPLEX_MODE			(1 << 21)
159
#define MAX_RX_PACKET_9700BYTE			(5 << 17)
160 161 162 163 164 165 166
#define DISABLE_AUTO_NEG_SPEED_GMII		(1 << 13)
#define DO_NOT_FORCE_LINK_FAIL			(1 << 10)
#define SERIAL_PORT_CONTROL_RESERVED		(1 << 9)
#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL		(1 << 3)
#define DISABLE_AUTO_NEG_FOR_DUPLEX		(1 << 2)
#define FORCE_LINK_PASS				(1 << 1)
#define SERIAL_PORT_ENABLE			(1 << 0)
167

168 169
#define DEFAULT_RX_QUEUE_SIZE		128
#define DEFAULT_TX_QUEUE_SIZE		256
170 171


172 173
/*
 * RX/TX descriptors.
174 175
 */
#if defined(__BIG_ENDIAN)
176
struct rx_desc {
177 178 179 180 181 182 183
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u16 buf_size;		/* Buffer size				*/
	u32 cmd_sts;		/* Descriptor command status		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
};

184
struct tx_desc {
185 186 187 188 189 190 191
	u16 byte_cnt;		/* buffer byte count			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u32 cmd_sts;		/* Command/status field			*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
};
#elif defined(__LITTLE_ENDIAN)
192
struct rx_desc {
193 194 195 196 197 198 199
	u32 cmd_sts;		/* Descriptor command status		*/
	u16 buf_size;		/* Buffer size				*/
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
};

200
struct tx_desc {
201 202 203 204 205 206 207 208 209 210
	u32 cmd_sts;		/* Command/status field			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u16 byte_cnt;		/* buffer byte count			*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
};
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif

211
/* RX & TX descriptor command */
212
#define BUFFER_OWNED_BY_DMA		0x80000000
213 214

/* RX & TX descriptor status */
215
#define ERROR_SUMMARY			0x00000001
216 217

/* RX descriptor status */
218 219 220 221
#define LAYER_4_CHECKSUM_OK		0x40000000
#define RX_ENABLE_INTERRUPT		0x20000000
#define RX_FIRST_DESC			0x08000000
#define RX_LAST_DESC			0x04000000
222 223

/* TX descriptor command */
224 225 226 227 228 229 230 231
#define TX_ENABLE_INTERRUPT		0x00800000
#define GEN_CRC				0x00400000
#define TX_FIRST_DESC			0x00200000
#define TX_LAST_DESC			0x00100000
#define ZERO_PADDING			0x00080000
#define GEN_IP_V4_CHECKSUM		0x00040000
#define GEN_TCP_UDP_CHECKSUM		0x00020000
#define UDP_FRAME			0x00010000
232 233
#define MAC_HDR_EXTRA_4_BYTES		0x00008000
#define MAC_HDR_EXTRA_8_BYTES		0x00000200
234

235
#define TX_IHL_SHIFT			11
236 237


238
/* global *******************************************************************/
239
/*
 * State shared between all ports on one MV643XX/Orion controller.
 */
struct mv643xx_eth_shared_private {
	/*
	 * Ethernet controller base address.
	 */
	void __iomem *base;

	/*
	 * Points at the right SMI instance to use.
	 */
	struct mv643xx_eth_shared_private *smi;

	/*
	 * Provides access to local SMI interface.
	 */
	struct mii_bus smi_bus;

	/*
	 * If we have access to the error interrupt pin (which is
	 * somewhat misnamed as it not only reflects internal errors
	 * but also reflects SMI completion), use that to wait for
	 * SMI access completion instead of polling the SMI busy bit.
	 */
	int err_interrupt;
	wait_queue_head_t smi_busy_wait;

	/*
	 * Per-port MBUS window access register value.
	 */
	u32 win_protect;

	/*
	 * Hardware-specific parameters.
	 */
	unsigned int t_clk;		/* core clock, used by the TX rate code */
	int extended_rx_coal_limit;
	int tx_bw_control;		/* TX_BW_CONTROL_{ABSENT,OLD,NEW}_LAYOUT */
};

277 278 279 280
#define TX_BW_CONTROL_ABSENT		0
#define TX_BW_CONTROL_OLD_LAYOUT	1
#define TX_BW_CONTROL_NEW_LAYOUT	2

281 282

/* per-port *****************************************************************/
283
/*
 * Software copy of the port's MIB statistics counters.
 */
struct mib_counters {
	u64 good_octets_received;
	u32 bad_octets_received;
	u32 internal_mac_transmit_err;
	u32 good_frames_received;
	u32 bad_frames_received;
	u32 broadcast_frames_received;
	u32 multicast_frames_received;
	u32 frames_64_octets;
	u32 frames_65_to_127_octets;
	u32 frames_128_to_255_octets;
	u32 frames_256_to_511_octets;
	u32 frames_512_to_1023_octets;
	u32 frames_1024_to_max_octets;
	u64 good_octets_sent;
	u32 good_frames_sent;
	u32 excessive_collision;
	u32 multicast_frames_sent;
	u32 broadcast_frames_sent;
	u32 unrec_mac_control_received;
	u32 fc_sent;
	u32 good_fc_received;
	u32 bad_fc_received;
	u32 undersize_received;
	u32 fragments_received;
	u32 oversize_received;
	u32 jabber_received;
	u32 mac_receive_error;
	u32 bad_crc_event;
	u32 collision;
	u32 late_collision;
};

316
/*
 * Per-port receive queue state.
 */
struct rx_queue {
	int index;			/* queue number within the port */

	int rx_ring_size;		/* number of descriptors in the ring */

	int rx_desc_count;		/* descriptors currently filled */
	int rx_curr_desc;		/* next descriptor to process */
	int rx_used_desc;		/* next descriptor to refill */

	struct rx_desc *rx_desc_area;	/* descriptor ring */
	dma_addr_t rx_desc_dma;		/* DMA address of the ring */
	int rx_desc_area_size;
	struct sk_buff **rx_skb;	/* skb attached to each ring slot */
};

331
/*
 * Per-port transmit queue state.
 */
struct tx_queue {
	int index;			/* queue number within the port */

	int tx_ring_size;		/* number of descriptors in the ring */

	int tx_desc_count;		/* descriptors currently in use */
	int tx_curr_desc;		/* next descriptor to hand to hardware */
	int tx_used_desc;		/* next descriptor to reclaim */

	struct tx_desc *tx_desc_area;	/* descriptor ring */
	dma_addr_t tx_desc_dma;		/* DMA address of the ring */
	int tx_desc_area_size;

	struct sk_buff_head tx_skb;	/* skbs awaiting TX completion */

	/* software statistics, summed over queues by the stats code */
	unsigned long tx_packets;
	unsigned long tx_bytes;
	unsigned long tx_dropped;
};

/*
 * Per-port driver state.
 */
struct mv643xx_eth_private {
	struct mv643xx_eth_shared_private *shared;
	int port_num;			/* port index within the controller */

	struct net_device *dev;

	struct phy_device *phy;

	struct timer_list mib_counters_timer;
	spinlock_t mib_counters_lock;
	struct mib_counters mib_counters;

	struct work_struct tx_timeout_task;

	struct napi_struct napi;
	/*
	 * Pending-work bitmasks, one bit per queue, consumed by the
	 * NAPI poll loop (set/cleared in the rx/tx processing paths).
	 */
	u8 work_link;
	u8 work_tx;
	u8 work_tx_end;
	u8 work_rx;
	u8 work_rx_refill;
	u8 work_rx_oom;

	int skb_size;			/* RX buffer size for this MTU */
	struct sk_buff_head rx_recycle;	/* recycled skbs for RX refill */

	/*
	 * RX state.
	 */
	int default_rx_ring_size;
	unsigned long rx_desc_sram_addr;
	int rx_desc_sram_size;
	int rxq_count;
	struct timer_list rx_oom;	/* retry timer for allocation failure */
	struct rx_queue rxq[8];

	/*
	 * TX state.
	 */
	int default_tx_ring_size;
	unsigned long tx_desc_sram_addr;
	int tx_desc_sram_size;
	int txq_count;
	struct tx_queue txq[8];
};
L
Linus Torvalds 已提交
395

396

397
/* port register accessors **************************************************/
398
static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
399
{
400
	return readl(mp->shared->base + offset);
401
}
402

403
/* Write a 32-bit register relative to the shared controller base. */
static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	void __iomem *reg = mp->shared->base + offset;

	writel(data, reg);
}
407 408


409
/* rxq/txq helper functions *************************************************/
410
static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
411
{
412
	return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
413
}
414

415 416
static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
{
417
	return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);
418 419
}

420
static void rxq_enable(struct rx_queue *rxq)
421
{
422
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
423
	wrl(mp, RXQ_COMMAND(mp->port_num), 1 << rxq->index);
424
}
L
Linus Torvalds 已提交
425

426 427 428
static void rxq_disable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
429
	u8 mask = 1 << rxq->index;
L
Linus Torvalds 已提交
430

431 432 433
	wrl(mp, RXQ_COMMAND(mp->port_num), mask << 8);
	while (rdl(mp, RXQ_COMMAND(mp->port_num)) & mask)
		udelay(10);
434 435
}

436 437 438 439 440 441 442 443 444 445 446
static void txq_reset_hw_ptr(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int off = TXQ_CURRENT_DESC_PTR(mp->port_num, txq->index);
	u32 addr;

	addr = (u32)txq->tx_desc_dma;
	addr += txq->tx_curr_desc * sizeof(struct tx_desc);
	wrl(mp, off, addr);
}

447
static void txq_enable(struct tx_queue *txq)
L
Linus Torvalds 已提交
448
{
449
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
450
	wrl(mp, TXQ_COMMAND(mp->port_num), 1 << txq->index);
L
Linus Torvalds 已提交
451 452
}

453
static void txq_disable(struct tx_queue *txq)
L
Linus Torvalds 已提交
454
{
455
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
456
	u8 mask = 1 << txq->index;
457

458 459 460 461 462
	wrl(mp, TXQ_COMMAND(mp->port_num), mask << 8);
	while (rdl(mp, TXQ_COMMAND(mp->port_num)) & mask)
		udelay(10);
}

463
/*
 * Re-wake a netdev TX subqueue that was stopped for lack of ring
 * space, once enough descriptors (worst-case skb: MAX_SKB_FRAGS
 * fragments plus one head descriptor) have been reclaimed.  The
 * check is redone under the TX queue lock to avoid racing with the
 * xmit path.
 */
static void txq_maybe_wake(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);

	if (netif_tx_queue_stopped(nq)) {
		__netif_tx_lock(nq, smp_processor_id());
		if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
			netif_tx_wake_queue(nq);
		__netif_tx_unlock(nq);
	}
}

476

477
/* rx napi ******************************************************************/
478
/*
 * Process up to 'budget' completed RX descriptors on 'rxq' and hand
 * good packets to the network stack.  Returns the number of packets
 * processed; clears this queue's bit in mp->work_rx when the queue
 * ran out of completed descriptors before the budget was exhausted.
 */
static int rxq_process(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	struct net_device_stats *stats = &mp->dev->stats;
	int rx;

	rx = 0;
	while (rx < budget && rxq->rx_desc_count) {
		struct rx_desc *rx_desc;
		unsigned int cmd_sts;
		struct sk_buff *skb;
		u16 byte_cnt;

		rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];

		cmd_sts = rx_desc->cmd_sts;
		if (cmd_sts & BUFFER_OWNED_BY_DMA)
			break;
		/* read descriptor contents only after the ownership check */
		rmb();

		skb = rxq->rx_skb[rxq->rx_curr_desc];
		rxq->rx_skb[rxq->rx_curr_desc] = NULL;

		rxq->rx_curr_desc++;
		if (rxq->rx_curr_desc == rxq->rx_ring_size)
			rxq->rx_curr_desc = 0;

		dma_unmap_single(NULL, rx_desc->buf_ptr,
				 rx_desc->buf_size, DMA_FROM_DEVICE);
		rxq->rx_desc_count--;
		rx++;

		/* ask the NAPI poller to refill the slot we just consumed */
		mp->work_rx_refill |= 1 << rxq->index;

		byte_cnt = rx_desc->byte_cnt;

		/*
		 * Update statistics.
		 *
		 * Note that the descriptor byte count includes 2 dummy
		 * bytes automatically inserted by the hardware at the
		 * start of the packet (which we don't count), and a 4
		 * byte CRC at the end of the packet (which we do count).
		 */
		stats->rx_packets++;
		stats->rx_bytes += byte_cnt - 2;

		/*
		 * In case we received a packet without first / last bits
		 * on, or the error summary bit is set, the packet needs
		 * to be dropped.
		 */
		if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
					(RX_FIRST_DESC | RX_LAST_DESC))
				|| (cmd_sts & ERROR_SUMMARY)) {
			stats->rx_dropped++;

			if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
				(RX_FIRST_DESC | RX_LAST_DESC)) {
				if (net_ratelimit())
					dev_printk(KERN_ERR, &mp->dev->dev,
						   "received packet spanning "
						   "multiple descriptors\n");
			}

			if (cmd_sts & ERROR_SUMMARY)
				stats->rx_errors++;

			dev_kfree_skb(skb);
		} else {
			/*
			 * The -4 is for the CRC in the trailer of the
			 * received packet
			 */
			skb_put(skb, byte_cnt - 2 - 4);

			if (cmd_sts & LAYER_4_CHECKSUM_OK)
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->protocol = eth_type_trans(skb, mp->dev);
			netif_receive_skb(skb);
		}

		mp->dev->last_rx = jiffies;
	}

	if (rx < budget)
		mp->work_rx &= ~(1 << rxq->index);

	return rx;
}

569
/*
 * Attach fresh receive buffers to up to 'budget' empty slots of
 * 'rxq', preferring recycled skbs from mp->rx_recycle over fresh
 * allocations.  Returns the number of slots refilled; sets this
 * queue's bit in mp->work_rx_oom on allocation failure so the OOM
 * timer can retry later.
 */
static int rxq_refill(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	int refilled;

	refilled = 0;
	while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
		struct sk_buff *skb;
		int unaligned;
		int rx;

		skb = __skb_dequeue(&mp->rx_recycle);
		if (skb == NULL)
			skb = dev_alloc_skb(mp->skb_size +
					    dma_get_cache_alignment() - 1);

		if (skb == NULL) {
			mp->work_rx_oom |= 1 << rxq->index;
			goto oom;
		}

		/* align the buffer start to a cache line boundary */
		unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
		if (unaligned)
			skb_reserve(skb, dma_get_cache_alignment() - unaligned);

		refilled++;
		rxq->rx_desc_count++;

		rx = rxq->rx_used_desc++;
		if (rxq->rx_used_desc == rxq->rx_ring_size)
			rxq->rx_used_desc = 0;

		rxq->rx_desc_area[rx].buf_ptr = dma_map_single(NULL, skb->data,
						mp->skb_size, DMA_FROM_DEVICE);
		rxq->rx_desc_area[rx].buf_size = mp->skb_size;
		rxq->rx_skb[rx] = skb;
		/* descriptor fields must be visible before ownership flips */
		wmb();
		rxq->rx_desc_area[rx].cmd_sts = BUFFER_OWNED_BY_DMA |
						RX_ENABLE_INTERRUPT;
		wmb();

		/*
		 * The hardware automatically prepends 2 bytes of
		 * dummy data to each received packet, so that the
		 * IP header ends up 16-byte aligned.
		 */
		skb_reserve(skb, 2);
	}

	if (refilled < budget)
		mp->work_rx_refill &= ~(1 << rxq->index);

oom:
	return refilled;
}

625 626 627

/* tx ***********************************************************************/
static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
L
Linus Torvalds 已提交
628
{
629
	int frag;
L
Linus Torvalds 已提交
630

631
	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
632 633
		skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
		if (fragp->size <= 8 && fragp->page_offset & 7)
634
			return 1;
L
Linus Torvalds 已提交
635
	}
636

637 638
	return 0;
}
639

640
/*
 * Claim the next free descriptor slot in 'txq' and advance the
 * current-descriptor index, wrapping at the end of the ring.
 * The caller must already have verified there is room.
 */
static int txq_alloc_desc_index(struct tx_queue *txq)
{
	int tx_desc_curr;

	BUG_ON(txq->tx_desc_count >= txq->tx_ring_size);

	tx_desc_curr = txq->tx_curr_desc++;
	if (txq->tx_curr_desc == txq->tx_ring_size)
		txq->tx_curr_desc = 0;

	/* the producer index must never catch up with unreclaimed slots */
	BUG_ON(txq->tx_curr_desc == txq->tx_used_desc);

	return tx_desc_curr;
}
654

655
/*
 * Fill one TX descriptor per page fragment of 'skb'.  The head
 * descriptor is written by the caller (txq_submit_skb()); only the
 * final fragment descriptor requests a completion interrupt.
 */
static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int frag;

	for (frag = 0; frag < nr_frags; frag++) {
		skb_frag_t *this_frag;
		int tx_index;
		struct tx_desc *desc;

		this_frag = &skb_shinfo(skb)->frags[frag];
		tx_index = txq_alloc_desc_index(txq);
		desc = &txq->tx_desc_area[tx_index];

		/*
		 * The last fragment will generate an interrupt
		 * which will free the skb on TX completion.
		 */
		if (frag == nr_frags - 1) {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA |
					ZERO_PADDING | TX_LAST_DESC |
					TX_ENABLE_INTERRUPT;
		} else {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA;
		}

		desc->l4i_chk = 0;
		desc->byte_cnt = this_frag->size;
		desc->buf_ptr = dma_map_page(NULL, this_frag->page,
						this_frag->page_offset,
						this_frag->size,
						DMA_TO_DEVICE);
	}
}

690 691 692 693
/* Reinterpret a 16-bit checksum as big-endian without byte swapping. */
static inline __be16 sum16_as_be(__sum16 sum)
{
	return (__force __be16)sum;
}
L
Linus Torvalds 已提交
694

695
/*
 * Build TX descriptors for 'skb' on 'txq', program checksum offload
 * when requested, and kick the hardware.  Returns 0 on success; on a
 * checksum-offload setup failure the skb is freed and 1 is returned.
 * Must be called with enough ring space for nr_frags + 1 descriptors.
 */
static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int tx_index;
	struct tx_desc *desc;
	u32 cmd_sts;
	u16 l4i_chk;
	int length;

	cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
	l4i_chk = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int tag_bytes;

		BUG_ON(skb->protocol != htons(ETH_P_IP) &&
		       skb->protocol != htons(ETH_P_8021Q));

		/*
		 * The hardware can only offload checksums when the IP
		 * header follows the ethernet header at an offset of
		 * 0, 4, 8 or 12 extra (VLAN tag) bytes; otherwise fall
		 * back to a software checksum.
		 */
		tag_bytes = (void *)ip_hdr(skb) - (void *)skb->data - ETH_HLEN;
		if (unlikely(tag_bytes & ~12)) {
			if (skb_checksum_help(skb) == 0)
				goto no_csum;
			kfree_skb(skb);
			return 1;
		}

		if (tag_bytes & 4)
			cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
		if (tag_bytes & 8)
			cmd_sts |= MAC_HDR_EXTRA_8_BYTES;

		cmd_sts |= GEN_TCP_UDP_CHECKSUM |
			   GEN_IP_V4_CHECKSUM   |
			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;

		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_UDP:
			cmd_sts |= UDP_FRAME;
			l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
			break;
		case IPPROTO_TCP:
			l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
			break;
		default:
			BUG();
		}
	} else {
no_csum:
		/* Errata BTS #50, IHL must be 5 if no HW checksum */
		cmd_sts |= 5 << TX_IHL_SHIFT;
	}

	tx_index = txq_alloc_desc_index(txq);
	desc = &txq->tx_desc_area[tx_index];

	if (nr_frags) {
		txq_submit_frag_skb(txq, skb);
		length = skb_headlen(skb);
	} else {
		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
		length = skb->len;
	}

	desc->l4i_chk = l4i_chk;
	desc->byte_cnt = length;
	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);

	__skb_queue_tail(&txq->tx_skb, skb);

	/* ensure all other descriptors are written before first cmd_sts */
	wmb();
	desc->cmd_sts = cmd_sts;

	/* clear TX_END status */
	mp->work_tx_end &= ~(1 << txq->index);

	/* ensure all descriptors are written before poking hardware */
	wmb();
	txq_enable(txq);

	txq->tx_desc_count += nr_frags + 1;

	return 0;
}

L
Lennert Buytenhek 已提交
781
/*
 * ndo_start_xmit handler: map the skb to its TX queue, linearize it
 * if it carries tiny unaligned fragments the hardware cannot handle,
 * submit it, and stop the netdev subqueue when the ring can no longer
 * hold a worst-case skb.
 */
static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int queue;
	struct tx_queue *txq;
	struct netdev_queue *nq;

	queue = skb_get_queue_mapping(skb);
	txq = mp->txq + queue;
	nq = netdev_get_tx_queue(dev, queue);

	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
		txq->tx_dropped++;
		dev_printk(KERN_DEBUG, &dev->dev,
			   "failed to linearize skb with tiny "
			   "unaligned fragment\n");
		return NETDEV_TX_BUSY;
	}

	/* should not happen -- the queue is stopped before it fills up */
	if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
		if (net_ratelimit())
			dev_printk(KERN_ERR, &dev->dev, "tx queue full?!\n");
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (!txq_submit_skb(txq, skb)) {
		int entries_left;

		txq->tx_bytes += skb->len;
		txq->tx_packets++;
		dev->trans_start = jiffies;

		entries_left = txq->tx_ring_size - txq->tx_desc_count;
		if (entries_left < MAX_SKB_FRAGS + 1)
			netif_tx_stop_queue(nq);
	}

	return NETDEV_TX_OK;
}

822

823 824 825 826
/* tx napi ******************************************************************/
/*
 * Re-enable a TX queue that the hardware stopped at TX_END while
 * software had already queued more descriptors: if the queue is not
 * running and its hardware descriptor pointer lags the software one,
 * kick it.  Runs under the netdev TX queue lock to serialize against
 * the xmit path.
 */
static void txq_kick(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
	u32 hw_desc_ptr;
	u32 expected_ptr;

	__netif_tx_lock(nq, smp_processor_id());

	if (rdl(mp, TXQ_COMMAND(mp->port_num)) & (1 << txq->index))
		goto out;

	hw_desc_ptr = rdl(mp, TXQ_CURRENT_DESC_PTR(mp->port_num, txq->index));
	expected_ptr = (u32)txq->tx_desc_dma +
				txq->tx_curr_desc * sizeof(struct tx_desc);

	if (hw_desc_ptr != expected_ptr)
		txq_enable(txq);

out:
	__netif_tx_unlock(nq);

	mp->work_tx_end &= ~(1 << txq->index);
}

/*
 * Reclaim up to 'budget' completed TX descriptors on 'txq', unmapping
 * their buffers and recycling or freeing the associated skbs.  With
 * 'force' set (used on shutdown), descriptors still owned by the DMA
 * engine are forcibly taken back.  Returns the number of descriptors
 * reclaimed; clears this queue's bit in mp->work_tx when no completed
 * descriptors remain.  Runs under the netdev TX queue lock.
 */
static int txq_reclaim(struct tx_queue *txq, int budget, int force)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
	int reclaimed;

	__netif_tx_lock(nq, smp_processor_id());

	reclaimed = 0;
	while (reclaimed < budget && txq->tx_desc_count > 0) {
		int tx_index;
		struct tx_desc *desc;
		u32 cmd_sts;
		struct sk_buff *skb;

		tx_index = txq->tx_used_desc;
		desc = &txq->tx_desc_area[tx_index];
		cmd_sts = desc->cmd_sts;

		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
			if (!force)
				break;
			desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
		}

		txq->tx_used_desc = tx_index + 1;
		if (txq->tx_used_desc == txq->tx_ring_size)
			txq->tx_used_desc = 0;

		reclaimed++;
		txq->tx_desc_count--;

		/* only the last descriptor of an skb has it queued */
		skb = NULL;
		if (cmd_sts & TX_LAST_DESC)
			skb = __skb_dequeue(&txq->tx_skb);

		if (cmd_sts & ERROR_SUMMARY) {
			dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n");
			mp->dev->stats.tx_errors++;
		}

		/* head descriptors were mapped with map_single, frags with map_page */
		if (cmd_sts & TX_FIRST_DESC) {
			dma_unmap_single(NULL, desc->buf_ptr,
					 desc->byte_cnt, DMA_TO_DEVICE);
		} else {
			dma_unmap_page(NULL, desc->buf_ptr,
				       desc->byte_cnt, DMA_TO_DEVICE);
		}

		if (skb != NULL) {
			if (skb_queue_len(&mp->rx_recycle) <
					mp->default_rx_ring_size &&
			    skb_recycle_check(skb, mp->skb_size))
				__skb_queue_head(&mp->rx_recycle, skb);
			else
				dev_kfree_skb(skb);
		}
	}

	__netif_tx_unlock(nq);

	if (reclaimed < budget)
		mp->work_tx &= ~(1 << txq->index);

	return reclaimed;
}


917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939
/* tx rate control **********************************************************/
/*
 * Set total maximum TX rate (shared by all TX queues for this port)
 * to 'rate' bits per second, with a maximum burst of 'burst' bytes.
 */
/*
 * Set total maximum TX rate (shared by all TX queues for this port)
 * to 'rate' bits per second, with a maximum burst of 'burst' bytes.
 * Register values are clamped to their hardware field widths; the
 * register layout depends on the controller variant (tx_bw_control).
 */
static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
{
	int token_rate;
	int mtu;
	int bucket_size;

	/* tokens are granted per 64 core clock cycles */
	token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	mtu = (mp->dev->mtu + 255) >> 8;
	if (mtu > 63)
		mtu = 63;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	switch (mp->shared->tx_bw_control) {
	case TX_BW_CONTROL_OLD_LAYOUT:
		wrl(mp, TX_BW_RATE(mp->port_num), token_rate);
		wrl(mp, TX_BW_MTU(mp->port_num), mtu);
		wrl(mp, TX_BW_BURST(mp->port_num), bucket_size);
		break;
	case TX_BW_CONTROL_NEW_LAYOUT:
		wrl(mp, TX_BW_RATE_MOVED(mp->port_num), token_rate);
		wrl(mp, TX_BW_MTU_MOVED(mp->port_num), mtu);
		wrl(mp, TX_BW_BURST_MOVED(mp->port_num), bucket_size);
		break;
	}
}

/*
 * Set the maximum TX rate for a single queue to 'rate' bits per
 * second, with a maximum burst of 'burst' bytes.
 */
static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int token_rate;
	int bucket_size;

	/* tokens are granted per 64 core clock cycles */
	token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	wrl(mp, TXQ_BW_TOKENS(mp->port_num, txq->index), token_rate << 14);
	wrl(mp, TXQ_BW_CONF(mp->port_num, txq->index),
			(bucket_size << 10) | token_rate);
}

/*
 * Put this TX queue into fixed-priority arbitration mode.  The
 * configuration register location depends on the controller variant;
 * on controllers without TX bandwidth control the write is skipped.
 */
static void txq_set_fixed_prio_mode(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int off;
	u32 val;

	/*
	 * Turn on fixed priority mode.
	 */
	off = 0;
	switch (mp->shared->tx_bw_control) {
	case TX_BW_CONTROL_OLD_LAYOUT:
		off = TXQ_FIX_PRIO_CONF(mp->port_num);
		break;
	case TX_BW_CONTROL_NEW_LAYOUT:
		off = TXQ_FIX_PRIO_CONF_MOVED(mp->port_num);
		break;
	}

	if (off) {
		val = rdl(mp, off);
		val |= 1 << txq->index;
		wrl(mp, off, val);
	}
}
/*
 * Put this TX queue into weighted-round-robin arbitration mode with
 * the given weight.  The configuration register location depends on
 * the controller variant; on controllers without TX bandwidth control
 * the writes are skipped.
 */
static void txq_set_wrr(struct tx_queue *txq, int weight)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int off;
	u32 val;

	/*
	 * Turn off fixed priority mode.
	 */
	off = 0;
	switch (mp->shared->tx_bw_control) {
	case TX_BW_CONTROL_OLD_LAYOUT:
		off = TXQ_FIX_PRIO_CONF(mp->port_num);
		break;
	case TX_BW_CONTROL_NEW_LAYOUT:
		off = TXQ_FIX_PRIO_CONF_MOVED(mp->port_num);
		break;
	}

	if (off) {
		val = rdl(mp, off);
		val &= ~(1 << txq->index);
		wrl(mp, off, val);

		/*
		 * Configure WRR weight for this queue.
		 */
		off = TXQ_BW_WRR_CONF(mp->port_num, txq->index);

		val = rdl(mp, off);
		val = (val & ~0xff) | (weight & 0xff);
		wrl(mp, off, val);
	}
}


1035
/* mii management interface *************************************************/
1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047
static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id)
{
	struct mv643xx_eth_shared_private *msp = dev_id;

	if (readl(msp->base + ERR_INT_CAUSE) & ERR_INT_SMI_DONE) {
		writel(~ERR_INT_SMI_DONE, msp->base + ERR_INT_CAUSE);
		wake_up(&msp->smi_busy_wait);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
1048

1049
static int smi_is_done(struct mv643xx_eth_shared_private *msp)
L
Linus Torvalds 已提交
1050
{
1051 1052
	return !(readl(msp->base + SMI_REG) & SMI_BUSY);
}
L
Linus Torvalds 已提交
1053

1054 1055 1056 1057
/*
 * Wait for the SMI interface to become idle.  Without an error
 * interrupt we have to poll (up to ~100ms); with one we can sleep
 * until the SMI-done interrupt wakes us.  Returns 0 or -ETIMEDOUT.
 */
static int smi_wait_ready(struct mv643xx_eth_shared_private *msp)
{
	if (msp->err_interrupt == NO_IRQ) {
		int tries;

		for (tries = 0; !smi_is_done(msp); tries++) {
			if (tries == 10)
				return -ETIMEDOUT;
			msleep(10);
		}

		return 0;
	}

	if (!wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp),
				msecs_to_jiffies(100)))
		return -ETIMEDOUT;

	return 0;
}

1075
static int smi_bus_read(struct mii_bus *bus, int addr, int reg)
1076
{
1077
	struct mv643xx_eth_shared_private *msp = bus->priv;
1078 1079 1080 1081
	void __iomem *smi_reg = msp->base + SMI_REG;
	int ret;

	if (smi_wait_ready(msp)) {
1082 1083
		printk("mv643xx_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
L
Linus Torvalds 已提交
1084 1085
	}

L
Lennert Buytenhek 已提交
1086
	writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);
L
Linus Torvalds 已提交
1087

1088
	if (smi_wait_ready(msp)) {
1089 1090
		printk("mv643xx_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
1091 1092 1093 1094
	}

	ret = readl(smi_reg);
	if (!(ret & SMI_READ_VALID)) {
1095 1096
		printk("mv643xx_eth: SMI bus read not valid\n");
		return -ENODEV;
1097 1098
	}

1099
	return ret & 0xffff;
L
Linus Torvalds 已提交
1100 1101
}

1102
/*
 * mii_bus write callback: write 'val' to PHY register 'reg' of PHY
 * 'addr' via the SMI interface.  Returns 0 or -ETIMEDOUT.
 *
 * Fix: the printk() calls lacked a log level; kernel convention
 * requires an explicit KERN_* prefix on every printk.
 */
static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct mv643xx_eth_shared_private *msp = bus->priv;
	void __iomem *smi_reg = msp->base + SMI_REG;

	if (smi_wait_ready(msp)) {
		printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	writel(SMI_OPCODE_WRITE | (reg << 21) |
		(addr << 16) | (val & 0xffff), smi_reg);

	/* Wait for the write transaction to complete before returning. */
	if (smi_wait_ready(msp)) {
		printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}
L
Linus Torvalds 已提交
1122

1123

1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148
/* statistics ***************************************************************/
/*
 * Fold the per-TX-queue software counters into dev->stats and return
 * the aggregate structure.
 */
static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	unsigned long pkts = 0;
	unsigned long bytes = 0;
	unsigned long dropped = 0;
	int i;

	for (i = 0; i < mp->txq_count; i++) {
		struct tx_queue *txq = mp->txq + i;

		pkts += txq->tx_packets;
		bytes += txq->tx_bytes;
		dropped += txq->tx_dropped;
	}

	stats->tx_packets = pkts;
	stats->tx_bytes = bytes;
	stats->tx_dropped = dropped;

	return stats;
}

L
Lennert Buytenhek 已提交
1149
static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
1150
{
L
Lennert Buytenhek 已提交
1151
	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
L
Linus Torvalds 已提交
1152 1153
}

L
Lennert Buytenhek 已提交
1154
/*
 * Zero the hardware MIB counters by reading every register in the
 * 0x80-byte MIB block (the counters clear on read).
 */
static void mib_counters_clear(struct mv643xx_eth_private *mp)
{
	int i;

	for (i = 0; i < 0x80; i += 4)
		mib_read(mp, i);
}
1161

L
Lennert Buytenhek 已提交
1162
/*
 * Accumulate the hardware MIB counters into the software copy in
 * mp->mib_counters, then re-arm the periodic refresh timer.  The
 * spinlock serializes against concurrent updates (timer vs. ethtool).
 */
static void mib_counters_update(struct mv643xx_eth_private *mp)
{
	struct mib_counters *p = &mp->mib_counters;

	spin_lock(&mp->mib_counters_lock);
	p->good_octets_received += mib_read(mp, 0x00);
	/* good_octets counters are 64 bits; the next register is the high word. */
	p->good_octets_received += (u64)mib_read(mp, 0x04) << 32;
	p->bad_octets_received += mib_read(mp, 0x08);
	p->internal_mac_transmit_err += mib_read(mp, 0x0c);
	p->good_frames_received += mib_read(mp, 0x10);
	p->bad_frames_received += mib_read(mp, 0x14);
	p->broadcast_frames_received += mib_read(mp, 0x18);
	p->multicast_frames_received += mib_read(mp, 0x1c);
	p->frames_64_octets += mib_read(mp, 0x20);
	p->frames_65_to_127_octets += mib_read(mp, 0x24);
	p->frames_128_to_255_octets += mib_read(mp, 0x28);
	p->frames_256_to_511_octets += mib_read(mp, 0x2c);
	p->frames_512_to_1023_octets += mib_read(mp, 0x30);
	p->frames_1024_to_max_octets += mib_read(mp, 0x34);
	p->good_octets_sent += mib_read(mp, 0x38);
	p->good_octets_sent += (u64)mib_read(mp, 0x3c) << 32;
	p->good_frames_sent += mib_read(mp, 0x40);
	p->excessive_collision += mib_read(mp, 0x44);
	p->multicast_frames_sent += mib_read(mp, 0x48);
	p->broadcast_frames_sent += mib_read(mp, 0x4c);
	p->unrec_mac_control_received += mib_read(mp, 0x50);
	p->fc_sent += mib_read(mp, 0x54);
	p->good_fc_received += mib_read(mp, 0x58);
	p->bad_fc_received += mib_read(mp, 0x5c);
	p->undersize_received += mib_read(mp, 0x60);
	p->fragments_received += mib_read(mp, 0x64);
	p->oversize_received += mib_read(mp, 0x68);
	p->jabber_received += mib_read(mp, 0x6c);
	p->mac_receive_error += mib_read(mp, 0x70);
	p->bad_crc_event += mib_read(mp, 0x74);
	p->collision += mib_read(mp, 0x78);
	p->late_collision += mib_read(mp, 0x7c);
	spin_unlock(&mp->mib_counters_lock);

	/* Refresh again in 30 seconds so the read-to-clear counters can't wrap. */
	mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
}

/* Timer callback: periodically harvest the hardware MIB counters. */
static void mib_counters_timer_wrapper(unsigned long _mp)
{
	struct mv643xx_eth_private *mp = (struct mv643xx_eth_private *)_mp;

	mib_counters_update(mp);
}

1211 1212

/* ethtool ******************************************************************/
1213
struct mv643xx_eth_stats {
	char stat_string[ETH_GSTRING_LEN];	/* name reported via ethtool -S */
	int sizeof_stat;			/* counter width in bytes (4 or 8) */
	int netdev_off;		/* offset into struct net_device, or -1 */
	int mp_off;		/* offset into mv643xx_eth_private, or -1 */
};

1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266
/* Describe a counter that lives in struct net_device_stats. */
#define SSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct net_device_stats, m),		\
	  offsetof(struct net_device, stats.m), -1 }

/* Describe a counter that lives in the private MIB counter copy. */
#define MIBSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct mib_counters, m),		\
	  -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }

/* Table driving get_strings / get_ethtool_stats / get_sset_count. */
static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
	SSTAT(rx_packets),
	SSTAT(tx_packets),
	SSTAT(rx_bytes),
	SSTAT(tx_bytes),
	SSTAT(rx_errors),
	SSTAT(tx_errors),
	SSTAT(rx_dropped),
	SSTAT(tx_dropped),
	MIBSTAT(good_octets_received),
	MIBSTAT(bad_octets_received),
	MIBSTAT(internal_mac_transmit_err),
	MIBSTAT(good_frames_received),
	MIBSTAT(bad_frames_received),
	MIBSTAT(broadcast_frames_received),
	MIBSTAT(multicast_frames_received),
	MIBSTAT(frames_64_octets),
	MIBSTAT(frames_65_to_127_octets),
	MIBSTAT(frames_128_to_255_octets),
	MIBSTAT(frames_256_to_511_octets),
	MIBSTAT(frames_512_to_1023_octets),
	MIBSTAT(frames_1024_to_max_octets),
	MIBSTAT(good_octets_sent),
	MIBSTAT(good_frames_sent),
	MIBSTAT(excessive_collision),
	MIBSTAT(multicast_frames_sent),
	MIBSTAT(broadcast_frames_sent),
	MIBSTAT(unrec_mac_control_received),
	MIBSTAT(fc_sent),
	MIBSTAT(good_fc_received),
	MIBSTAT(bad_fc_received),
	MIBSTAT(undersize_received),
	MIBSTAT(fragments_received),
	MIBSTAT(oversize_received),
	MIBSTAT(jabber_received),
	MIBSTAT(mac_receive_error),
	MIBSTAT(bad_crc_event),
	MIBSTAT(collision),
	MIBSTAT(late_collision),
};

1269
static int mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1270
{
1271
	struct mv643xx_eth_private *mp = netdev_priv(dev);
1272 1273
	int err;

1274 1275 1276
	err = phy_read_status(mp->phy);
	if (err == 0)
		err = phy_ethtool_gset(mp->phy, cmd);
1277

L
Lennert Buytenhek 已提交
1278 1279 1280
	/*
	 * The MAC does not support 1000baseT_Half.
	 */
1281 1282 1283 1284 1285 1286
	cmd->supported &= ~SUPPORTED_1000baseT_Half;
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	return err;
}

1287 1288
static int mv643xx_eth_get_settings_phyless(struct net_device *dev, struct ethtool_cmd *cmd)
{
1289 1290 1291 1292 1293
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 port_status;

	port_status = rdl(mp, PORT_STATUS(mp->port_num));

1294 1295
	cmd->supported = SUPPORTED_MII;
	cmd->advertising = ADVERTISED_MII;
1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310
	switch (port_status & PORT_SPEED_MASK) {
	case PORT_SPEED_10:
		cmd->speed = SPEED_10;
		break;
	case PORT_SPEED_100:
		cmd->speed = SPEED_100;
		break;
	case PORT_SPEED_1000:
		cmd->speed = SPEED_1000;
		break;
	default:
		cmd->speed = -1;
		break;
	}
	cmd->duplex = (port_status & FULL_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
1311 1312 1313 1314 1315 1316 1317 1318 1319 1320
	cmd->port = PORT_MII;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_DISABLE;
	cmd->maxtxpkt = 1;
	cmd->maxrxpkt = 1;

	return 0;
}

1321
static int mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
L
Linus Torvalds 已提交
1322
{
1323
	struct mv643xx_eth_private *mp = netdev_priv(dev);
1324

L
Lennert Buytenhek 已提交
1325 1326 1327 1328 1329
	/*
	 * The MAC does not support 1000baseT_Half.
	 */
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

1330
	return phy_ethtool_sset(mp->phy, cmd);
1331
}
L
Linus Torvalds 已提交
1332

1333 1334 1335 1336 1337
/* PHY-less ports have fixed link parameters; nothing can be changed. */
static int mv643xx_eth_set_settings_phyless(struct net_device *dev, struct ethtool_cmd *cmd)
{
	return -EINVAL;
}

L
Lennert Buytenhek 已提交
1338 1339
/* ethtool get_drvinfo: report driver identity and the stats count. */
static void mv643xx_eth_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *drvinfo)
{
	strncpy(drvinfo->driver,  mv643xx_eth_driver_name, 32);
	strncpy(drvinfo->version, mv643xx_eth_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, "platform", 32);
	drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
}
L
Linus Torvalds 已提交
1347

L
Lennert Buytenhek 已提交
1348
static int mv643xx_eth_nway_reset(struct net_device *dev)
1349
{
1350
	struct mv643xx_eth_private *mp = netdev_priv(dev);
L
Linus Torvalds 已提交
1351

1352
	return genphy_restart_aneg(mp->phy);
1353
}
L
Linus Torvalds 已提交
1354

1355 1356 1357 1358 1359
/* No PHY, no autonegotiation to restart. */
static int mv643xx_eth_nway_reset_phyless(struct net_device *dev)
{
	return -EINVAL;
}

1360 1361
/* ethtool get_link: report the software carrier state (1 = up). */
static u32 mv643xx_eth_get_link(struct net_device *dev)
{
	return netif_carrier_ok(dev) ? 1 : 0;
}

L
Lennert Buytenhek 已提交
1365 1366
/*
 * ethtool get_strings: copy each counter name from the stats table
 * into its fixed ETH_GSTRING_LEN slot in 'data'.
 */
static void mv643xx_eth_get_strings(struct net_device *dev,
				    uint32_t stringset, uint8_t *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++)
		memcpy(data + i * ETH_GSTRING_LEN,
		       mv643xx_eth_stats[i].stat_string, ETH_GSTRING_LEN);
}
L
Linus Torvalds 已提交
1378

L
Lennert Buytenhek 已提交
1379 1380 1381
/*
 * ethtool get_ethtool_stats: refresh both the software TX counters and
 * the hardware MIB counters, then walk the stats table and fetch each
 * value from its recorded offset (netdev or private struct).
 */
static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
					  struct ethtool_stats *stats,
					  uint64_t *data)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int i;

	mv643xx_eth_get_stats(dev);
	mib_counters_update(mp);

	for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
		const struct mv643xx_eth_stats *stat = mv643xx_eth_stats + i;
		void *p;

		if (stat->netdev_off >= 0)
			p = ((void *)mp->dev) + stat->netdev_off;
		else
			p = ((void *)mp) + stat->mp_off;

		data[i] = (stat->sizeof_stat == 8) ?
				*(uint64_t *)p : *(uint32_t *)p;
	}
}
L
Linus Torvalds 已提交
1404

L
Lennert Buytenhek 已提交
1405
static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
1406
{
L
Lennert Buytenhek 已提交
1407
	if (sset == ETH_SS_STATS)
1408
		return ARRAY_SIZE(mv643xx_eth_stats);
L
Lennert Buytenhek 已提交
1409 1410

	return -EOPNOTSUPP;
1411
}
L
Linus Torvalds 已提交
1412

1413
/* ethtool operations used for ports that have an attached PHY. */
static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
	.get_settings		= mv643xx_eth_get_settings,
	.set_settings		= mv643xx_eth_set_settings,
	.get_drvinfo		= mv643xx_eth_get_drvinfo,
	.nway_reset		= mv643xx_eth_nway_reset,
	.get_link		= mv643xx_eth_get_link,
	.set_sg			= ethtool_op_set_sg,
	.get_strings		= mv643xx_eth_get_strings,
	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
	.get_sset_count		= mv643xx_eth_get_sset_count,
};
L
Linus Torvalds 已提交
1424

1425 1426 1427 1428 1429
/* ethtool operations used for PHY-less ports (fixed link parameters). */
static const struct ethtool_ops mv643xx_eth_ethtool_ops_phyless = {
	.get_settings		= mv643xx_eth_get_settings_phyless,
	.set_settings		= mv643xx_eth_set_settings_phyless,
	.get_drvinfo		= mv643xx_eth_get_drvinfo,
	.nway_reset		= mv643xx_eth_nway_reset_phyless,
	.get_link		= mv643xx_eth_get_link,
	.set_sg			= ethtool_op_set_sg,
	.get_strings		= mv643xx_eth_get_strings,
	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
	.get_sset_count		= mv643xx_eth_get_sset_count,
};

1437

1438
/* address handling *********************************************************/
1439
static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
1440 1441 1442
{
	unsigned int mac_h;
	unsigned int mac_l;
L
Linus Torvalds 已提交
1443

L
Lennert Buytenhek 已提交
1444 1445
	mac_h = rdl(mp, MAC_ADDR_HIGH(mp->port_num));
	mac_l = rdl(mp, MAC_ADDR_LOW(mp->port_num));
L
Linus Torvalds 已提交
1446

1447 1448 1449 1450 1451 1452
	addr[0] = (mac_h >> 24) & 0xff;
	addr[1] = (mac_h >> 16) & 0xff;
	addr[2] = (mac_h >> 8) & 0xff;
	addr[3] = mac_h & 0xff;
	addr[4] = (mac_l >> 8) & 0xff;
	addr[5] = mac_l & 0xff;
1453
}
L
Linus Torvalds 已提交
1454

1455
/*
 * Clear all hardware address filter tables: both 256-byte multicast
 * tables and the 16-byte unicast table.
 */
static void init_mac_tables(struct mv643xx_eth_private *mp)
{
	int i;

	for (i = 0; i < 0x100; i += 4) {
		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, 0);
		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, 0);
	}

	for (i = 0; i < 0x10; i += 4)
		wrl(mp, UNICAST_TABLE(mp->port_num) + i, 0);
}
1467

1468
/*
 * Mark entry 'entry' in the filter table at register offset 'table'
 * as accepted.  Each 32-bit table register packs four one-byte
 * entries; the accept bit is the LSB of the entry's byte.
 */
static void set_filter_table_entry(struct mv643xx_eth_private *mp,
				   int table, unsigned char entry)
{
	unsigned int table_reg;

	/* Set "accepts frame bit" at specified table entry */
	table_reg = rdl(mp, table + (entry & 0xfc));
	table_reg |= 0x01 << (8 * (entry & 3));
	wrl(mp, table + (entry & 0xfc), table_reg);
}

1479
/*
 * Program 'addr' as the port's unicast MAC address and enable the
 * matching entry in the unicast filter table (indexed by the low
 * nibble of the last address byte).
 */
static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
{
	unsigned int mac_h;
	unsigned int mac_l;
	int table;

	mac_l = (addr[4] << 8) | addr[5];
	mac_h = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];

	wrl(mp, MAC_ADDR_LOW(mp->port_num), mac_l);
	wrl(mp, MAC_ADDR_HIGH(mp->port_num), mac_h);

	table = UNICAST_TABLE(mp->port_num);
	set_filter_table_entry(mp, table, addr[5] & 0x0f);
}

L
Lennert Buytenhek 已提交
1495
/*
 * net_device set_mac_address hook: copy the new address out of the
 * sockaddr, reset the filter tables and reprogram the unicast filter.
 */
static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	/* +2 is for the offset of the HW addr type */
	memcpy(dev->dev_addr, addr + 2, 6);

	init_mac_tables(mp);
	uc_addr_set(mp, dev->dev_addr);

	return 0;
}

1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525
/*
 * Compute the 8-bit CRC (polynomial 0x107) of a 6-byte MAC address,
 * used to index the "other" multicast hash filter table.  The result
 * is always in [0, 255].
 */
static int addr_crc(unsigned char *addr)
{
	int crc = 0;
	int byte;

	for (byte = 0; byte < 6; byte++) {
		int bit;

		crc = (crc ^ addr[byte]) << 8;
		for (bit = 7; bit >= 0; bit--) {
			if (crc & (0x100 << bit))
				crc ^= 0x107 << bit;
		}
	}

	return crc;
}

L
Lennert Buytenhek 已提交
1526
/*
 * net_device set_rx_mode hook: program unicast promiscuity and the
 * multicast filter tables according to dev->flags and dev->mc_list.
 */
static void mv643xx_eth_set_rx_mode(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 port_config;
	struct dev_addr_list *addr;
	int i;

	port_config = rdl(mp, PORT_CONFIG(mp->port_num));
	if (dev->flags & IFF_PROMISC)
		port_config |= UNICAST_PROMISCUOUS_MODE;
	else
		port_config &= ~UNICAST_PROMISCUOUS_MODE;
	wrl(mp, PORT_CONFIG(mp->port_num), port_config);

	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		int port_num = mp->port_num;
		/* Accept-bit set in every byte lane == accept everything. */
		u32 accept = 0x01010101;

		for (i = 0; i < 0x100; i += 4) {
			wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept);
			wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept);
		}
		return;
	}

	/* Start from empty tables and re-add each subscribed address. */
	for (i = 0; i < 0x100; i += 4) {
		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, 0);
		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, 0);
	}

	for (addr = dev->mc_list; addr != NULL; addr = addr->next) {
		u8 *a = addr->da_addr;
		int table;

		if (addr->da_addrlen != 6)
			continue;

		/*
		 * 01:00:5e:00:00:xx addresses go into the "special"
		 * table indexed by the last byte; everything else is
		 * hashed by CRC into the "other" table.
		 */
		if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
			table = SPECIAL_MCAST_TABLE(mp->port_num);
			set_filter_table_entry(mp, table, a[5]);
		} else {
			int crc = addr_crc(a);

			table = OTHER_MCAST_TABLE(mp->port_num);
			set_filter_table_entry(mp, table, crc);
		}
	}
}
1574 1575


1576
/* rx/tx queue initialisation ***********************************************/
1577
/*
 * Allocate and initialise RX queue 'index': the descriptor ring
 * (from SRAM for queue 0 when it fits, otherwise DMA-coherent memory)
 * and the shadow skb pointer array.  Descriptors are chained into a
 * circular list.  Returns 0 or -ENOMEM.
 */
static int rxq_init(struct mv643xx_eth_private *mp, int index)
{
	struct rx_queue *rxq = mp->rxq + index;
	struct rx_desc *rx_desc;
	int size;
	int i;

	rxq->index = index;

	rxq->rx_ring_size = mp->default_rx_ring_size;

	rxq->rx_desc_count = 0;
	rxq->rx_curr_desc = 0;
	rxq->rx_used_desc = 0;

	size = rxq->rx_ring_size * sizeof(struct rx_desc);

	/* Queue 0 may use on-chip SRAM for its descriptor ring. */
	if (index == 0 && size <= mp->rx_desc_sram_size) {
		rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
						mp->rx_desc_sram_size);
		rxq->rx_desc_dma = mp->rx_desc_sram_addr;
	} else {
		rxq->rx_desc_area = dma_alloc_coherent(NULL, size,
							&rxq->rx_desc_dma,
							GFP_KERNEL);
	}

	if (rxq->rx_desc_area == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate rx ring (%d bytes)\n", size);
		goto out;
	}
	memset(rxq->rx_desc_area, 0, size);

	rxq->rx_desc_area_size = size;
	rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb),
								GFP_KERNEL);
	if (rxq->rx_skb == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate rx skb ring\n");
		goto out_free;
	}

	/* Link every descriptor to the next, wrapping at the end. */
	rx_desc = (struct rx_desc *)rxq->rx_desc_area;
	for (i = 0; i < rxq->rx_ring_size; i++) {
		int nexti;

		nexti = i + 1;
		if (nexti == rxq->rx_ring_size)
			nexti = 0;

		rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
					nexti * sizeof(struct rx_desc);
	}

	return 0;


out_free:
	if (index == 0 && size <= mp->rx_desc_sram_size)
		iounmap(rxq->rx_desc_area);
	else
		dma_free_coherent(NULL, size,
				  rxq->rx_desc_area,
				  rxq->rx_desc_dma);

out:
	return -ENOMEM;
}
1646

1647
/*
 * Tear down an RX queue: stop it, free any skbs still attached to
 * descriptors, and release the descriptor ring (SRAM or coherent DMA)
 * and the shadow skb array.
 */
static void rxq_deinit(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	int i;

	rxq_disable(rxq);

	for (i = 0; i < rxq->rx_ring_size; i++) {
		if (rxq->rx_skb[i]) {
			dev_kfree_skb(rxq->rx_skb[i]);
			rxq->rx_desc_count--;
		}
	}

	/* All skbs should have been accounted for above. */
	if (rxq->rx_desc_count) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "error freeing rx ring -- %d skbs stuck\n",
			   rxq->rx_desc_count);
	}

	if (rxq->index == 0 &&
	    rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
		iounmap(rxq->rx_desc_area);
	else
		dma_free_coherent(NULL, rxq->rx_desc_area_size,
				  rxq->rx_desc_area, rxq->rx_desc_dma);

	kfree(rxq->rx_skb);
}
L
Linus Torvalds 已提交
1676

1677
/*
 * Allocate and initialise TX queue 'index': the descriptor ring (from
 * SRAM for queue 0 when it fits, otherwise DMA-coherent memory) chained
 * into a circular list, plus the pending-skb queue.  Returns 0 or
 * -ENOMEM.
 */
static int txq_init(struct mv643xx_eth_private *mp, int index)
{
	struct tx_queue *txq = mp->txq + index;
	struct tx_desc *tx_desc;
	int size;
	int i;

	txq->index = index;

	txq->tx_ring_size = mp->default_tx_ring_size;

	txq->tx_desc_count = 0;
	txq->tx_curr_desc = 0;
	txq->tx_used_desc = 0;

	size = txq->tx_ring_size * sizeof(struct tx_desc);

	/* Queue 0 may use on-chip SRAM for its descriptor ring. */
	if (index == 0 && size <= mp->tx_desc_sram_size) {
		txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
						mp->tx_desc_sram_size);
		txq->tx_desc_dma = mp->tx_desc_sram_addr;
	} else {
		txq->tx_desc_area = dma_alloc_coherent(NULL, size,
							&txq->tx_desc_dma,
							GFP_KERNEL);
	}

	if (txq->tx_desc_area == NULL) {
		dev_printk(KERN_ERR, &mp->dev->dev,
			   "can't allocate tx ring (%d bytes)\n", size);
		return -ENOMEM;
	}
	memset(txq->tx_desc_area, 0, size);

	txq->tx_desc_area_size = size;

	/* Link every descriptor to the next, wrapping at the end. */
	tx_desc = (struct tx_desc *)txq->tx_desc_area;
	for (i = 0; i < txq->tx_ring_size; i++) {
		struct tx_desc *txd = tx_desc + i;
		int nexti;

		nexti = i + 1;
		if (nexti == txq->tx_ring_size)
			nexti = 0;

		txd->cmd_sts = 0;
		txd->next_desc_ptr = txq->tx_desc_dma +
					nexti * sizeof(struct tx_desc);
	}

	skb_queue_head_init(&txq->tx_skb);

	return 0;
}
L
Linus Torvalds 已提交
1731

1732
/*
 * Tear down a TX queue: stop it, reclaim every outstanding descriptor,
 * and release the descriptor ring (SRAM or coherent DMA).
 */
static void txq_deinit(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);

	txq_disable(txq);
	txq_reclaim(txq, txq->tx_ring_size, 1);

	/* After a full reclaim the ring must be empty. */
	BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);

	if (txq->index == 0 &&
	    txq->tx_desc_area_size <= mp->tx_desc_sram_size)
		iounmap(txq->tx_desc_area);
	else
		dma_free_coherent(NULL, txq->tx_desc_area_size,
				  txq->tx_desc_area, txq->tx_desc_dma);
}
L
Linus Torvalds 已提交
1748 1749


1750
/* netdev ops and related ***************************************************/
1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797
/*
 * Read and acknowledge the port's interrupt cause registers, folding
 * the pending events into the mp->work_* bitmasks that the NAPI poll
 * loop consumes.  Returns 1 if any event was collected, 0 otherwise.
 */
static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp)
{
	u32 int_cause;
	u32 int_cause_ext;

	int_cause = rdl(mp, INT_CAUSE(mp->port_num)) &
			(INT_TX_END | INT_RX | INT_EXT);
	if (int_cause == 0)
		return 0;

	int_cause_ext = 0;
	if (int_cause & INT_EXT)
		int_cause_ext = rdl(mp, INT_CAUSE_EXT(mp->port_num));

	int_cause &= INT_TX_END | INT_RX;
	if (int_cause) {
		wrl(mp, INT_CAUSE(mp->port_num), ~int_cause);
		/*
		 * Shift the TX_END cause bits down to a per-queue mask,
		 * masking out queues the hardware still has enabled
		 * (TXQ_COMMAND low byte) -- those are not done yet.
		 */
		mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
				~(rdl(mp, TXQ_COMMAND(mp->port_num)) & 0xff);
		/* RX cause bits start at bit 2; shift down to a queue mask. */
		mp->work_rx |= (int_cause & INT_RX) >> 2;
	}

	int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX;
	if (int_cause_ext) {
		wrl(mp, INT_CAUSE_EXT(mp->port_num), ~int_cause_ext);
		if (int_cause_ext & INT_EXT_LINK_PHY)
			mp->work_link = 1;
		mp->work_tx |= int_cause_ext & INT_EXT_TX;
	}

	return 1;
}

/*
 * Port interrupt handler: record pending events, then mask further
 * interrupts and hand processing over to NAPI.
 */
static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (unlikely(!mv643xx_eth_collect_events(mp)))
		return IRQ_NONE;

	wrl(mp, INT_MASK(mp->port_num), 0);
	napi_schedule(&mp->napi);

	return IRQ_HANDLED;
}

1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814
/*
 * React to a link state change: on link-down, drop the carrier and
 * flush/reset every TX queue; on link-up, log the negotiated
 * parameters and raise the carrier.
 */
static void handle_link_event(struct mv643xx_eth_private *mp)
{
	struct net_device *dev = mp->dev;
	u32 port_status;
	int speed;
	int duplex;
	int fc;

	port_status = rdl(mp, PORT_STATUS(mp->port_num));
	if (!(port_status & LINK_UP)) {
		if (netif_carrier_ok(dev)) {
			int i;

			printk(KERN_INFO "%s: link down\n", dev->name);

			netif_carrier_off(dev);

			/* Force-reclaim all in-flight TX descriptors. */
			for (i = 0; i < mp->txq_count; i++) {
				struct tx_queue *txq = mp->txq + i;

				txq_reclaim(txq, txq->tx_ring_size, 1);
				txq_reset_hw_ptr(txq);
			}
		}
		return;
	}

	switch (port_status & PORT_SPEED_MASK) {
	case PORT_SPEED_10:
		speed = 10;
		break;
	case PORT_SPEED_100:
		speed = 100;
		break;
	case PORT_SPEED_1000:
		speed = 1000;
		break;
	default:
		speed = -1;
		break;
	}
	duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
	fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;

	printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
			 "flow control %sabled\n", dev->name,
			 speed, duplex ? "full" : "half",
			 fc ? "en" : "dis");

	if (!netif_carrier_ok(dev))
		netif_carrier_on(dev);
}

1851
/*
 * NAPI poll handler.  Services link events first, then repeatedly
 * picks the highest-numbered queue with pending work (TX-end kicks,
 * TX reclaim, RX processing, RX refill, in that priority order for a
 * given queue) in slices of at most 16 units, until the budget is
 * spent or no work remains.  When done early, re-enables interrupts
 * and, if an out-of-memory refill is pending, arms the retry timer.
 */
static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
{
	struct mv643xx_eth_private *mp;
	int work_done;

	mp = container_of(napi, struct mv643xx_eth_private, napi);

	/* Retry refills that previously failed due to memory pressure. */
	mp->work_rx_refill |= mp->work_rx_oom;
	mp->work_rx_oom = 0;

	work_done = 0;
	while (work_done < budget) {
		u8 queue_mask;
		int queue;
		int work_tbd;

		if (mp->work_link) {
			mp->work_link = 0;
			handle_link_event(mp);
			continue;
		}

		queue_mask = mp->work_tx | mp->work_tx_end |
				mp->work_rx | mp->work_rx_refill;
		if (!queue_mask) {
			/* No recorded work; poll the hardware once more. */
			if (mv643xx_eth_collect_events(mp))
				continue;
			break;
		}

		/* Service the highest-numbered pending queue first. */
		queue = fls(queue_mask) - 1;
		queue_mask = 1 << queue;

		work_tbd = budget - work_done;
		if (work_tbd > 16)
			work_tbd = 16;

		if (mp->work_tx_end & queue_mask) {
			txq_kick(mp->txq + queue);
		} else if (mp->work_tx & queue_mask) {
			work_done += txq_reclaim(mp->txq + queue, work_tbd, 0);
			txq_maybe_wake(mp->txq + queue);
		} else if (mp->work_rx & queue_mask) {
			work_done += rxq_process(mp->rxq + queue, work_tbd);
		} else if (mp->work_rx_refill & queue_mask) {
			work_done += rxq_refill(mp->rxq + queue, work_tbd);
		} else {
			BUG();
		}
	}

	if (work_done < budget) {
		if (mp->work_rx_oom)
			mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
		napi_complete(napi);
		wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);
	}

	return work_done;
}
1911

1912 1913 1914
static inline void oom_timer_wrapper(unsigned long data)
{
	struct mv643xx_eth_private *mp = (void *)data;
L
Linus Torvalds 已提交
1915

1916
	napi_schedule(&mp->napi);
L
Linus Torvalds 已提交
1917 1918
}

1919
static void phy_reset(struct mv643xx_eth_private *mp)
L
Linus Torvalds 已提交
1920
{
1921 1922
	int data;

1923
	data = phy_read(mp->phy, MII_BMCR);
1924 1925
	if (data < 0)
		return;
L
Linus Torvalds 已提交
1926

1927
	data |= BMCR_RESET;
1928
	if (phy_write(mp->phy, MII_BMCR, data) < 0)
1929
		return;
L
Linus Torvalds 已提交
1930

1931
	do {
1932
		data = phy_read(mp->phy, MII_BMCR);
1933
	} while (data >= 0 && data & BMCR_RESET);
L
Linus Torvalds 已提交
1934 1935
}

L
Lennert Buytenhek 已提交
1936
/*
 * Bring the port hardware up: reset the PHY (preserving its settings),
 * enable the serial port, configure SDMA and the TX/RX paths, program
 * the unicast address, and start all RX queues.
 */
static void port_start(struct mv643xx_eth_private *mp)
{
	u32 pscr;
	int i;

	/*
	 * Perform PHY reset, if there is a PHY.
	 */
	if (mp->phy != NULL) {
		struct ethtool_cmd cmd;

		/* Save and restore link settings across the reset. */
		mv643xx_eth_get_settings(mp->dev, &cmd);
		phy_reset(mp);
		mv643xx_eth_set_settings(mp->dev, &cmd);
	}

	/*
	 * Configure basic link parameters.
	 */
	pscr = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));

	pscr |= SERIAL_PORT_ENABLE;
	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);

	pscr |= DO_NOT_FORCE_LINK_FAIL;
	/* PHY-less ports have no link indication; force the link up. */
	if (mp->phy == NULL)
		pscr |= FORCE_LINK_PASS;
	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);

	wrl(mp, SDMA_CONFIG(mp->port_num), PORT_SDMA_CONFIG_DEFAULT_VALUE);

	/*
	 * Configure TX path and queues.
	 */
	tx_set_rate(mp, 1000000000, 16777216);
	for (i = 0; i < mp->txq_count; i++) {
		struct tx_queue *txq = mp->txq + i;

		txq_reset_hw_ptr(txq);
		txq_set_rate(txq, 1000000000, 16777216);
		txq_set_fixed_prio_mode(txq);
	}

	/*
	 * Add configured unicast address to address filter table.
	 */
	uc_addr_set(mp, mp->dev->dev_addr);

	/*
	 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
	 * frames to RX queue #0, and include the pseudo-header when
	 * calculating receive checksums.
	 */
	wrl(mp, PORT_CONFIG(mp->port_num), 0x02000000);

	/*
	 * Treat BPDUs as normal multicasts, and disable partition mode.
	 */
	wrl(mp, PORT_CONFIG_EXT(mp->port_num), 0x00000000);

	/*
	 * Enable the receive queues.
	 */
	for (i = 0; i < mp->rxq_count; i++) {
		struct rx_queue *rxq = mp->rxq + i;
		int off = RXQ_CURRENT_DESC_PTR(mp->port_num, i);
		u32 addr;

		/* Point the hardware at the current descriptor. */
		addr = (u32)rxq->rx_desc_dma;
		addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
		wrl(mp, off, addr);

		rxq_enable(rxq);
	}
}

2012
/*
 * Program the RX interrupt coalescing delay.  'delay' appears to be in
 * microseconds (converted to t_clk ticks / 64) -- TODO confirm against
 * callers.  Newer chips keep an extended 16-bit field split across two
 * ranges of SDMA_CONFIG; older ones use a contiguous 14-bit field.
 */
static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
{
	unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;
	u32 val;

	val = rdl(mp, SDMA_CONFIG(mp->port_num));
	if (mp->shared->extended_rx_coal_limit) {
		if (coal > 0xffff)
			coal = 0xffff;
		val &= ~0x023fff80;
		val |= (coal & 0x8000) << 10;
		val |= (coal & 0x7fff) << 7;
	} else {
		if (coal > 0x3fff)
			coal = 0x3fff;
		val &= ~0x003fff00;
		val |= (coal & 0x3fff) << 8;
	}
	wrl(mp, SDMA_CONFIG(mp->port_num), val);
}

2033
/*
 * Program the TX interrupt coalescing delay.  'delay' appears to be in
 * microseconds (converted to t_clk ticks / 64) -- TODO confirm against
 * callers.  The field is 14 bits wide.
 */
static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
{
	unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;

	if (coal > 0x3fff)
		coal = 0x3fff;
	wrl(mp, TX_FIFO_URGENT_THRESHOLD(mp->port_num), (coal & 0x3fff) << 4);
}

2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061
/*
 * Recompute the receive buffer size for the current MTU.
 *
 * The hardware prepends 2 bytes of dummy data to each received frame,
 * so budget 2 + 14 bytes for the ethernet header, 16 bytes for up to
 * four VLAN tags and 4 bytes for the trailing FCS: 36 bytes on top of
 * the MTU.  Round up to a multiple of 8, since the low three bits of
 * the receive descriptor's buffer-size field are ignored by hardware.
 */
static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp)
{
	int needed = mp->dev->mtu + 36;

	mp->skb_size = (needed + 7) & ~7;
}

2062
static int mv643xx_eth_open(struct net_device *dev)
2063
{
2064
	struct mv643xx_eth_private *mp = netdev_priv(dev);
2065
	int err;
2066
	int i;
2067

L
Lennert Buytenhek 已提交
2068 2069 2070
	wrl(mp, INT_CAUSE(mp->port_num), 0);
	wrl(mp, INT_CAUSE_EXT(mp->port_num), 0);
	rdl(mp, INT_CAUSE_EXT(mp->port_num));
2071

L
Lennert Buytenhek 已提交
2072
	err = request_irq(dev->irq, mv643xx_eth_irq,
2073
			  IRQF_SHARED, dev->name, dev);
2074
	if (err) {
L
Lennert Buytenhek 已提交
2075
		dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
2076
		return -EAGAIN;
2077 2078
	}

L
Lennert Buytenhek 已提交
2079
	init_mac_tables(mp);
2080

2081 2082
	mv643xx_eth_recalc_skb_size(mp);

2083 2084
	napi_enable(&mp->napi);

2085 2086
	skb_queue_head_init(&mp->rx_recycle);

2087
	for (i = 0; i < mp->rxq_count; i++) {
2088 2089 2090
		err = rxq_init(mp, i);
		if (err) {
			while (--i >= 0)
2091
				rxq_deinit(mp->rxq + i);
2092 2093 2094
			goto out;
		}

2095
		rxq_refill(mp->rxq + i, INT_MAX);
2096 2097
	}

2098
	if (mp->work_rx_oom) {
2099 2100
		mp->rx_oom.expires = jiffies + (HZ / 10);
		add_timer(&mp->rx_oom);
2101
	}
2102

2103
	for (i = 0; i < mp->txq_count; i++) {
2104 2105 2106
		err = txq_init(mp, i);
		if (err) {
			while (--i >= 0)
2107
				txq_deinit(mp->txq + i);
2108 2109 2110
			goto out_free;
		}
	}
2111

2112 2113
	netif_carrier_off(dev);

L
Lennert Buytenhek 已提交
2114
	port_start(mp);
2115

2116 2117
	set_rx_coal(mp, 0);
	set_tx_coal(mp, 0);
2118

2119
	wrl(mp, INT_MASK_EXT(mp->port_num), INT_EXT_LINK_PHY | INT_EXT_TX);
2120
	wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);
2121

2122 2123
	return 0;

2124

L
Lennert Buytenhek 已提交
2125
out_free:
2126 2127
	for (i = 0; i < mp->rxq_count; i++)
		rxq_deinit(mp->rxq + i);
L
Lennert Buytenhek 已提交
2128
out:
2129 2130 2131
	free_irq(dev->irq, dev);

	return err;
2132 2133
}

2134
/*
 * Quiesce the port: disable all RX/TX queues, wait for the TX path to
 * drain, then clear the enable/force-link bits in the port serial
 * control register.  Interrupt masks are left for the caller.
 */
static void port_reset(struct mv643xx_eth_private *mp)
{
	unsigned int data;
	int i;

	for (i = 0; i < mp->rxq_count; i++)
		rxq_disable(mp->rxq + i);
	for (i = 0; i < mp->txq_count; i++)
		txq_disable(mp->txq + i);

	/*
	 * Wait until transmission is idle and the FIFO has emptied.
	 * NOTE(review): this polls forever if the hardware never reports
	 * TX_FIFO_EMPTY -- consider a bounded timeout.
	 */
	while (1) {
		u32 ps = rdl(mp, PORT_STATUS(mp->port_num));

		if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY)
			break;
		udelay(10);
	}

	/* Reset the Enable bit in the Configuration Register */
	data = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
	data &= ~(SERIAL_PORT_ENABLE		|
		  DO_NOT_FORCE_LINK_FAIL	|
		  FORCE_LINK_PASS);
	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), data);
}

2160
/*
 * net_device ->stop() hook: mask port interrupts, stop NAPI and the
 * timers, reset the port, take a final stats/MIB snapshot, and free
 * all RX/TX queue resources.  Always returns 0.
 */
static int mv643xx_eth_stop(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int i;

	/* Mask all port interrupts; the read back flushes the write. */
	wrl(mp, INT_MASK(mp->port_num), 0x00000000);
	rdl(mp, INT_MASK(mp->port_num));

	del_timer_sync(&mp->mib_counters_timer);

	napi_disable(&mp->napi);

	del_timer_sync(&mp->rx_oom);

	netif_carrier_off(dev);

	free_irq(dev->irq, dev);

	port_reset(mp);
	/* Final snapshot of both software stats and hardware MIB counters. */
	mv643xx_eth_get_stats(dev);
	mib_counters_update(mp);

	skb_queue_purge(&mp->rx_recycle);

	for (i = 0; i < mp->rxq_count; i++)
		rxq_deinit(mp->rxq + i);
	for (i = 0; i < mp->txq_count; i++)
		txq_deinit(mp->txq + i);

	return 0;
}

L
Lennert Buytenhek 已提交
2192
static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
L
Linus Torvalds 已提交
2193
{
2194
	struct mv643xx_eth_private *mp = netdev_priv(dev);
L
Linus Torvalds 已提交
2195

2196 2197
	if (mp->phy != NULL)
		return phy_mii_ioctl(mp->phy, if_mii(ifr), cmd);
2198 2199

	return -EOPNOTSUPP;
L
Linus Torvalds 已提交
2200 2201
}

2202
/*
 * net_device ->change_mtu() hook.  Accepts 64..9500 bytes, recomputes
 * the RX skb size and TX rate limits, and -- if the interface is up --
 * restarts it so RX buffers of the new size get allocated.
 */
static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (new_mtu < 64 || new_mtu > 9500)
		return -EINVAL;

	dev->mtu = new_mtu;
	mv643xx_eth_recalc_skb_size(mp);
	tx_set_rate(mp, 1000000000, 16777216);

	if (!netif_running(dev))
		return 0;

	/*
	 * Stop and then re-open the interface. This will allocate RX
	 * skbs of the new MTU.
	 * There is a possible danger that the open will not succeed,
	 * due to memory being full.
	 */
	mv643xx_eth_stop(dev);
	if (mv643xx_eth_open(dev)) {
		/* NOTE(review): the failure is only logged; 0 is still
		 * returned and the device is left down. */
		dev_printk(KERN_ERR, &dev->dev,
			   "fatal error on re-opening device after "
			   "MTU change\n");
	}

	return 0;
}

L
Lennert Buytenhek 已提交
2232
static void tx_timeout_task(struct work_struct *ugly)
L
Linus Torvalds 已提交
2233
{
L
Lennert Buytenhek 已提交
2234
	struct mv643xx_eth_private *mp;
L
Linus Torvalds 已提交
2235

L
Lennert Buytenhek 已提交
2236 2237
	mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
	if (netif_running(mp->dev)) {
2238
		netif_tx_stop_all_queues(mp->dev);
L
Lennert Buytenhek 已提交
2239 2240
		port_reset(mp);
		port_start(mp);
2241
		netif_tx_wake_all_queues(mp->dev);
L
Lennert Buytenhek 已提交
2242
	}
2243 2244 2245
}

/*
 * net_device ->tx_timeout() hook: runs in atomic context, so only log
 * and defer the actual recovery to tx_timeout_task().
 */
static void mv643xx_eth_tx_timeout(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	dev_printk(KERN_INFO, &dev->dev, "tx timeout\n");

	schedule_work(&mp->tx_timeout_task);
}

2254
#ifdef CONFIG_NET_POLL_CONTROLLER
L
Lennert Buytenhek 已提交
2255
static void mv643xx_eth_netpoll(struct net_device *dev)
2256
{
L
Lennert Buytenhek 已提交
2257
	struct mv643xx_eth_private *mp = netdev_priv(dev);
2258

L
Lennert Buytenhek 已提交
2259 2260
	wrl(mp, INT_MASK(mp->port_num), 0x00000000);
	rdl(mp, INT_MASK(mp->port_num));
2261

L
Lennert Buytenhek 已提交
2262
	mv643xx_eth_irq(dev->irq, dev);
2263

2264
	wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);
2265
}
2266
#endif
2267 2268


2269
/* platform glue ************************************************************/
2270 2271 2272
/*
 * Program the controller's six MBUS address-decode windows to match
 * the DRAM chip-select layout from platform data.  All windows start
 * cleared; one window is programmed per chip select.  The accumulated
 * protection mask is stashed in msp->win_protect for the per-port code
 * to write to its WINDOW_PROTECT register later.
 */
static void
mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
			      struct mbus_dram_target_info *dram)
{
	void __iomem *base = msp->base;
	u32 win_enable;
	u32 win_protect;
	int i;

	/* Clear all six windows; only the first four have a remap-high reg. */
	for (i = 0; i < 6; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	/* Start with all enable bits set; bits are cleared below as
	 * windows are programmed. */
	win_enable = 0x3f;
	win_protect = 0;

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
			(cs->mbus_attr << 8) |
			dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable &= ~(1 << i);
		win_protect |= 3 << (2 * i);	/* 2 bits per window */
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE);
	msp->win_protect = win_protect;
}

2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316
/*
 * Probe write-then-read-back behaviour of a few registers to work out
 * which silicon variant we are driving; the results are cached in
 * *msp for the per-port code (set_rx_coal(), tx_set_rate(), ...).
 */
static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
{
	/*
	 * Check whether we have a 14-bit coal limit field in bits
	 * [21:8], or a 16-bit coal limit in bits [25,21:7] of the
	 * SDMA config register.
	 */
	writel(0x02000000, msp->base + SDMA_CONFIG(0));
	if (readl(msp->base + SDMA_CONFIG(0)) & 0x02000000)
		msp->extended_rx_coal_limit = 1;
	else
		msp->extended_rx_coal_limit = 0;

	/*
	 * Check whether the MAC supports TX rate control, and if
	 * yes, whether its associated registers are in the old or
	 * the new place.
	 */
	writel(1, msp->base + TX_BW_MTU_MOVED(0));
	if (readl(msp->base + TX_BW_MTU_MOVED(0)) & 1) {
		msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT;
	} else {
		writel(7, msp->base + TX_BW_RATE(0));
		if (readl(msp->base + TX_BW_RATE(0)) & 7)
			msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT;
		else
			msp->tx_bw_control = TX_BW_CONTROL_ABSENT;
	}
}

2335
static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2336
{
2337
	static int mv643xx_eth_version_printed = 0;
2338
	struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
2339
	struct mv643xx_eth_shared_private *msp;
2340 2341
	struct resource *res;
	int ret;
2342

2343
	if (!mv643xx_eth_version_printed++)
2344 2345
		printk(KERN_NOTICE "MV-643xx 10/100/1000 ethernet "
			"driver version %s\n", mv643xx_eth_driver_version);
2346

2347 2348 2349 2350
	ret = -EINVAL;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		goto out;
2351

2352 2353 2354 2355 2356 2357
	ret = -ENOMEM;
	msp = kmalloc(sizeof(*msp), GFP_KERNEL);
	if (msp == NULL)
		goto out;
	memset(msp, 0, sizeof(*msp));

2358 2359
	msp->base = ioremap(res->start, res->end - res->start + 1);
	if (msp->base == NULL)
2360 2361
		goto out_free;

2362 2363 2364 2365 2366 2367 2368 2369 2370
	/*
	 * Set up and register SMI bus.
	 */
	if (pd == NULL || pd->shared_smi == NULL) {
		msp->smi_bus.priv = msp;
		msp->smi_bus.name = "mv643xx_eth smi";
		msp->smi_bus.read = smi_bus_read;
		msp->smi_bus.write = smi_bus_write,
		snprintf(msp->smi_bus.id, MII_BUS_ID_SIZE, "%d", pdev->id);
2371
		msp->smi_bus.parent = &pdev->dev;
2372 2373 2374 2375 2376
		msp->smi_bus.phy_mask = 0xffffffff;
		if (mdiobus_register(&msp->smi_bus) < 0)
			goto out_unmap;
		msp->smi = msp;
	} else {
2377
		msp->smi = platform_get_drvdata(pd->shared_smi);
2378
	}
2379

2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397
	msp->err_interrupt = NO_IRQ;
	init_waitqueue_head(&msp->smi_busy_wait);

	/*
	 * Check whether the error interrupt is hooked up.
	 */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (res != NULL) {
		int err;

		err = request_irq(res->start, mv643xx_eth_err_irq,
				  IRQF_SHARED, "mv643xx_eth", msp);
		if (!err) {
			writel(ERR_INT_SMI_DONE, msp->base + ERR_INT_MASK);
			msp->err_interrupt = res->start;
		}
	}

2398 2399 2400 2401 2402 2403
	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (pd != NULL && pd->dram != NULL)
		mv643xx_eth_conf_mbus_windows(msp, pd->dram);

L
Lennert Buytenhek 已提交
2404 2405 2406 2407
	/*
	 * Detect hardware parameters.
	 */
	msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;
2408
	infer_hw_params(msp);
L
Lennert Buytenhek 已提交
2409 2410 2411

	platform_set_drvdata(pdev, msp);

2412 2413
	return 0;

2414 2415
out_unmap:
	iounmap(msp->base);
2416 2417 2418 2419 2420 2421 2422 2423
out_free:
	kfree(msp);
out:
	return ret;
}

/*
 * Tear down the shared state: unregister our SMI bus if we registered
 * one in probe, release the error interrupt if we claimed it, and
 * unmap/free everything.
 */
static int mv643xx_eth_shared_remove(struct platform_device *pdev)
{
	struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
	struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;

	/* Same condition as in probe: only unregister a bus we own. */
	if (pd == NULL || pd->shared_smi == NULL)
		mdiobus_unregister(&msp->smi_bus);
	if (msp->err_interrupt != NO_IRQ)
		free_irq(msp->err_interrupt, msp);
	iounmap(msp->base);
	kfree(msp);

	return 0;
}

2437
/* Platform driver for the shared (per-controller) resources. */
static struct platform_driver mv643xx_eth_shared_driver = {
	.probe		= mv643xx_eth_shared_probe,
	.remove		= mv643xx_eth_shared_remove,
	.driver = {
		.name	= MV643XX_ETH_SHARED_NAME,
		.owner	= THIS_MODULE,
	},
};

2446
static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
L
Linus Torvalds 已提交
2447
{
2448
	int addr_shift = 5 * mp->port_num;
L
Lennert Buytenhek 已提交
2449
	u32 data;
L
Linus Torvalds 已提交
2450

L
Lennert Buytenhek 已提交
2451 2452 2453 2454
	data = rdl(mp, PHY_ADDR);
	data &= ~(0x1f << addr_shift);
	data |= (phy_addr & 0x1f) << addr_shift;
	wrl(mp, PHY_ADDR, data);
L
Linus Torvalds 已提交
2455 2456
}

2457
static int phy_addr_get(struct mv643xx_eth_private *mp)
L
Linus Torvalds 已提交
2458
{
L
Lennert Buytenhek 已提交
2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480
	unsigned int data;

	data = rdl(mp, PHY_ADDR);

	return (data >> (5 * mp->port_num)) & 0x1f;
}

/*
 * Copy per-port configuration from platform data into the private
 * struct, falling back to defaults where the platform left fields
 * unset.
 */
static void set_params(struct mv643xx_eth_private *mp,
		       struct mv643xx_eth_platform_data *pd)
{
	struct net_device *dev = mp->dev;

	/* Prefer the platform-supplied MAC; otherwise read back whatever
	 * address is currently programmed into the port. */
	if (is_valid_ether_addr(pd->mac_addr))
		memcpy(dev->dev_addr, pd->mac_addr, 6);
	else
		uc_addr_get(mp, dev->dev_addr);

	mp->default_rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
	if (pd->rx_queue_size)
		mp->default_rx_ring_size = pd->rx_queue_size;
	mp->rx_desc_sram_addr = pd->rx_sram_addr;
	mp->rx_desc_sram_size = pd->rx_sram_size;

	/* '?:' -- use platform count if non-zero, else one queue. */
	mp->rxq_count = pd->rx_queue_count ? : 1;

	mp->default_tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
	if (pd->tx_queue_size)
		mp->default_tx_ring_size = pd->tx_queue_size;
	mp->tx_desc_sram_addr = pd->tx_sram_addr;
	mp->tx_desc_sram_size = pd->tx_sram_size;

	mp->txq_count = pd->tx_queue_count ? : 1;
}

2493 2494
/*
 * Locate the PHY for this port on the shared SMI bus.  With an
 * explicit platform address only that address is probed; with the
 * default, all 32 MDIO addresses are scanned starting from the one
 * currently latched in PHY_ADDR.  The first PHY found has its address
 * written back to PHY_ADDR.  Returns the phy_device, or NULL.
 */
static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
				   int phy_addr)
{
	struct mii_bus *bus = &mp->shared->smi->smi_bus;
	struct phy_device *phydev;
	int start;
	int num;
	int i;

	if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) {
		start = phy_addr_get(mp) & 0x1f;
		num = 32;
	} else {
		start = phy_addr & 0x1f;
		num = 1;
	}

	phydev = NULL;
	for (i = 0; i < num; i++) {
		int addr = (start + i) & 0x1f;

		/* Probe the address only if nothing is cached there yet. */
		if (bus->phy_map[addr] == NULL)
			mdiobus_scan(bus, addr);

		if (phydev == NULL) {
			phydev = bus->phy_map[addr];
			if (phydev != NULL)
				phy_addr_set(mp, addr);
		}
	}

	return phydev;
}

2527
/*
 * Reset and attach the PHY, then either enable full autonegotiation
 * (speed == 0) or force the given speed/duplex, and kick off
 * (re)negotiation.
 */
static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
{
	struct phy_device *phy = mp->phy;

	phy_reset(mp);

	phy_attach(mp->dev, phy->dev.bus_id, 0, PHY_INTERFACE_MODE_GMII);

	if (speed == 0) {
		/* speed == 0: autonegotiate everything we support. */
		phy->autoneg = AUTONEG_ENABLE;
		phy->speed = 0;
		phy->duplex = 0;
		phy->advertising = phy->supported | ADVERTISED_Autoneg;
	} else {
		phy->autoneg = AUTONEG_DISABLE;
		phy->advertising = 0;
		phy->speed = speed;
		phy->duplex = duplex;
	}
	phy_start_aneg(phy);
}

2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559
/*
 * Initialize the port serial control register.  The port is disabled
 * first if it was left enabled (presumably the enable bit must not be
 * changed together with the other bits -- TODO confirm against the
 * datasheet), then reprogrammed for a 9700-byte max frame.  When no
 * PHY is attached, autonegotiation of speed/duplex/flow-control is
 * disabled and the given values are forced.
 */
static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
{
	u32 pscr;

	pscr = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
	if (pscr & SERIAL_PORT_ENABLE) {
		pscr &= ~SERIAL_PORT_ENABLE;
		wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
	}

	pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
	if (mp->phy == NULL) {
		pscr |= DISABLE_AUTO_NEG_SPEED_GMII;
		if (speed == SPEED_1000)
			pscr |= SET_GMII_SPEED_TO_1000;
		else if (speed == SPEED_100)
			pscr |= SET_MII_SPEED_TO_100;

		pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL;

		pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX;
		if (duplex == DUPLEX_FULL)
			pscr |= SET_FULL_DUPLEX_MODE;
	}

	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
}

2577
/*
 * Per-port probe: allocate the netdev, wire it to the shared state,
 * find/configure the PHY, set up timers and NAPI, fill in the
 * net_device ops, and register it.  Returns 0 or a negative errno.
 */
static int mv643xx_eth_probe(struct platform_device *pdev)
{
	struct mv643xx_eth_platform_data *pd;
	struct mv643xx_eth_private *mp;
	struct net_device *dev;
	struct resource *res;
	DECLARE_MAC_BUF(mac);
	int err;

	pd = pdev->dev.platform_data;
	if (pd == NULL) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "no mv643xx_eth_platform_data\n");
		return -ENODEV;
	}

	if (pd->shared == NULL) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "no mv643xx_eth_platform_data->shared\n");
		return -ENODEV;
	}

	/* Up to 8 TX queues; the real count is set after set_params(). */
	dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8);
	if (!dev)
		return -ENOMEM;

	mp = netdev_priv(dev);
	platform_set_drvdata(pdev, mp);

	mp->shared = platform_get_drvdata(pd->shared);
	mp->port_num = pd->port_number;

	mp->dev = dev;

	set_params(mp, pd);
	dev->real_num_tx_queues = mp->txq_count;

	if (pd->phy_addr != MV643XX_ETH_PHY_NONE)
		mp->phy = phy_scan(mp, pd->phy_addr);

	/* Separate ethtool ops for PHY-backed and PHY-less ports. */
	if (mp->phy != NULL) {
		phy_init(mp, pd->speed, pd->duplex);
		SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);
	} else {
		SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops_phyless);
	}

	init_pscr(mp, pd->speed, pd->duplex);


	mib_counters_clear(mp);

	/* Periodic MIB counter harvesting, every 30 seconds. */
	init_timer(&mp->mib_counters_timer);
	mp->mib_counters_timer.data = (unsigned long)mp;
	mp->mib_counters_timer.function = mib_counters_timer_wrapper;
	mp->mib_counters_timer.expires = jiffies + 30 * HZ;
	add_timer(&mp->mib_counters_timer);

	spin_lock_init(&mp->mib_counters_lock);

	INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);

	netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 128);

	/* RX out-of-memory retry timer; armed in mv643xx_eth_open(). */
	init_timer(&mp->rx_oom);
	mp->rx_oom.data = (unsigned long)mp;
	mp->rx_oom.function = oom_timer_wrapper;


	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	BUG_ON(!res);
	dev->irq = res->start;

	dev->get_stats = mv643xx_eth_get_stats;
	dev->hard_start_xmit = mv643xx_eth_xmit;
	dev->open = mv643xx_eth_open;
	dev->stop = mv643xx_eth_stop;
	dev->set_multicast_list = mv643xx_eth_set_rx_mode;
	dev->set_mac_address = mv643xx_eth_set_mac_address;
	dev->do_ioctl = mv643xx_eth_ioctl;
	dev->change_mtu = mv643xx_eth_change_mtu;
	dev->tx_timeout = mv643xx_eth_tx_timeout;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = mv643xx_eth_netpoll;
#endif
	dev->watchdog_timeo = 2 * HZ;
	dev->base_addr = 0;

	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM;

	SET_NETDEV_DEV(dev, &pdev->dev);

	/* Apply the MBUS window protection mask computed at shared probe. */
	if (mp->shared->win_protect)
		wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);

	err = register_netdev(dev);
	if (err)
		goto out;

	dev_printk(KERN_NOTICE, &dev->dev, "port %d with MAC address %s\n",
		   mp->port_num, print_mac(mac, dev->dev_addr));

	if (mp->tx_desc_sram_size > 0)
		dev_printk(KERN_NOTICE, &dev->dev, "configured with sram\n");

	return 0;

out:
	free_netdev(dev);

	return err;
}

2691
/*
 * Per-port remove: unregister the netdev, detach the PHY, make sure a
 * queued tx_timeout_task is not still pending, and free the netdev.
 */
static int mv643xx_eth_remove(struct platform_device *pdev)
{
	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);

	unregister_netdev(mp->dev);
	if (mp->phy != NULL)
		phy_detach(mp->phy);
	flush_scheduled_work();
	free_netdev(mp->dev);

	platform_set_drvdata(pdev, NULL);

	return 0;
}

2706
/*
 * Platform ->shutdown() hook: silence the port's interrupts and, if
 * the interface is up, quiesce the hardware so it is idle across
 * reboot/kexec.
 */
static void mv643xx_eth_shutdown(struct platform_device *pdev)
{
	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);

	/* Mask all interrupts on ethernet port */
	wrl(mp, INT_MASK(mp->port_num), 0);
	rdl(mp, INT_MASK(mp->port_num));	/* flush the posted write */

	if (netif_running(mp->dev))
		port_reset(mp);
}

2718
/* Platform driver for the individual ethernet ports. */
static struct platform_driver mv643xx_eth_driver = {
	.probe		= mv643xx_eth_probe,
	.remove		= mv643xx_eth_remove,
	.shutdown	= mv643xx_eth_shutdown,
	.driver = {
		.name	= MV643XX_ETH_NAME,
		.owner	= THIS_MODULE,
	},
};

2728
/*
 * Module init: register the shared-resource driver first, then the
 * per-port driver; unwind the former if the latter fails.
 */
static int __init mv643xx_eth_init_module(void)
{
	int err;

	err = platform_driver_register(&mv643xx_eth_shared_driver);
	if (err)
		return err;

	err = platform_driver_register(&mv643xx_eth_driver);
	if (err)
		platform_driver_unregister(&mv643xx_eth_shared_driver);

	return err;
}
module_init(mv643xx_eth_init_module);
2742

2743
/* Module exit: unregister the drivers in reverse registration order. */
static void __exit mv643xx_eth_cleanup_module(void)
{
	platform_driver_unregister(&mv643xx_eth_driver);
	platform_driver_unregister(&mv643xx_eth_shared_driver);
}
module_exit(mv643xx_eth_cleanup_module);
L
Linus Torvalds 已提交
2749

2750 2751
MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, "
	      "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek");
MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
MODULE_LICENSE("GPL");
/* Alias both platform-device names so the module autoloads for either. */
MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);
MODULE_ALIAS("platform:" MV643XX_ETH_NAME);