/*
 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
 *
 * Based on the 64360 driver from:
 * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
 *		      Rabeeh Khoury <rabeeh@marvell.com>
 *
 * Copyright (C) 2003 PMC-Sierra, Inc.,
 *	written by Manish Lachwani
 *
 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
 *
 * Copyright (C) 2004-2006 MontaVista Software, Inc.
 *			   Dale Farnsworth <dale@farnsworth.org>
 *
 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
 *				     <sjhill@realitydiluted.com>
 *
 * Copyright (C) 2007-2008 Marvell Semiconductor
 *			   Lennert Buytenhek <buytenh@marvell.com>
 *
 * Copyright (C) 2013 Michael Stapelberg <michael@stapelberg.de>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <net/tso.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/phy.h>
#include <linux/mv643xx_eth.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>

static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.4";


/*
 * Registers shared between all ports.
 */
#define PHY_ADDR			0x0000
#define WINDOW_BASE(w)			(0x0200 + ((w) << 3))
#define WINDOW_SIZE(w)			(0x0204 + ((w) << 3))
#define WINDOW_REMAP_HIGH(w)		(0x0280 + ((w) << 2))
#define WINDOW_BAR_ENABLE		0x0290
#define WINDOW_PROTECT(w)		(0x0294 + ((w) << 4))

/*
 * Main per-port registers.  These live at offset 0x0400 for
 * port #0, 0x0800 for port #1, and 0x0c00 for port #2.
 */
#define PORT_CONFIG			0x0000
#define  UNICAST_PROMISCUOUS_MODE	0x00000001
#define PORT_CONFIG_EXT			0x0004
#define MAC_ADDR_LOW			0x0014
#define MAC_ADDR_HIGH			0x0018
#define SDMA_CONFIG			0x001c
#define  TX_BURST_SIZE_16_64BIT		0x01000000
#define  TX_BURST_SIZE_4_64BIT		0x00800000
#define  BLM_TX_NO_SWAP			0x00000020
#define  BLM_RX_NO_SWAP			0x00000010
#define  RX_BURST_SIZE_16_64BIT		0x00000008
#define  RX_BURST_SIZE_4_64BIT		0x00000004
#define PORT_SERIAL_CONTROL		0x003c
#define  SET_MII_SPEED_TO_100		0x01000000
#define  SET_GMII_SPEED_TO_1000		0x00800000
#define  SET_FULL_DUPLEX_MODE		0x00200000
#define  MAX_RX_PACKET_9700BYTE		0x000a0000
#define  DISABLE_AUTO_NEG_SPEED_GMII	0x00002000
#define  DO_NOT_FORCE_LINK_FAIL		0x00000400
#define  SERIAL_PORT_CONTROL_RESERVED	0x00000200
#define  DISABLE_AUTO_NEG_FOR_FLOW_CTRL	0x00000008
#define  DISABLE_AUTO_NEG_FOR_DUPLEX	0x00000004
#define  FORCE_LINK_PASS		0x00000002
#define  SERIAL_PORT_ENABLE		0x00000001
#define PORT_STATUS			0x0044
#define  TX_FIFO_EMPTY			0x00000400
#define  TX_IN_PROGRESS			0x00000080
#define  PORT_SPEED_MASK		0x00000030
#define  PORT_SPEED_1000		0x00000010
#define  PORT_SPEED_100			0x00000020
#define  PORT_SPEED_10			0x00000000
#define  FLOW_CONTROL_ENABLED		0x00000008
#define  FULL_DUPLEX			0x00000004
#define  LINK_UP			0x00000002
#define TXQ_COMMAND			0x0048
#define TXQ_FIX_PRIO_CONF		0x004c
#define PORT_SERIAL_CONTROL1		0x004c
#define  CLK125_BYPASS_EN		0x00000010
#define TX_BW_RATE			0x0050
#define TX_BW_MTU			0x0058
#define TX_BW_BURST			0x005c
#define INT_CAUSE			0x0060
#define  INT_TX_END			0x07f80000
#define  INT_TX_END_0			0x00080000
#define  INT_RX				0x000003fc
#define  INT_RX_0			0x00000004
#define  INT_EXT			0x00000002
#define INT_CAUSE_EXT			0x0064
#define  INT_EXT_LINK_PHY		0x00110000
#define  INT_EXT_TX			0x000000ff
#define INT_MASK			0x0068
#define INT_MASK_EXT			0x006c
#define TX_FIFO_URGENT_THRESHOLD	0x0074
#define RX_DISCARD_FRAME_CNT		0x0084
#define RX_OVERRUN_FRAME_CNT		0x0088
#define TXQ_FIX_PRIO_CONF_MOVED		0x00dc
#define TX_BW_RATE_MOVED		0x00e0
#define TX_BW_MTU_MOVED			0x00e8
#define TX_BW_BURST_MOVED		0x00ec
#define RXQ_CURRENT_DESC_PTR(q)		(0x020c + ((q) << 4))
#define RXQ_COMMAND			0x0280
#define TXQ_CURRENT_DESC_PTR(q)		(0x02c0 + ((q) << 2))
#define TXQ_BW_TOKENS(q)		(0x0300 + ((q) << 4))
#define TXQ_BW_CONF(q)			(0x0304 + ((q) << 4))
#define TXQ_BW_WRR_CONF(q)		(0x0308 + ((q) << 4))

/*
 * Misc per-port registers.
 */
#define MIB_COUNTERS(p)			(0x1000 + ((p) << 7))
#define SPECIAL_MCAST_TABLE(p)		(0x1400 + ((p) << 10))
#define OTHER_MCAST_TABLE(p)		(0x1500 + ((p) << 10))
#define UNICAST_TABLE(p)		(0x1600 + ((p) << 10))


/*
 * SDMA configuration register default value.
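 *
 * On little-endian hosts the BLM "no swap" bits are set so that the
 * SDMA engine does not byte-swap packet data on its way to and from
 * memory; big-endian hosts want the swap, so they leave them clear.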
 */
#if defined(__BIG_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		(RX_BURST_SIZE_4_64BIT	|	\
		 TX_BURST_SIZE_4_64BIT)
#elif defined(__LITTLE_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE		\
		(RX_BURST_SIZE_4_64BIT	|	\
		 BLM_RX_NO_SWAP		|	\
		 BLM_TX_NO_SWAP		|	\
		 TX_BURST_SIZE_4_64BIT)
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif


/*
 * Misc definitions.
 */
#define DEFAULT_RX_QUEUE_SIZE	128
#define DEFAULT_TX_QUEUE_SIZE	512
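/*
 * Headroom reserved in rxq_refill() so that the receive buffer handed
 * to the DMA engine starts on an SMP_CACHE_BYTES boundary; this is
 * zero whenever PAGE_SIZE - NET_SKB_PAD is already a multiple of the
 * cache line size.
 */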
#define SKB_DMA_REALIGN		((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)

#define TSO_HEADER_SIZE		128

/* Max number of allowed TCP segments for software TSO */
#define MV643XX_MAX_TSO_SEGS 100
#define MV643XX_MAX_SKB_DESCS (MV643XX_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

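/*
 * Whether a DMA address lies inside the queue's coherent TSO header
 * area; such buffers were not dma_map_single()'d and so must not be
 * unmapped when their descriptors are reclaimed.
 */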
#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_dma) && \
	 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
/*
 * RX/TX descriptors.
 */
#if defined(__BIG_ENDIAN)
struct rx_desc {
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u16 buf_size;		/* Buffer size				*/
	u32 cmd_sts;		/* Descriptor command status		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
};

struct tx_desc {
	u16 byte_cnt;		/* buffer byte count			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u32 cmd_sts;		/* Command/status field			*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
};
#elif defined(__LITTLE_ENDIAN)
struct rx_desc {
	u32 cmd_sts;		/* Descriptor command status		*/
	u16 buf_size;		/* Buffer size				*/
	u16 byte_cnt;		/* Descriptor buffer byte count		*/
	u32 buf_ptr;		/* Descriptor buffer pointer		*/
	u32 next_desc_ptr;	/* Next descriptor pointer		*/
};

struct tx_desc {
	u32 cmd_sts;		/* Command/status field			*/
	u16 l4i_chk;		/* CPU provided TCP checksum		*/
	u16 byte_cnt;		/* buffer byte count			*/
	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
};
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif

/* RX & TX descriptor command */
#define BUFFER_OWNED_BY_DMA		0x80000000

/* RX & TX descriptor status */
#define ERROR_SUMMARY			0x00000001

/* RX descriptor status */
#define LAYER_4_CHECKSUM_OK		0x40000000
#define RX_ENABLE_INTERRUPT		0x20000000
#define RX_FIRST_DESC			0x08000000
#define RX_LAST_DESC			0x04000000
#define RX_IP_HDR_OK			0x02000000
#define RX_PKT_IS_IPV4			0x01000000
#define RX_PKT_IS_ETHERNETV2		0x00800000
#define RX_PKT_LAYER4_TYPE_MASK		0x00600000
#define RX_PKT_LAYER4_TYPE_TCP_IPV4	0x00000000
#define RX_PKT_IS_VLAN_TAGGED		0x00080000

/* TX descriptor command */
#define TX_ENABLE_INTERRUPT		0x00800000
#define GEN_CRC				0x00400000
#define TX_FIRST_DESC			0x00200000
#define TX_LAST_DESC			0x00100000
#define ZERO_PADDING			0x00080000
#define GEN_IP_V4_CHECKSUM		0x00040000
#define GEN_TCP_UDP_CHECKSUM		0x00020000
#define UDP_FRAME			0x00010000
#define MAC_HDR_EXTRA_4_BYTES		0x00008000
#define GEN_TCP_UDP_CHK_FULL		0x00000400
#define MAC_HDR_EXTRA_8_BYTES		0x00000200

#define TX_IHL_SHIFT			11


/* global *******************************************************************/
struct mv643xx_eth_shared_private {
	/*
	 * Ethernet controller base address.
	 */
	void __iomem *base;

	/*
	 * Per-port MBUS window access register value.
	 */
	u32 win_protect;

	/*
	 * Hardware-specific parameters.
	 */
	int extended_rx_coal_limit;
	int tx_bw_control;
	int tx_csum_limit;
	struct clk *clk;
};

#define TX_BW_CONTROL_ABSENT		0
#define TX_BW_CONTROL_OLD_LAYOUT	1
#define TX_BW_CONTROL_NEW_LAYOUT	2

static int mv643xx_eth_open(struct net_device *dev);
static int mv643xx_eth_stop(struct net_device *dev);


/* per-port *****************************************************************/
struct mib_counters {
	u64 good_octets_received;
	u32 bad_octets_received;
	u32 internal_mac_transmit_err;
	u32 good_frames_received;
	u32 bad_frames_received;
	u32 broadcast_frames_received;
	u32 multicast_frames_received;
	u32 frames_64_octets;
	u32 frames_65_to_127_octets;
	u32 frames_128_to_255_octets;
	u32 frames_256_to_511_octets;
	u32 frames_512_to_1023_octets;
	u32 frames_1024_to_max_octets;
	u64 good_octets_sent;
	u32 good_frames_sent;
	u32 excessive_collision;
	u32 multicast_frames_sent;
	u32 broadcast_frames_sent;
	u32 unrec_mac_control_received;
	u32 fc_sent;
	u32 good_fc_received;
	u32 bad_fc_received;
	u32 undersize_received;
	u32 fragments_received;
	u32 oversize_received;
	u32 jabber_received;
	u32 mac_receive_error;
	u32 bad_crc_event;
	u32 collision;
	u32 late_collision;
	/* Non MIB hardware counters */
	u32 rx_discard;
	u32 rx_overrun;
};

struct rx_queue {
	int index;

	int rx_ring_size;

	int rx_desc_count;
	int rx_curr_desc;
	int rx_used_desc;

	struct rx_desc *rx_desc_area;
	dma_addr_t rx_desc_dma;
	int rx_desc_area_size;
	struct sk_buff **rx_skb;
};

struct tx_queue {
	int index;

	int tx_ring_size;

	int tx_desc_count;
	int tx_curr_desc;
	int tx_used_desc;

	int tx_stop_threshold;
	int tx_wake_threshold;

	char *tso_hdrs;
	dma_addr_t tso_hdrs_dma;

	struct tx_desc *tx_desc_area;
	dma_addr_t tx_desc_dma;
	int tx_desc_area_size;

	struct sk_buff_head tx_skb;

	unsigned long tx_packets;
	unsigned long tx_bytes;
	unsigned long tx_dropped;
};

struct mv643xx_eth_private {
	struct mv643xx_eth_shared_private *shared;
	void __iomem *base;
	int port_num;

	struct net_device *dev;

	struct phy_device *phy;

	struct timer_list mib_counters_timer;
	spinlock_t mib_counters_lock;
	struct mib_counters mib_counters;

	struct work_struct tx_timeout_task;

	struct napi_struct napi;
	u32 int_mask;
	u8 oom;
	u8 work_link;
	u8 work_tx;
	u8 work_tx_end;
	u8 work_rx;
	u8 work_rx_refill;

	int skb_size;

	/*
	 * RX state.
	 */
	int rx_ring_size;
	unsigned long rx_desc_sram_addr;
	int rx_desc_sram_size;
	int rxq_count;
	struct timer_list rx_oom;
	struct rx_queue rxq[8];

	/*
	 * TX state.
	 */
	int tx_ring_size;
	unsigned long tx_desc_sram_addr;
	int tx_desc_sram_size;
	int txq_count;
	struct tx_queue txq[8];

	/*
	 * Hardware-specific parameters.
	 */
	struct clk *clk;
	unsigned int t_clk;
};


/* port register accessors **************************************************/
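/*
 * rdl()/wrl() access the register block shared by all ports, while
 * rdlp()/wrlp() access the current port's own register block.
 */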
static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->shared->base + offset);
}

static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset)
{
	return readl(mp->base + offset);
}

static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->shared->base + offset);
}

static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data)
{
	writel(data, mp->base + offset);
}


/* rxq/txq helper functions *************************************************/
static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
{
	return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
}

static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
{
	return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);
}

static void rxq_enable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	wrlp(mp, RXQ_COMMAND, 1 << rxq->index);
}

static void rxq_disable(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	u8 mask = 1 << rxq->index;

	wrlp(mp, RXQ_COMMAND, mask << 8);
	while (rdlp(mp, RXQ_COMMAND) & mask)
		udelay(10);
}

static void txq_reset_hw_ptr(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	u32 addr;

	addr = (u32)txq->tx_desc_dma;
	addr += txq->tx_curr_desc * sizeof(struct tx_desc);
	wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr);
}

static void txq_enable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	wrlp(mp, TXQ_COMMAND, 1 << txq->index);
}

static void txq_disable(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	u8 mask = 1 << txq->index;

	wrlp(mp, TXQ_COMMAND, mask << 8);
	while (rdlp(mp, TXQ_COMMAND) & mask)
		udelay(10);
}

static void txq_maybe_wake(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);

	if (netif_tx_queue_stopped(nq)) {
		__netif_tx_lock(nq, smp_processor_id());
		if (txq->tx_desc_count <= txq->tx_wake_threshold)
			netif_tx_wake_queue(nq);
		__netif_tx_unlock(nq);
	}
}

static int rxq_process(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	struct net_device_stats *stats = &mp->dev->stats;
	int rx;

	rx = 0;
	while (rx < budget && rxq->rx_desc_count) {
		struct rx_desc *rx_desc;
		unsigned int cmd_sts;
		struct sk_buff *skb;
		u16 byte_cnt;

		rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];

		cmd_sts = rx_desc->cmd_sts;
		if (cmd_sts & BUFFER_OWNED_BY_DMA)
			break;
		rmb();

		skb = rxq->rx_skb[rxq->rx_curr_desc];
		rxq->rx_skb[rxq->rx_curr_desc] = NULL;

		rxq->rx_curr_desc++;
		if (rxq->rx_curr_desc == rxq->rx_ring_size)
			rxq->rx_curr_desc = 0;

		dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr,
				 rx_desc->buf_size, DMA_FROM_DEVICE);
		rxq->rx_desc_count--;
		rx++;

		mp->work_rx_refill |= 1 << rxq->index;

		byte_cnt = rx_desc->byte_cnt;

		/*
		 * Update statistics.
		 *
		 * Note that the descriptor byte count includes 2 dummy
		 * bytes automatically inserted by the hardware at the
		 * start of the packet (which we don't count), and a 4
		 * byte CRC at the end of the packet (which we do count).
		 */
		stats->rx_packets++;
		stats->rx_bytes += byte_cnt - 2;

		/*
		 * In case we received a packet without first / last bits
		 * on, or the error summary bit is set, the packet needs
		 * to be dropped.
		 */
		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC | ERROR_SUMMARY))
			!= (RX_FIRST_DESC | RX_LAST_DESC))
			goto err;

		/*
		 * The -4 is for the CRC in the trailer of the
		 * received packet
		 */
		skb_put(skb, byte_cnt - 2 - 4);

		if (cmd_sts & LAYER_4_CHECKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->protocol = eth_type_trans(skb, mp->dev);

		napi_gro_receive(&mp->napi, skb);

		continue;

err:
		stats->rx_dropped++;

		if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
			(RX_FIRST_DESC | RX_LAST_DESC)) {
			if (net_ratelimit())
				netdev_err(mp->dev,
					   "received packet spanning multiple descriptors\n");
		}

		if (cmd_sts & ERROR_SUMMARY)
			stats->rx_errors++;

		dev_kfree_skb(skb);
	}

	if (rx < budget)
		mp->work_rx &= ~(1 << rxq->index);

	return rx;
}

static int rxq_refill(struct rx_queue *rxq, int budget)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	int refilled;

	refilled = 0;
	while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
		struct sk_buff *skb;
		int rx;
		struct rx_desc *rx_desc;
		int size;

		skb = netdev_alloc_skb(mp->dev, mp->skb_size);

		if (skb == NULL) {
			mp->oom = 1;
			goto oom;
		}

		if (SKB_DMA_REALIGN)
			skb_reserve(skb, SKB_DMA_REALIGN);

		refilled++;
		rxq->rx_desc_count++;

		rx = rxq->rx_used_desc++;
		if (rxq->rx_used_desc == rxq->rx_ring_size)
			rxq->rx_used_desc = 0;

		rx_desc = rxq->rx_desc_area + rx;

		size = skb_end_pointer(skb) - skb->data;
		rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
						  skb->data, size,
						  DMA_FROM_DEVICE);
		rx_desc->buf_size = size;
		rxq->rx_skb[rx] = skb;
		wmb();
		rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
		wmb();

		/*
		 * The hardware automatically prepends 2 bytes of
		 * dummy data to each received packet, so that the
		 * IP header ends up 16-byte aligned.
		 */
		skb_reserve(skb, 2);
	}

	if (refilled < budget)
		mp->work_rx_refill &= ~(1 << rxq->index);

oom:
	return refilled;
}


/* tx ***********************************************************************/
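/*
 * The SDMA engine apparently mishandles fragments that are both
 * smaller than 9 bytes and not 8-byte aligned; mv643xx_eth_xmit()
 * linearizes any skb containing such a fragment before handing it
 * to the hardware.
 */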
static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
{
	int frag;

	for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
		const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];

		if (skb_frag_size(fragp) <= 8 && fragp->page_offset & 7)
			return 1;
	}

	return 0;
}

static inline __be16 sum16_as_be(__sum16 sum)
{
	return (__force __be16)sum;
}

static int skb_tx_csum(struct mv643xx_eth_private *mp, struct sk_buff *skb,
		       u16 *l4i_chk, u32 *command, int length)
{
	int ret;
	u32 cmd = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int hdr_len;
		int tag_bytes;

		BUG_ON(skb->protocol != htons(ETH_P_IP) &&
		       skb->protocol != htons(ETH_P_8021Q));

		hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
		tag_bytes = hdr_len - ETH_HLEN;

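		/*
		 * Checksum offload is only attempted for frames with
		 * 0, 4, 8 or 12 bytes of VLAN tagging (anything else
		 * trips 'tag_bytes & ~12') and payloads within
		 * tx_csum_limit; everything else falls back to
		 * skb_checksum_help().
		 */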
		if (length - hdr_len > mp->shared->tx_csum_limit ||
		    unlikely(tag_bytes & ~12)) {
			ret = skb_checksum_help(skb);
			if (!ret)
				goto no_csum;
			return ret;
		}

		if (tag_bytes & 4)
			cmd |= MAC_HDR_EXTRA_4_BYTES;
		if (tag_bytes & 8)
			cmd |= MAC_HDR_EXTRA_8_BYTES;

		cmd |= GEN_TCP_UDP_CHECKSUM | GEN_TCP_UDP_CHK_FULL |
			   GEN_IP_V4_CHECKSUM   |
			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;

		/* TODO: Revisit this. With the usage of GEN_TCP_UDP_CHK_FULL
		 * it seems we don't need to pass the initial checksum. */
		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_UDP:
			cmd |= UDP_FRAME;
			*l4i_chk = 0;
			break;
		case IPPROTO_TCP:
			*l4i_chk = 0;
			break;
		default:
			WARN(1, "protocol not supported");
		}
	} else {
no_csum:
		/* Errata BTS #50, IHL must be 5 if no HW checksum */
		cmd |= 5 << TX_IHL_SHIFT;
	}
	*command = cmd;
	return 0;
}

static inline int
txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
		 struct sk_buff *skb, char *data, int length,
		 bool last_tcp, bool is_last)
{
	int tx_index;
	u32 cmd_sts;
	struct tx_desc *desc;

	tx_index = txq->tx_curr_desc++;
	if (txq->tx_curr_desc == txq->tx_ring_size)
		txq->tx_curr_desc = 0;
	desc = &txq->tx_desc_area[tx_index];

	desc->l4i_chk = 0;
	desc->byte_cnt = length;
	desc->buf_ptr = dma_map_single(dev->dev.parent, data,
				       length, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, desc->buf_ptr))) {
		WARN(1, "dma_map_single failed!\n");
		return -ENOMEM;
	}

	cmd_sts = BUFFER_OWNED_BY_DMA;
	if (last_tcp) {
		/* last descriptor in the TCP packet */
		cmd_sts |= ZERO_PADDING | TX_LAST_DESC;
		/* last descriptor in SKB */
		if (is_last)
			cmd_sts |= TX_ENABLE_INTERRUPT;
	}
	desc->cmd_sts = cmd_sts;
	return 0;
}

static inline void
txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int tx_index;
	struct tx_desc *desc;
	int ret;
	u32 cmd_csum = 0;
	u16 l4i_chk = 0;

	tx_index = txq->tx_curr_desc;
	desc = &txq->tx_desc_area[tx_index];

	ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_csum, length);
	if (ret)
		WARN(1, "failed to prepare checksum!");

	/* Should we set this? Can't use the value from skb_tx_csum()
	 * as it's not the correct initial L4 checksum to use. */
	desc->l4i_chk = 0;

	desc->byte_cnt = hdr_len;
	desc->buf_ptr = txq->tso_hdrs_dma +
			txq->tx_curr_desc * TSO_HEADER_SIZE;
	desc->cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA  | TX_FIRST_DESC |
				   GEN_CRC;

	txq->tx_curr_desc++;
	if (txq->tx_curr_desc == txq->tx_ring_size)
		txq->tx_curr_desc = 0;
}

static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
			  struct net_device *dev)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int total_len, data_left, ret;
	int desc_count = 0;
	struct tso_t tso;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	/* Count needed descriptors */
	if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) {
		netdev_dbg(dev, "not enough descriptors for TSO!\n");
		return -EBUSY;
	}

	/* Initialize the TSO handler, and prepare the first payload */
	tso_start(skb, &tso);

	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;
		desc_count++;

		/* prepare packet headers: MAC + IP + TCP */
		hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		txq_put_hdr_tso(skb, txq, data_left);

		while (data_left > 0) {
			int size;
			desc_count++;

			size = min_t(int, tso.size, data_left);
			ret = txq_put_data_tso(dev, txq, skb, tso.data, size,
					       size == data_left,
					       total_len == 0);
			if (ret)
				goto err_release;
			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
	}

	__skb_queue_tail(&txq->tx_skb, skb);
	skb_tx_timestamp(skb);

	/* clear TX_END status */
	mp->work_tx_end &= ~(1 << txq->index);

	/* ensure all descriptors are written before poking hardware */
	wmb();
	txq_enable(txq);
	txq->tx_desc_count += desc_count;
	return 0;
err_release:
	/* TODO: Release all used data descriptors; header descriptors must not
	 * be DMA-unmapped.
	 */
	return ret;
}

static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int frag;

	for (frag = 0; frag < nr_frags; frag++) {
		skb_frag_t *this_frag;
		int tx_index;
		struct tx_desc *desc;
		void *addr;

		this_frag = &skb_shinfo(skb)->frags[frag];
		addr = page_address(this_frag->page.p) + this_frag->page_offset;
		tx_index = txq->tx_curr_desc++;
		if (txq->tx_curr_desc == txq->tx_ring_size)
			txq->tx_curr_desc = 0;
		desc = &txq->tx_desc_area[tx_index];

		/*
		 * The last fragment will generate an interrupt
		 * which will free the skb on TX completion.
		 */
		if (frag == nr_frags - 1) {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA |
					ZERO_PADDING | TX_LAST_DESC |
					TX_ENABLE_INTERRUPT;
		} else {
			desc->cmd_sts = BUFFER_OWNED_BY_DMA;
		}

		desc->l4i_chk = 0;
		desc->byte_cnt = skb_frag_size(this_frag);
		desc->buf_ptr = dma_map_single(mp->dev->dev.parent, addr,
					       desc->byte_cnt, DMA_TO_DEVICE);
	}
}

static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb,
			  struct net_device *dev)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int tx_index;
	struct tx_desc *desc;
	u32 cmd_sts;
	u16 l4i_chk;
	int length, ret;

	cmd_sts = 0;
	l4i_chk = 0;

	if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
		if (net_ratelimit())
			netdev_err(dev, "tx queue full?!\n");
		return -EBUSY;
	}

	ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_sts, skb->len);
	if (ret)
		return ret;
	cmd_sts |= TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;

	tx_index = txq->tx_curr_desc++;
	if (txq->tx_curr_desc == txq->tx_ring_size)
		txq->tx_curr_desc = 0;
	desc = &txq->tx_desc_area[tx_index];

	if (nr_frags) {
		txq_submit_frag_skb(txq, skb);
		length = skb_headlen(skb);
	} else {
		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
		length = skb->len;
	}

	desc->l4i_chk = l4i_chk;
	desc->byte_cnt = length;
	desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data,
				       length, DMA_TO_DEVICE);

	__skb_queue_tail(&txq->tx_skb, skb);

	skb_tx_timestamp(skb);

	/* ensure all other descriptors are written before first cmd_sts */
	wmb();
	desc->cmd_sts = cmd_sts;

	/* clear TX_END status */
	mp->work_tx_end &= ~(1 << txq->index);

	/* ensure all descriptors are written before poking hardware */
	wmb();
	txq_enable(txq);

	txq->tx_desc_count += nr_frags + 1;

	return 0;
}

static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int length, queue, ret;
	struct tx_queue *txq;
	struct netdev_queue *nq;

	queue = skb_get_queue_mapping(skb);
	txq = mp->txq + queue;
	nq = netdev_get_tx_queue(dev, queue);

	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
		netdev_printk(KERN_DEBUG, dev,
			      "failed to linearize skb with tiny unaligned fragment\n");
		return NETDEV_TX_BUSY;
	}

	length = skb->len;

	if (skb_is_gso(skb))
		ret = txq_submit_tso(txq, skb, dev);
	else
		ret = txq_submit_skb(txq, skb, dev);
	if (!ret) {
		txq->tx_bytes += length;
		txq->tx_packets++;

		if (txq->tx_desc_count >= txq->tx_stop_threshold)
			netif_tx_stop_queue(nq);
	} else {
		txq->tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	return NETDEV_TX_OK;
}


/* tx napi ******************************************************************/
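/*
 * Called when the hardware has raised TX_END: if the queue is idle
 * but its current-descriptor pointer has not caught up with where
 * software expects it to be, re-enable the queue.
 */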
static void txq_kick(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
	u32 hw_desc_ptr;
	u32 expected_ptr;

	__netif_tx_lock(nq, smp_processor_id());

	if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
		goto out;

	hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index));
	expected_ptr = (u32)txq->tx_desc_dma +
				txq->tx_curr_desc * sizeof(struct tx_desc);

	if (hw_desc_ptr != expected_ptr)
		txq_enable(txq);

out:
	__netif_tx_unlock(nq);

	mp->work_tx_end &= ~(1 << txq->index);
}

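/*
 * Reclaim descriptors that the hardware has finished with; when
 * 'force' is set (used during queue teardown), ownership is taken
 * back even for descriptors the hardware still owns.
 */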
static int txq_reclaim(struct tx_queue *txq, int budget, int force)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
	int reclaimed;

	__netif_tx_lock_bh(nq);

	reclaimed = 0;
	while (reclaimed < budget && txq->tx_desc_count > 0) {
		int tx_index;
		struct tx_desc *desc;
		u32 cmd_sts;
		struct sk_buff *skb;

		tx_index = txq->tx_used_desc;
		desc = &txq->tx_desc_area[tx_index];
		cmd_sts = desc->cmd_sts;

		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
			if (!force)
				break;
			desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
		}

		txq->tx_used_desc = tx_index + 1;
		if (txq->tx_used_desc == txq->tx_ring_size)
			txq->tx_used_desc = 0;

		reclaimed++;
		txq->tx_desc_count--;

		skb = NULL;
		if (cmd_sts & TX_LAST_DESC)
			skb = __skb_dequeue(&txq->tx_skb);

		if (cmd_sts & ERROR_SUMMARY) {
			netdev_info(mp->dev, "tx error\n");
			mp->dev->stats.tx_errors++;
		}

		if (!IS_TSO_HEADER(txq, desc->buf_ptr))
			dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
					 desc->byte_cnt, DMA_TO_DEVICE);
		dev_kfree_skb(skb);
	}

	__netif_tx_unlock_bh(nq);

	if (reclaimed < budget)
		mp->work_tx &= ~(1 << txq->index);

	return reclaimed;
}


/* tx rate control **********************************************************/
/*
 * Set total maximum TX rate (shared by all TX queues for this port)
 * to 'rate' bits per second, with a maximum burst of 'burst' bytes.
 */
static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
{
	int token_rate;
	int mtu;
	int bucket_size;

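	/*
	 * The rate register presumably counts credit in 1/64-bit units
	 * per t_clk cycle; the field is 10 bits wide, hence the clamp
	 * to 1023 below.
	 */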
	token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	mtu = (mp->dev->mtu + 255) >> 8;
	if (mtu > 63)
		mtu = 63;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	switch (mp->shared->tx_bw_control) {
	case TX_BW_CONTROL_OLD_LAYOUT:
		wrlp(mp, TX_BW_RATE, token_rate);
		wrlp(mp, TX_BW_MTU, mtu);
		wrlp(mp, TX_BW_BURST, bucket_size);
		break;
	case TX_BW_CONTROL_NEW_LAYOUT:
		wrlp(mp, TX_BW_RATE_MOVED, token_rate);
		wrlp(mp, TX_BW_MTU_MOVED, mtu);
		wrlp(mp, TX_BW_BURST_MOVED, bucket_size);
		break;
	}
}

static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int token_rate;
	int bucket_size;

	token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
	if (token_rate > 1023)
		token_rate = 1023;

	bucket_size = (burst + 255) >> 8;
	if (bucket_size > 65535)
		bucket_size = 65535;

	wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14);
	wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate);
}

static void txq_set_fixed_prio_mode(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);
	int off;
	u32 val;

	/*
	 * Turn on fixed priority mode.
	 */
	off = 0;
	switch (mp->shared->tx_bw_control) {
	case TX_BW_CONTROL_OLD_LAYOUT:
		off = TXQ_FIX_PRIO_CONF;
		break;
	case TX_BW_CONTROL_NEW_LAYOUT:
		off = TXQ_FIX_PRIO_CONF_MOVED;
		break;
	}

	if (off) {
		val = rdlp(mp, off);
		val |= 1 << txq->index;
		wrlp(mp, off, val);
	}
}


/* mii management interface *************************************************/
static void mv643xx_eth_adjust_link(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
	u32 autoneg_disable = FORCE_LINK_PASS |
			      DISABLE_AUTO_NEG_SPEED_GMII |
			      DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
			      DISABLE_AUTO_NEG_FOR_DUPLEX;

	if (mp->phy->autoneg == AUTONEG_ENABLE) {
		/* enable auto negotiation */
		pscr &= ~autoneg_disable;
		goto out_write;
	}

	pscr |= autoneg_disable;

	if (mp->phy->speed == SPEED_1000) {
		/* force gigabit, half duplex not supported */
		pscr |= SET_GMII_SPEED_TO_1000;
		pscr |= SET_FULL_DUPLEX_MODE;
		goto out_write;
	}

	pscr &= ~SET_GMII_SPEED_TO_1000;

	if (mp->phy->speed == SPEED_100)
		pscr |= SET_MII_SPEED_TO_100;
	else
		pscr &= ~SET_MII_SPEED_TO_100;

	if (mp->phy->duplex == DUPLEX_FULL)
		pscr |= SET_FULL_DUPLEX_MODE;
	else
		pscr &= ~SET_FULL_DUPLEX_MODE;

out_write:
	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
}

/* statistics ***************************************************************/
static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	unsigned long tx_packets = 0;
	unsigned long tx_bytes = 0;
	unsigned long tx_dropped = 0;
	int i;

	for (i = 0; i < mp->txq_count; i++) {
		struct tx_queue *txq = mp->txq + i;

		tx_packets += txq->tx_packets;
		tx_bytes += txq->tx_bytes;
		tx_dropped += txq->tx_dropped;
	}

	stats->tx_packets = tx_packets;
	stats->tx_bytes = tx_bytes;
	stats->tx_dropped = tx_dropped;

	return stats;
}

static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
{
	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
}

static void mib_counters_clear(struct mv643xx_eth_private *mp)
{
	int i;

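	/* The MIB counters are clear-on-read; reading the whole block zeroes it. */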
	for (i = 0; i < 0x80; i += 4)
		mib_read(mp, i);

	/* Clear non MIB hw counters also */
	rdlp(mp, RX_DISCARD_FRAME_CNT);
	rdlp(mp, RX_OVERRUN_FRAME_CNT);
}

static void mib_counters_update(struct mv643xx_eth_private *mp)
{
	struct mib_counters *p = &mp->mib_counters;

	spin_lock_bh(&mp->mib_counters_lock);
	p->good_octets_received += mib_read(mp, 0x00);
	p->bad_octets_received += mib_read(mp, 0x08);
	p->internal_mac_transmit_err += mib_read(mp, 0x0c);
	p->good_frames_received += mib_read(mp, 0x10);
	p->bad_frames_received += mib_read(mp, 0x14);
	p->broadcast_frames_received += mib_read(mp, 0x18);
	p->multicast_frames_received += mib_read(mp, 0x1c);
	p->frames_64_octets += mib_read(mp, 0x20);
	p->frames_65_to_127_octets += mib_read(mp, 0x24);
	p->frames_128_to_255_octets += mib_read(mp, 0x28);
	p->frames_256_to_511_octets += mib_read(mp, 0x2c);
	p->frames_512_to_1023_octets += mib_read(mp, 0x30);
	p->frames_1024_to_max_octets += mib_read(mp, 0x34);
	p->good_octets_sent += mib_read(mp, 0x38);
	p->good_frames_sent += mib_read(mp, 0x40);
	p->excessive_collision += mib_read(mp, 0x44);
	p->multicast_frames_sent += mib_read(mp, 0x48);
	p->broadcast_frames_sent += mib_read(mp, 0x4c);
	p->unrec_mac_control_received += mib_read(mp, 0x50);
	p->fc_sent += mib_read(mp, 0x54);
	p->good_fc_received += mib_read(mp, 0x58);
	p->bad_fc_received += mib_read(mp, 0x5c);
	p->undersize_received += mib_read(mp, 0x60);
	p->fragments_received += mib_read(mp, 0x64);
	p->oversize_received += mib_read(mp, 0x68);
	p->jabber_received += mib_read(mp, 0x6c);
	p->mac_receive_error += mib_read(mp, 0x70);
	p->bad_crc_event += mib_read(mp, 0x74);
	p->collision += mib_read(mp, 0x78);
	p->late_collision += mib_read(mp, 0x7c);
	/* Non MIB hardware counters */
	p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
	p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
	spin_unlock_bh(&mp->mib_counters_lock);
}

static void mib_counters_timer_wrapper(unsigned long _mp)
{
	struct mv643xx_eth_private *mp = (void *)_mp;

	mib_counters_update(mp);
	mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
}


/* interrupt coalescing *****************************************************/
/*
 * Hardware coalescing parameters are set in units of 64 t_clk
 * cycles.  I.e.:
 *
 *	coal_delay_in_usec = 64000000 * register_value / t_clk_rate
 *
 *	register_value = coal_delay_in_usec * t_clk_rate / 64000000
 *
 * In the ->set*() methods, we round the computed register value
 * to the nearest integer.
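 *
 * For example, with a 250 MHz t_clk, one register unit corresponds to
 * 64000000 / 250000000 = 0.256 usec of delay.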
 */
static unsigned int get_rx_coal(struct mv643xx_eth_private *mp)
{
	u32 val = rdlp(mp, SDMA_CONFIG);
	u64 temp;

	if (mp->shared->extended_rx_coal_limit)
		temp = ((val & 0x02000000) >> 10) | ((val & 0x003fff80) >> 7);
	else
		temp = (val & 0x003fff00) >> 8;

	temp *= 64000000;
	do_div(temp, mp->t_clk);

	return (unsigned int)temp;
}

static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
{
	u64 temp;
	u32 val;

	temp = (u64)usec * mp->t_clk;
	temp += 31999999;
	do_div(temp, 64000000);

	val = rdlp(mp, SDMA_CONFIG);
	if (mp->shared->extended_rx_coal_limit) {
		if (temp > 0xffff)
			temp = 0xffff;
		val &= ~0x023fff80;
		val |= (temp & 0x8000) << 10;
		val |= (temp & 0x7fff) << 7;
	} else {
		if (temp > 0x3fff)
			temp = 0x3fff;
		val &= ~0x003fff00;
		val |= (temp & 0x3fff) << 8;
	}
	wrlp(mp, SDMA_CONFIG, val);
}

static unsigned int get_tx_coal(struct mv643xx_eth_private *mp)
{
	u64 temp;

	temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4;
	temp *= 64000000;
	do_div(temp, mp->t_clk);

	return (unsigned int)temp;
}

static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
{
	u64 temp;

	temp = (u64)usec * mp->t_clk;
	temp += 31999999;
	do_div(temp, 64000000);

	if (temp > 0x3fff)
		temp = 0x3fff;

	wrlp(mp, TX_FIFO_URGENT_THRESHOLD, temp << 4);
}


/* ethtool ******************************************************************/
struct mv643xx_eth_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int netdev_off;
	int mp_off;
};

#define SSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct net_device_stats, m),		\
	  offsetof(struct net_device, stats.m), -1 }

#define MIBSTAT(m)						\
	{ #m, FIELD_SIZEOF(struct mib_counters, m),		\
	  -1, offsetof(struct mv643xx_eth_private, mib_counters.m) }

static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
	SSTAT(rx_packets),
	SSTAT(tx_packets),
	SSTAT(rx_bytes),
	SSTAT(tx_bytes),
	SSTAT(rx_errors),
	SSTAT(tx_errors),
	SSTAT(rx_dropped),
	SSTAT(tx_dropped),
	MIBSTAT(good_octets_received),
	MIBSTAT(bad_octets_received),
	MIBSTAT(internal_mac_transmit_err),
	MIBSTAT(good_frames_received),
	MIBSTAT(bad_frames_received),
	MIBSTAT(broadcast_frames_received),
	MIBSTAT(multicast_frames_received),
	MIBSTAT(frames_64_octets),
	MIBSTAT(frames_65_to_127_octets),
	MIBSTAT(frames_128_to_255_octets),
	MIBSTAT(frames_256_to_511_octets),
	MIBSTAT(frames_512_to_1023_octets),
	MIBSTAT(frames_1024_to_max_octets),
	MIBSTAT(good_octets_sent),
	MIBSTAT(good_frames_sent),
	MIBSTAT(excessive_collision),
	MIBSTAT(multicast_frames_sent),
	MIBSTAT(broadcast_frames_sent),
	MIBSTAT(unrec_mac_control_received),
	MIBSTAT(fc_sent),
	MIBSTAT(good_fc_received),
	MIBSTAT(bad_fc_received),
	MIBSTAT(undersize_received),
	MIBSTAT(fragments_received),
	MIBSTAT(oversize_received),
	MIBSTAT(jabber_received),
	MIBSTAT(mac_receive_error),
	MIBSTAT(bad_crc_event),
	MIBSTAT(collision),
	MIBSTAT(late_collision),
	MIBSTAT(rx_discard),
	MIBSTAT(rx_overrun),
};

static int
mv643xx_eth_get_settings_phy(struct mv643xx_eth_private *mp,
			     struct ethtool_cmd *cmd)
{
	int err;

	err = phy_read_status(mp->phy);
	if (err == 0)
		err = phy_ethtool_gset(mp->phy, cmd);

	/*
	 * The MAC does not support 1000baseT_Half.
	 */
	cmd->supported &= ~SUPPORTED_1000baseT_Half;
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	return err;
}

static int
mv643xx_eth_get_settings_phyless(struct mv643xx_eth_private *mp,
				 struct ethtool_cmd *cmd)
{
	u32 port_status;

	port_status = rdlp(mp, PORT_STATUS);

	cmd->supported = SUPPORTED_MII;
	cmd->advertising = ADVERTISED_MII;
	switch (port_status & PORT_SPEED_MASK) {
	case PORT_SPEED_10:
		ethtool_cmd_speed_set(cmd, SPEED_10);
		break;
	case PORT_SPEED_100:
		ethtool_cmd_speed_set(cmd, SPEED_100);
		break;
	case PORT_SPEED_1000:
		ethtool_cmd_speed_set(cmd, SPEED_1000);
		break;
	default:
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		break;
	}
	cmd->duplex = (port_status & FULL_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
	cmd->port = PORT_MII;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_DISABLE;
	cmd->maxtxpkt = 1;
	cmd->maxrxpkt = 1;

	return 0;
}

static void
mv643xx_eth_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	wol->supported = 0;
	wol->wolopts = 0;
	if (mp->phy)
		phy_ethtool_get_wol(mp->phy, wol);
}

static int
mv643xx_eth_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int err;

	if (mp->phy == NULL)
		return -EOPNOTSUPP;

	err = phy_ethtool_set_wol(mp->phy, wol);
	/* Given that mv643xx_eth works without the marvell-specific PHY driver,
	 * this debugging hint is useful to have.
	 */
	if (err == -EOPNOTSUPP)
		netdev_info(dev, "The PHY does not support set_wol, was CONFIG_MARVELL_PHY enabled?\n");
	return err;
}

static int
mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (mp->phy != NULL)
		return mv643xx_eth_get_settings_phy(mp, cmd);
	else
		return mv643xx_eth_get_settings_phyless(mp, cmd);
}

static int
mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int ret;

	if (mp->phy == NULL)
		return -EINVAL;

	/*
	 * The MAC does not support 1000baseT_Half.
	 */
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	ret = phy_ethtool_sset(mp->phy, cmd);
	if (!ret)
		mv643xx_eth_adjust_link(dev);
	return ret;
}

static void mv643xx_eth_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, mv643xx_eth_driver_name,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mv643xx_eth_driver_version,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
	drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
}

static int mv643xx_eth_nway_reset(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (mp->phy == NULL)
		return -EINVAL;

	return genphy_restart_aneg(mp->phy);
}

static int
mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	ec->rx_coalesce_usecs = get_rx_coal(mp);
	ec->tx_coalesce_usecs = get_tx_coal(mp);

	return 0;
}

static int
mv643xx_eth_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	set_rx_coal(mp, ec->rx_coalesce_usecs);
	set_tx_coal(mp, ec->tx_coalesce_usecs);

	return 0;
}

static void
mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	er->rx_max_pending = 4096;
	er->tx_max_pending = 4096;

	er->rx_pending = mp->rx_ring_size;
	er->tx_pending = mp->tx_ring_size;
}

static int
mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (er->rx_mini_pending || er->rx_jumbo_pending)
		return -EINVAL;

	mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096;
	mp->tx_ring_size = clamp_t(unsigned int, er->tx_pending,
				   MV643XX_MAX_SKB_DESCS * 2, 4096);
	if (mp->tx_ring_size != er->tx_pending)
		netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
			    mp->tx_ring_size, er->tx_pending);

	if (netif_running(dev)) {
		mv643xx_eth_stop(dev);
		if (mv643xx_eth_open(dev)) {
			netdev_err(dev,
				   "fatal error on re-opening device after ring param change\n");
			return -ENOMEM;
		}
	}

	return 0;
}


static int
mv643xx_eth_set_features(struct net_device *dev, netdev_features_t features)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	bool rx_csum = features & NETIF_F_RXCSUM;

	wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000);

	return 0;
}

static void mv643xx_eth_get_strings(struct net_device *dev,
				    uint32_t stringset, uint8_t *data)
{
	int i;

	if (stringset == ETH_SS_STATS) {
		for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
				mv643xx_eth_stats[i].stat_string,
				ETH_GSTRING_LEN);
		}
	}
}

static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
					  struct ethtool_stats *stats,
					  uint64_t *data)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int i;

	mv643xx_eth_get_stats(dev);
	mib_counters_update(mp);

	for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
		const struct mv643xx_eth_stats *stat;
		void *p;

		stat = mv643xx_eth_stats + i;

		if (stat->netdev_off >= 0)
			p = ((void *)mp->dev) + stat->netdev_off;
		else
			p = ((void *)mp) + stat->mp_off;

		data[i] = (stat->sizeof_stat == 8) ?
				*(uint64_t *)p : *(uint32_t *)p;
	}
}

static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(mv643xx_eth_stats);

	return -EOPNOTSUPP;
}

static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
	.get_settings		= mv643xx_eth_get_settings,
	.set_settings		= mv643xx_eth_set_settings,
	.get_drvinfo		= mv643xx_eth_get_drvinfo,
	.nway_reset		= mv643xx_eth_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= mv643xx_eth_get_coalesce,
	.set_coalesce		= mv643xx_eth_set_coalesce,
	.get_ringparam		= mv643xx_eth_get_ringparam,
	.set_ringparam		= mv643xx_eth_set_ringparam,
	.get_strings		= mv643xx_eth_get_strings,
	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
	.get_sset_count		= mv643xx_eth_get_sset_count,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_wol                = mv643xx_eth_get_wol,
	.set_wol                = mv643xx_eth_set_wol,
};


/* address handling *********************************************************/
static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
{
	unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH);
	unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW);

	addr[0] = (mac_h >> 24) & 0xff;
	addr[1] = (mac_h >> 16) & 0xff;
	addr[2] = (mac_h >> 8) & 0xff;
	addr[3] = mac_h & 0xff;
	addr[4] = (mac_l >> 8) & 0xff;
	addr[5] = mac_l & 0xff;
}

static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
{
	wrlp(mp, MAC_ADDR_HIGH,
		(addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]);
	wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]);
}

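/*
 * Build the 16-bit nibble mask for the hardware unicast filter: every
 * secondary unicast address must match dev_addr in all but its low
 * four bits, otherwise 0 is returned and the caller falls back to
 * unicast promiscuous mode.
 */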
static u32 uc_addr_filter_mask(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	u32 nibbles;

	if (dev->flags & IFF_PROMISC)
		return 0;

	nibbles = 1 << (dev->dev_addr[5] & 0x0f);
	netdev_for_each_uc_addr(ha, dev) {
		if (memcmp(dev->dev_addr, ha->addr, 5))
			return 0;
		if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0)
			return 0;

		nibbles |= 1 << (ha->addr[5] & 0x0f);
	}

	return nibbles;
}

static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 port_config;
	u32 nibbles;
	int i;

	uc_addr_set(mp, dev->dev_addr);

	port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE;

	nibbles = uc_addr_filter_mask(dev);
	if (!nibbles) {
		port_config |= UNICAST_PROMISCUOUS_MODE;
		nibbles = 0xffff;
	}

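	/*
	 * Each 32-bit word of the unicast table covers four nibble
	 * values, one per byte; setting bit 0 of a byte accepts the
	 * corresponding low-nibble value.
	 */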
	for (i = 0; i < 16; i += 4) {
		int off = UNICAST_TABLE(mp->port_num) + i;
		u32 v;

		v = 0;
		if (nibbles & 1)
			v |= 0x00000001;
		if (nibbles & 2)
			v |= 0x00000100;
		if (nibbles & 4)
			v |= 0x00010000;
		if (nibbles & 8)
			v |= 0x01000000;
		nibbles >>= 4;

		wrl(mp, off, v);
	}

	wrlp(mp, PORT_CONFIG, port_config);
}

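/*
 * 8-bit CRC of an Ethernet address (polynomial x^8 + x^2 + x + 1,
 * i.e. 0x107), used to index the 256-entry 'other' multicast table.
 */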
static int addr_crc(unsigned char *addr)
{
	int crc = 0;
	int i;

	for (i = 0; i < 6; i++) {
		int j;

		crc = (crc ^ addr[i]) << 8;
		for (j = 7; j >= 0; j--) {
			if (crc & (0x100 << j))
				crc ^= 0x107 << j;
		}
	}

	return crc;
}

static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	u32 *mc_spec;
	u32 *mc_other;
	struct netdev_hw_addr *ha;
	int i;

	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		int port_num;
		u32 accept;

oom:
		port_num = mp->port_num;
		accept = 0x01010101;
		for (i = 0; i < 0x100; i += 4) {
			wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept);
			wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept);
		}
		return;
	}

	mc_spec = kmalloc(0x200, GFP_ATOMIC);
	if (mc_spec == NULL)
		goto oom;
	mc_other = mc_spec + (0x100 >> 2);

	memset(mc_spec, 0, 0x100);
	memset(mc_other, 0, 0x100);

	netdev_for_each_mc_addr(ha, dev) {
		u8 *a = ha->addr;
		u32 *table;
		int entry;

		if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
1854 1855
			table = mc_spec;
			entry = a[5];
L
Lennert Buytenhek 已提交
1856
		} else {
1857 1858
			table = mc_other;
			entry = addr_crc(a);
L
Lennert Buytenhek 已提交
1859
		}
1860

1861
		table[entry >> 2] |= 1 << (8 * (entry & 3));
L
Lennert Buytenhek 已提交
1862
	}
1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881

	for (i = 0; i < 0x100; i += 4) {
		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, mc_spec[i >> 2]);
		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, mc_other[i >> 2]);
	}

	kfree(mc_spec);
}

static void mv643xx_eth_set_rx_mode(struct net_device *dev)
{
	mv643xx_eth_program_unicast_filter(dev);
	mv643xx_eth_program_multicast_filter(dev);
}

static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *sa = addr;

	if (!is_valid_ether_addr(sa->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);

	netif_addr_lock_bh(dev);
	mv643xx_eth_program_unicast_filter(dev);
	netif_addr_unlock_bh(dev);

	return 0;
}


/* rx/tx queue initialisation ***********************************************/
static int rxq_init(struct mv643xx_eth_private *mp, int index)
{
	struct rx_queue *rxq = mp->rxq + index;
	struct rx_desc *rx_desc;
	int size;
	int i;

	rxq->index = index;

	rxq->rx_ring_size = mp->rx_ring_size;

	rxq->rx_desc_count = 0;
	rxq->rx_curr_desc = 0;
	rxq->rx_used_desc = 0;

	size = rxq->rx_ring_size * sizeof(struct rx_desc);

	if (index == 0 && size <= mp->rx_desc_sram_size) {
		rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
						mp->rx_desc_sram_size);
		rxq->rx_desc_dma = mp->rx_desc_sram_addr;
	} else {
		rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
						       size, &rxq->rx_desc_dma,
						       GFP_KERNEL);
	}

	if (rxq->rx_desc_area == NULL) {
		netdev_err(mp->dev,
			   "can't allocate rx ring (%d bytes)\n", size);
		goto out;
	}
	memset(rxq->rx_desc_area, 0, size);

	rxq->rx_desc_area_size = size;
	rxq->rx_skb = kcalloc(rxq->rx_ring_size, sizeof(*rxq->rx_skb),
				    GFP_KERNEL);
	if (rxq->rx_skb == NULL)
		goto out_free;

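	/*
	 * Link the descriptors into a ring: each next_desc_ptr points
	 * at the following descriptor, and the last wraps back to the
	 * first.
	 */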
	rx_desc = rxq->rx_desc_area;
	for (i = 0; i < rxq->rx_ring_size; i++) {
		int nexti;

		nexti = i + 1;
		if (nexti == rxq->rx_ring_size)
			nexti = 0;

		rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
					nexti * sizeof(struct rx_desc);
	}

	return 0;


out_free:
	if (index == 0 && size <= mp->rx_desc_sram_size)
		iounmap(rxq->rx_desc_area);
	else
		dma_free_coherent(mp->dev->dev.parent, size,
				  rxq->rx_desc_area,
				  rxq->rx_desc_dma);

out:
	return -ENOMEM;
}

static void rxq_deinit(struct rx_queue *rxq)
{
	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
	int i;

	rxq_disable(rxq);

	for (i = 0; i < rxq->rx_ring_size; i++) {
		if (rxq->rx_skb[i]) {
			dev_kfree_skb(rxq->rx_skb[i]);
			rxq->rx_desc_count--;
		}
	}

	if (rxq->rx_desc_count) {
		netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n",
			   rxq->rx_desc_count);
	}

	if (rxq->index == 0 &&
	    rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
		iounmap(rxq->rx_desc_area);
	else
		dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size,
				  rxq->rx_desc_area, rxq->rx_desc_dma);

	kfree(rxq->rx_skb);
}

static int txq_init(struct mv643xx_eth_private *mp, int index)
{
	struct tx_queue *txq = mp->txq + index;
	struct tx_desc *tx_desc;
	int size;
	int i;

	txq->index = index;

	txq->tx_ring_size = mp->tx_ring_size;

	/* A queue must always have room for at least one skb.
	 * Therefore, stop the queue when the number of free entries
	 * reaches the maximum number of descriptors per skb.
	 */
	txq->tx_stop_threshold = txq->tx_ring_size - MV643XX_MAX_SKB_DESCS;
	txq->tx_wake_threshold = txq->tx_stop_threshold / 2;

	txq->tx_desc_count = 0;
	txq->tx_curr_desc = 0;
	txq->tx_used_desc = 0;

	size = txq->tx_ring_size * sizeof(struct tx_desc);

	if (index == 0 && size <= mp->tx_desc_sram_size) {
		txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
						mp->tx_desc_sram_size);
		txq->tx_desc_dma = mp->tx_desc_sram_addr;
	} else {
		txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
						       size, &txq->tx_desc_dma,
						       GFP_KERNEL);
	}

	if (txq->tx_desc_area == NULL) {
		netdev_err(mp->dev,
			   "can't allocate tx ring (%d bytes)\n", size);
		return -ENOMEM;
	}
	memset(txq->tx_desc_area, 0, size);

	txq->tx_desc_area_size = size;

	tx_desc = txq->tx_desc_area;
	for (i = 0; i < txq->tx_ring_size; i++) {
		struct tx_desc *txd = tx_desc + i;
		int nexti;

		nexti = i + 1;
		if (nexti == txq->tx_ring_size)
			nexti = 0;

		txd->cmd_sts = 0;
		txd->next_desc_ptr = txq->tx_desc_dma +
					nexti * sizeof(struct tx_desc);
	}

	/* Allocate DMA buffers for TSO MAC/IP/TCP headers */
	txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,
					   txq->tx_ring_size * TSO_HEADER_SIZE,
					   &txq->tso_hdrs_dma, GFP_KERNEL);
	if (txq->tso_hdrs == NULL) {
		dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
				  txq->tx_desc_area, txq->tx_desc_dma);
		return -ENOMEM;
	}
	skb_queue_head_init(&txq->tx_skb);

	return 0;
}

static void txq_deinit(struct tx_queue *txq)
{
	struct mv643xx_eth_private *mp = txq_to_mp(txq);

	txq_disable(txq);
	txq_reclaim(txq, txq->tx_ring_size, 1);

	BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);

	if (txq->index == 0 &&
	    txq->tx_desc_area_size <= mp->tx_desc_sram_size)
		iounmap(txq->tx_desc_area);
	else
		dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
				  txq->tx_desc_area, txq->tx_desc_dma);
	if (txq->tso_hdrs)
		dma_free_coherent(mp->dev->dev.parent,
				  txq->tx_ring_size * TSO_HEADER_SIZE,
				  txq->tso_hdrs, txq->tso_hdrs_dma);
}


/* netdev ops and related ***************************************************/
static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp)
{
	u32 int_cause;
	u32 int_cause_ext;

	int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask;
	if (int_cause == 0)
		return 0;

	int_cause_ext = 0;
	if (int_cause & INT_EXT) {
		int_cause &= ~INT_EXT;
		int_cause_ext = rdlp(mp, INT_CAUSE_EXT);
	}

	if (int_cause) {
		wrlp(mp, INT_CAUSE, ~int_cause);
		mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
				~(rdlp(mp, TXQ_COMMAND) & 0xff);
		mp->work_rx |= (int_cause & INT_RX) >> 2;
	}

	int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX;
	if (int_cause_ext) {
		wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext);
		if (int_cause_ext & INT_EXT_LINK_PHY)
			mp->work_link = 1;
		mp->work_tx |= int_cause_ext & INT_EXT_TX;
	}

	return 1;
}

static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (unlikely(!mv643xx_eth_collect_events(mp)))
		return IRQ_NONE;

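	/*
	 * Mask all port interrupts; mv643xx_eth_poll() re-enables them
	 * once the pending work has been processed.
	 */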
	wrlp(mp, INT_MASK, 0);
	napi_schedule(&mp->napi);

	return IRQ_HANDLED;
}

static void handle_link_event(struct mv643xx_eth_private *mp)
{
	struct net_device *dev = mp->dev;
	u32 port_status;
	int speed;
	int duplex;
	int fc;

	port_status = rdlp(mp, PORT_STATUS);
	if (!(port_status & LINK_UP)) {
		if (netif_carrier_ok(dev)) {
			int i;

			netdev_info(dev, "link down\n");

			netif_carrier_off(dev);

			for (i = 0; i < mp->txq_count; i++) {
				struct tx_queue *txq = mp->txq + i;

				txq_reclaim(txq, txq->tx_ring_size, 1);
				txq_reset_hw_ptr(txq);
			}
		}
		return;
	}

	switch (port_status & PORT_SPEED_MASK) {
	case PORT_SPEED_10:
		speed = 10;
		break;
	case PORT_SPEED_100:
		speed = 100;
		break;
	case PORT_SPEED_1000:
		speed = 1000;
		break;
	default:
		speed = -1;
		break;
	}
	duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
	fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;

	netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n",
		    speed, duplex ? "full" : "half", fc ? "en" : "dis");

	if (!netif_carrier_ok(dev))
		netif_carrier_on(dev);
}

static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
{
	struct mv643xx_eth_private *mp;
	int work_done;

	mp = container_of(napi, struct mv643xx_eth_private, napi);

	if (unlikely(mp->oom)) {
		mp->oom = 0;
		del_timer(&mp->rx_oom);
	}

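	/*
	 * Drain pending work highest-numbered queue first, in slices
	 * of at most 16 units, until the NAPI budget is exhausted or
	 * no events remain pending.
	 */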
	work_done = 0;
	while (work_done < budget) {
		u8 queue_mask;
		int queue;
		int work_tbd;

		if (mp->work_link) {
			mp->work_link = 0;
			handle_link_event(mp);
			work_done++;
			continue;
		}

		queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx;
		if (likely(!mp->oom))
			queue_mask |= mp->work_rx_refill;

		if (!queue_mask) {
			if (mv643xx_eth_collect_events(mp))
				continue;
			break;
		}

		queue = fls(queue_mask) - 1;
		queue_mask = 1 << queue;

		work_tbd = budget - work_done;
		if (work_tbd > 16)
			work_tbd = 16;

		if (mp->work_tx_end & queue_mask) {
			txq_kick(mp->txq + queue);
		} else if (mp->work_tx & queue_mask) {
			work_done += txq_reclaim(mp->txq + queue, work_tbd, 0);
			txq_maybe_wake(mp->txq + queue);
		} else if (mp->work_rx & queue_mask) {
			work_done += rxq_process(mp->rxq + queue, work_tbd);
		} else if (!mp->oom && (mp->work_rx_refill & queue_mask)) {
			work_done += rxq_refill(mp->rxq + queue, work_tbd);
		} else {
			BUG();
		}
	}

	if (work_done < budget) {
		if (mp->oom)
			mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
		napi_complete(napi);
		wrlp(mp, INT_MASK, mp->int_mask);
	}

	return work_done;
}

static inline void oom_timer_wrapper(unsigned long data)
{
	struct mv643xx_eth_private *mp = (void *)data;

	napi_schedule(&mp->napi);
}

static void port_start(struct mv643xx_eth_private *mp)
{
	u32 pscr;
	int i;

	/*
	 * Perform PHY reset, if there is a PHY.
	 */
	if (mp->phy != NULL) {
		struct ethtool_cmd cmd;

		mv643xx_eth_get_settings(mp->dev, &cmd);
		phy_init_hw(mp->phy);
		mv643xx_eth_set_settings(mp->dev, &cmd);
		phy_start(mp->phy);
	}

	/*
	 * Configure basic link parameters.
	 */
	pscr = rdlp(mp, PORT_SERIAL_CONTROL);

	pscr |= SERIAL_PORT_ENABLE;
	wrlp(mp, PORT_SERIAL_CONTROL, pscr);

	pscr |= DO_NOT_FORCE_LINK_FAIL;
	if (mp->phy == NULL)
		pscr |= FORCE_LINK_PASS;
	wrlp(mp, PORT_SERIAL_CONTROL, pscr);

	/*
	 * Configure TX path and queues.
	 */
	tx_set_rate(mp, 1000000000, 16777216);
	for (i = 0; i < mp->txq_count; i++) {
		struct tx_queue *txq = mp->txq + i;

		txq_reset_hw_ptr(txq);
		txq_set_rate(txq, 1000000000, 16777216);
		txq_set_fixed_prio_mode(txq);
	}

	/*
	 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
	 * frames to RX queue #0, and include the pseudo-header when
	 * calculating receive checksums.
	 */
	mv643xx_eth_set_features(mp->dev, mp->dev->features);

	/*
	 * Treat BPDUs as normal multicasts, and disable partition mode.
	 */
	wrlp(mp, PORT_CONFIG_EXT, 0x00000000);

	/*
	 * Add configured unicast addresses to address filter table.
	 */
	mv643xx_eth_program_unicast_filter(mp->dev);

	/*
	 * Enable the receive queues.
	 */
	for (i = 0; i < mp->rxq_count; i++) {
		struct rx_queue *rxq = mp->rxq + i;
		u32 addr;

		addr = (u32)rxq->rx_desc_dma;
		addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
		wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr);

		rxq_enable(rxq);
	}
}

static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp)
{
	int skb_size;

	/*
	 * Reserve 2+14 bytes for an ethernet header (the hardware
	 * automatically prepends 2 bytes of dummy data to each
	 * received packet), 16 bytes for up to four VLAN tags, and
	 * 4 bytes for the trailing FCS -- 36 bytes total.
	 */
	skb_size = mp->dev->mtu + 36;

	/*
	 * Make sure that the skb size is a multiple of 8 bytes, as
	 * the lower three bits of the receive descriptor's buffer
	 * size field are ignored by the hardware.
	 */
	mp->skb_size = (skb_size + 7) & ~7;

	/*
	 * If NET_SKB_PAD is smaller than a cache line,
	 * netdev_alloc_skb() will cause skb->data to be misaligned
	 * to a cache line boundary.  If this is the case, include
	 * some extra space to allow re-aligning the data area.
	 */
	mp->skb_size += SKB_DMA_REALIGN;
}

static int mv643xx_eth_open(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int err;
	int i;

	wrlp(mp, INT_CAUSE, 0);
	wrlp(mp, INT_CAUSE_EXT, 0);
	rdlp(mp, INT_CAUSE_EXT);

	err = request_irq(dev->irq, mv643xx_eth_irq,
			  IRQF_SHARED, dev->name, dev);
	if (err) {
		netdev_err(dev, "can't assign irq\n");
		return -EAGAIN;
	}

	mv643xx_eth_recalc_skb_size(mp);

	napi_enable(&mp->napi);

	mp->int_mask = INT_EXT;

	for (i = 0; i < mp->rxq_count; i++) {
		err = rxq_init(mp, i);
		if (err) {
			while (--i >= 0)
				rxq_deinit(mp->rxq + i);
			goto out;
		}

		rxq_refill(mp->rxq + i, INT_MAX);
		mp->int_mask |= INT_RX_0 << i;
	}

	if (mp->oom) {
		mp->rx_oom.expires = jiffies + (HZ / 10);
		add_timer(&mp->rx_oom);
	}

	for (i = 0; i < mp->txq_count; i++) {
		err = txq_init(mp, i);
		if (err) {
			while (--i >= 0)
				txq_deinit(mp->txq + i);
			goto out_free;
		}
		mp->int_mask |= INT_TX_END_0 << i;
	}

	add_timer(&mp->mib_counters_timer);
	port_start(mp);

	wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
	wrlp(mp, INT_MASK, mp->int_mask);

	return 0;


out_free:
	for (i = 0; i < mp->rxq_count; i++)
		rxq_deinit(mp->rxq + i);
out:
	free_irq(dev->irq, dev);

	return err;
}

static void port_reset(struct mv643xx_eth_private *mp)
{
	unsigned int data;
	int i;

	for (i = 0; i < mp->rxq_count; i++)
		rxq_disable(mp->rxq + i);
	for (i = 0; i < mp->txq_count; i++)
		txq_disable(mp->txq + i);

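	/*
	 * Wait for the transmit FIFO to drain before clearing the
	 * port enable bits below.
	 */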
	while (1) {
		u32 ps = rdlp(mp, PORT_STATUS);

		if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY)
			break;
		udelay(10);
	}

	/* Reset the Enable bit in the Configuration Register */
	data = rdlp(mp, PORT_SERIAL_CONTROL);
	data &= ~(SERIAL_PORT_ENABLE		|
		  DO_NOT_FORCE_LINK_FAIL	|
		  FORCE_LINK_PASS);
	wrlp(mp, PORT_SERIAL_CONTROL, data);
}

static int mv643xx_eth_stop(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int i;

	wrlp(mp, INT_MASK_EXT, 0x00000000);
	wrlp(mp, INT_MASK, 0x00000000);
	rdlp(mp, INT_MASK);

	napi_disable(&mp->napi);

	del_timer_sync(&mp->rx_oom);

	netif_carrier_off(dev);
	if (mp->phy)
		phy_stop(mp->phy);
	free_irq(dev->irq, dev);

	port_reset(mp);
	mv643xx_eth_get_stats(dev);
	mib_counters_update(mp);
	del_timer_sync(&mp->mib_counters_timer);

	for (i = 0; i < mp->rxq_count; i++)
		rxq_deinit(mp->rxq + i);
	for (i = 0; i < mp->txq_count; i++)
		txq_deinit(mp->txq + i);

	return 0;
}

static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int ret;

	if (mp->phy == NULL)
		return -ENOTSUPP;

	ret = phy_mii_ioctl(mp->phy, ifr, cmd);
	if (!ret)
		mv643xx_eth_adjust_link(dev);
	return ret;
}

static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	if (new_mtu < 64 || new_mtu > 9500)
		return -EINVAL;

	dev->mtu = new_mtu;
	mv643xx_eth_recalc_skb_size(mp);
	tx_set_rate(mp, 1000000000, 16777216);

	if (!netif_running(dev))
		return 0;

	/*
	 * Stop and then re-open the interface.  This will allocate RX
	 * skbs of the new MTU.  There is a risk that the open will not
	 * succeed if memory is tight.
	 */
	mv643xx_eth_stop(dev);
	if (mv643xx_eth_open(dev)) {
		netdev_err(dev,
			   "fatal error on re-opening device after MTU change\n");
	}

	return 0;
}

static void tx_timeout_task(struct work_struct *ugly)
{
	struct mv643xx_eth_private *mp;

	mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
	if (netif_running(mp->dev)) {
		netif_tx_stop_all_queues(mp->dev);
		port_reset(mp);
		port_start(mp);
		netif_tx_wake_all_queues(mp->dev);
	}
}

static void mv643xx_eth_tx_timeout(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	netdev_info(dev, "tx timeout\n");

	schedule_work(&mp->tx_timeout_task);
}

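/*
 * Netpoll support: with the port interrupt mask cleared, run the
 * interrupt handler by hand, then restore the mask.
 */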
#ifdef CONFIG_NET_POLL_CONTROLLER
static void mv643xx_eth_netpoll(struct net_device *dev)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);

	wrlp(mp, INT_MASK, 0x00000000);
	rdlp(mp, INT_MASK);

	mv643xx_eth_irq(dev->irq, dev);

	wrlp(mp, INT_MASK, mp->int_mask);
}
#endif

2564
/* platform glue ************************************************************/
2565 2566
static void
mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
2567
			      const struct mbus_dram_target_info *dram)
2568
{
2569
	void __iomem *base = msp->base;
2570 2571 2572
	u32 win_enable;
	u32 win_protect;
	int i;
2573

2574 2575 2576 2577 2578
	for (i = 0; i < 6; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
2579 2580
	}

2581 2582 2583 2584
	win_enable = 0x3f;
	win_protect = 0;

	for (i = 0; i < dram->num_cs; i++) {
2585
		const struct mbus_dram_window *cs = dram->cs + i;
2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597

		writel((cs->base & 0xffff0000) |
			(cs->mbus_attr << 8) |
			dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable &= ~(1 << i);
		win_protect |= 3 << (2 * i);
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE);
	msp->win_protect = win_protect;
2598 2599
}

static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
{
	/*
	 * Check whether we have a 14-bit coal limit field in bits
	 * [21:8], or a 16-bit coal limit in bits [25,21:7] of the
	 * SDMA config register.
	 */
	writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG);
	if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000)
		msp->extended_rx_coal_limit = 1;
	else
		msp->extended_rx_coal_limit = 0;

	/*
	 * Check whether the MAC supports TX rate control, and if
	 * yes, whether its associated registers are in the old or
	 * the new place.
	 */
	writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED);
	if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) {
		msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT;
	} else {
		writel(7, msp->base + 0x0400 + TX_BW_RATE);
		if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7)
			msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT;
		else
			msp->tx_bw_control = TX_BW_CONTROL_ABSENT;
	}
}

#if defined(CONFIG_OF)
static const struct of_device_id mv643xx_eth_shared_ids[] = {
	{ .compatible = "marvell,orion-eth", },
	{ .compatible = "marvell,kirkwood-eth", },
	{ }
};
MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids);
#endif

#if defined(CONFIG_OF) && !defined(CONFIG_MV64X60)
#define mv643xx_eth_property(_np, _name, _v)				\
	do {								\
		u32 tmp;						\
		if (!of_property_read_u32(_np, "marvell," _name, &tmp))	\
			_v = tmp;					\
	} while (0)

static struct platform_device *port_platdev[3];

static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
					  struct device_node *pnp)
{
	struct platform_device *ppdev;
	struct mv643xx_eth_platform_data ppd;
	struct resource res;
	const char *mac_addr;
	int ret;
	int dev_num = 0;

	memset(&ppd, 0, sizeof(ppd));
	ppd.shared = pdev;

	memset(&res, 0, sizeof(res));
	if (!of_irq_to_resource(pnp, 0, &res)) {
		dev_err(&pdev->dev, "missing interrupt on %s\n", pnp->name);
		return -EINVAL;
	}

	if (of_property_read_u32(pnp, "reg", &ppd.port_number)) {
		dev_err(&pdev->dev, "missing reg property on %s\n", pnp->name);
		return -EINVAL;
	}

	if (ppd.port_number >= 3) {
		dev_err(&pdev->dev, "invalid reg property on %s\n", pnp->name);
		return -EINVAL;
	}

	while (dev_num < 3 && port_platdev[dev_num])
		dev_num++;

	if (dev_num == 3) {
		dev_err(&pdev->dev, "too many ports registered\n");
		return -EINVAL;
	}

	mac_addr = of_get_mac_address(pnp);
	if (mac_addr)
		memcpy(ppd.mac_addr, mac_addr, ETH_ALEN);

	mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
	mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
	mv643xx_eth_property(pnp, "tx-sram-size", ppd.tx_sram_size);
	mv643xx_eth_property(pnp, "rx-queue-size", ppd.rx_queue_size);
	mv643xx_eth_property(pnp, "rx-sram-addr", ppd.rx_sram_addr);
	mv643xx_eth_property(pnp, "rx-sram-size", ppd.rx_sram_size);

	ppd.phy_node = of_parse_phandle(pnp, "phy-handle", 0);
	if (!ppd.phy_node) {
		ppd.phy_addr = MV643XX_ETH_PHY_NONE;
		of_property_read_u32(pnp, "speed", &ppd.speed);
		of_property_read_u32(pnp, "duplex", &ppd.duplex);
	}

	ppdev = platform_device_alloc(MV643XX_ETH_NAME, dev_num);
	if (!ppdev)
		return -ENOMEM;
	ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	ppdev->dev.of_node = pnp;

	ret = platform_device_add_resources(ppdev, &res, 1);
	if (ret)
		goto port_err;

	ret = platform_device_add_data(ppdev, &ppd, sizeof(ppd));
	if (ret)
		goto port_err;

	ret = platform_device_add(ppdev);
	if (ret)
		goto port_err;

	port_platdev[dev_num] = ppdev;

	return 0;

port_err:
	platform_device_put(ppdev);
	return ret;
}

static int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
{
	struct mv643xx_eth_shared_platform_data *pd;
	struct device_node *pnp, *np = pdev->dev.of_node;
	int ret;

	/* bail out if not registered from DT */
	if (!np)
		return 0;

	pd = devm_kzalloc(&pdev->dev, sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;
	pdev->dev.platform_data = pd;

	mv643xx_eth_property(np, "tx-checksum-limit", pd->tx_csum_limit);

	for_each_available_child_of_node(np, pnp) {
		ret = mv643xx_eth_shared_of_add_port(pdev, pnp);
		if (ret)
			return ret;
	}
	return 0;
}

static void mv643xx_eth_shared_of_remove(void)
{
	int n;

	for (n = 0; n < 3; n++) {
		platform_device_del(port_platdev[n]);
		port_platdev[n] = NULL;
	}
}
#else
static inline int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
{
	return 0;
}

static inline void mv643xx_eth_shared_of_remove(void)
{
}
#endif

static int mv643xx_eth_shared_probe(struct platform_device *pdev)
{
	static int mv643xx_eth_version_printed;
	struct mv643xx_eth_shared_platform_data *pd;
	struct mv643xx_eth_shared_private *msp;
	const struct mbus_dram_target_info *dram;
	struct resource *res;
	int ret;

	if (!mv643xx_eth_version_printed++)
		pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n",
			  mv643xx_eth_driver_version);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;

	msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
	if (msp == NULL)
		return -ENOMEM;
	platform_set_drvdata(pdev, msp);

	msp->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (msp->base == NULL)
		return -ENOMEM;

	msp->clk = devm_clk_get(&pdev->dev, NULL);
	if (!IS_ERR(msp->clk))
		clk_prepare_enable(msp->clk);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	dram = mv_mbus_dram_info();
	if (dram)
		mv643xx_eth_conf_mbus_windows(msp, dram);

	ret = mv643xx_eth_shared_of_probe(pdev);
	if (ret)
		return ret;
	pd = dev_get_platdata(&pdev->dev);

	msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
					pd->tx_csum_limit : 9 * 1024;
	infer_hw_params(msp);

	return 0;
}

static int mv643xx_eth_shared_remove(struct platform_device *pdev)
{
	struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);

	mv643xx_eth_shared_of_remove();
	if (!IS_ERR(msp->clk))
		clk_disable_unprepare(msp->clk);
	return 0;
}

static struct platform_driver mv643xx_eth_shared_driver = {
	.probe		= mv643xx_eth_shared_probe,
	.remove		= mv643xx_eth_shared_remove,
	.driver = {
		.name	= MV643XX_ETH_SHARED_NAME,
		.owner	= THIS_MODULE,
		.of_match_table = of_match_ptr(mv643xx_eth_shared_ids),
	},
};

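/*
 * The shared PHY_ADDR register packs one 5-bit PHY address per port,
 * at bit offset 5 * port_num.
 */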
static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
{
	int addr_shift = 5 * mp->port_num;
	u32 data;

	data = rdl(mp, PHY_ADDR);
	data &= ~(0x1f << addr_shift);
	data |= (phy_addr & 0x1f) << addr_shift;
	wrl(mp, PHY_ADDR, data);
}

static int phy_addr_get(struct mv643xx_eth_private *mp)
{
	unsigned int data;

	data = rdl(mp, PHY_ADDR);

	return (data >> (5 * mp->port_num)) & 0x1f;
}

static void set_params(struct mv643xx_eth_private *mp,
		       struct mv643xx_eth_platform_data *pd)
{
	struct net_device *dev = mp->dev;
	unsigned int tx_ring_size;

	if (is_valid_ether_addr(pd->mac_addr))
		memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
	else
		uc_addr_get(mp, dev->dev_addr);

	mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
	if (pd->rx_queue_size)
		mp->rx_ring_size = pd->rx_queue_size;
	mp->rx_desc_sram_addr = pd->rx_sram_addr;
	mp->rx_desc_sram_size = pd->rx_sram_size;

	mp->rxq_count = pd->rx_queue_count ? : 1;

	tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
	if (pd->tx_queue_size)
		tx_ring_size = pd->tx_queue_size;

	mp->tx_ring_size = clamp_t(unsigned int, tx_ring_size,
				   MV643XX_MAX_SKB_DESCS * 2, 4096);
	if (mp->tx_ring_size != tx_ring_size)
		netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
			    mp->tx_ring_size, tx_ring_size);

	mp->tx_desc_sram_addr = pd->tx_sram_addr;
	mp->tx_desc_sram_size = pd->tx_sram_size;

	mp->txq_count = pd->tx_queue_count ? : 1;
}

static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
				   int phy_addr)
{
	struct phy_device *phydev;
	int start;
	int num;
	int i;
	char phy_id[MII_BUS_ID_SIZE + 3];

	if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) {
		start = phy_addr_get(mp) & 0x1f;
		num = 32;
	} else {
		start = phy_addr & 0x1f;
		num = 1;
	}

	/* Attempt to connect to the PHY using orion-mdio */
	phydev = ERR_PTR(-ENODEV);
	for (i = 0; i < num; i++) {
		int addr = (start + i) & 0x1f;

		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
				"orion-mdio-mii", addr);

		phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link,
				PHY_INTERFACE_MODE_GMII);
		if (!IS_ERR(phydev)) {
			phy_addr_set(mp, addr);
			break;
		}
	}

	return phydev;
}

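/*
 * A speed of 0 selects autonegotiation; any other value programs the
 * given speed and duplex with autonegotiation disabled.
 */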
static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
{
	struct phy_device *phy = mp->phy;

	if (speed == 0) {
		phy->autoneg = AUTONEG_ENABLE;
		phy->speed = 0;
		phy->duplex = 0;
		phy->advertising = phy->supported | ADVERTISED_Autoneg;
	} else {
		phy->autoneg = AUTONEG_DISABLE;
		phy->advertising = 0;
		phy->speed = speed;
		phy->duplex = duplex;
	}
	phy_start_aneg(phy);
}

static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
{
	u32 pscr;

	pscr = rdlp(mp, PORT_SERIAL_CONTROL);
	if (pscr & SERIAL_PORT_ENABLE) {
		pscr &= ~SERIAL_PORT_ENABLE;
		wrlp(mp, PORT_SERIAL_CONTROL, pscr);
	}

	pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
	if (mp->phy == NULL) {
		pscr |= DISABLE_AUTO_NEG_SPEED_GMII;
		if (speed == SPEED_1000)
			pscr |= SET_GMII_SPEED_TO_1000;
		else if (speed == SPEED_100)
			pscr |= SET_MII_SPEED_TO_100;

		pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL;

		pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX;
		if (duplex == DUPLEX_FULL)
			pscr |= SET_FULL_DUPLEX_MODE;
	}

	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
}

static const struct net_device_ops mv643xx_eth_netdev_ops = {
	.ndo_open		= mv643xx_eth_open,
	.ndo_stop		= mv643xx_eth_stop,
	.ndo_start_xmit		= mv643xx_eth_xmit,
	.ndo_set_rx_mode	= mv643xx_eth_set_rx_mode,
	.ndo_set_mac_address	= mv643xx_eth_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= mv643xx_eth_ioctl,
	.ndo_change_mtu		= mv643xx_eth_change_mtu,
	.ndo_set_features	= mv643xx_eth_set_features,
	.ndo_tx_timeout		= mv643xx_eth_tx_timeout,
	.ndo_get_stats		= mv643xx_eth_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mv643xx_eth_netpoll,
#endif
};

static int mv643xx_eth_probe(struct platform_device *pdev)
{
	struct mv643xx_eth_platform_data *pd;
	struct mv643xx_eth_private *mp;
	struct net_device *dev;
	struct resource *res;
	int err;

	pd = dev_get_platdata(&pdev->dev);
	if (pd == NULL) {
		dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n");
		return -ENODEV;
	}

	if (pd->shared == NULL) {
		dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n");
		return -ENODEV;
	}

	dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8);
	if (!dev)
		return -ENOMEM;

	mp = netdev_priv(dev);
	platform_set_drvdata(pdev, mp);

	mp->shared = platform_get_drvdata(pd->shared);
	mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10);
	mp->port_num = pd->port_number;

	mp->dev = dev;

	/* Kirkwood resets some registers on gated clocks. Especially
	 * CLK125_BYPASS_EN must be cleared but is not available on
	 * all other SoCs/System Controllers using this driver.
	 */
	if (of_device_is_compatible(pdev->dev.of_node,
				    "marvell,kirkwood-eth-port"))
		wrlp(mp, PORT_SERIAL_CONTROL1,
		     rdlp(mp, PORT_SERIAL_CONTROL1) & ~CLK125_BYPASS_EN);

	/*
	 * Start with a default rate, and if there is a clock, allow
	 * it to override the default.
	 */
	mp->t_clk = 133000000;
	mp->clk = devm_clk_get(&pdev->dev, NULL);
	if (!IS_ERR(mp->clk)) {
		clk_prepare_enable(mp->clk);
		mp->t_clk = clk_get_rate(mp->clk);
	} else if (!IS_ERR(mp->shared->clk)) {
		mp->t_clk = clk_get_rate(mp->shared->clk);
	}

	set_params(mp, pd);
	netif_set_real_num_tx_queues(dev, mp->txq_count);
	netif_set_real_num_rx_queues(dev, mp->rxq_count);

	err = 0;
	if (pd->phy_node) {
		mp->phy = of_phy_connect(mp->dev, pd->phy_node,
					 mv643xx_eth_adjust_link, 0,
					 PHY_INTERFACE_MODE_GMII);
		if (!mp->phy)
			err = -ENODEV;
		else
			phy_addr_set(mp, mp->phy->addr);
	} else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) {
		mp->phy = phy_scan(mp, pd->phy_addr);

		if (IS_ERR(mp->phy))
			err = PTR_ERR(mp->phy);
		else
			phy_init(mp, pd->speed, pd->duplex);
	}
	if (err == -ENODEV) {
		err = -EPROBE_DEFER;
		goto out;
	}
	if (err)
		goto out;

	dev->ethtool_ops = &mv643xx_eth_ethtool_ops;

	init_pscr(mp, pd->speed, pd->duplex);


	mib_counters_clear(mp);

	init_timer(&mp->mib_counters_timer);
	mp->mib_counters_timer.data = (unsigned long)mp;
	mp->mib_counters_timer.function = mib_counters_timer_wrapper;
	mp->mib_counters_timer.expires = jiffies + 30 * HZ;

	spin_lock_init(&mp->mib_counters_lock);

	INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);

	netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT);

	init_timer(&mp->rx_oom);
	mp->rx_oom.data = (unsigned long)mp;
	mp->rx_oom.function = oom_timer_wrapper;


	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	BUG_ON(!res);
	dev->irq = res->start;

	dev->netdev_ops = &mv643xx_eth_netdev_ops;

	dev->watchdog_timeo = 2 * HZ;
	dev->base_addr = 0;

	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
	dev->vlan_features = dev->features;

	dev->features |= NETIF_F_RXCSUM;
	dev->hw_features = dev->features;

	dev->priv_flags |= IFF_UNICAST_FLT;
	dev->gso_max_segs = MV643XX_MAX_TSO_SEGS;

	SET_NETDEV_DEV(dev, &pdev->dev);

	if (mp->shared->win_protect)
		wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);

	netif_carrier_off(dev);

	wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);

	set_rx_coal(mp, 250);
	set_tx_coal(mp, 0);

	err = register_netdev(dev);
	if (err)
		goto out;

	netdev_notice(dev, "port %d with MAC address %pM\n",
		      mp->port_num, dev->dev_addr);

	if (mp->tx_desc_sram_size > 0)
		netdev_notice(dev, "configured with sram\n");

	return 0;

out:
	if (!IS_ERR(mp->clk))
		clk_disable_unprepare(mp->clk);
	free_netdev(dev);

	return err;
}

static int mv643xx_eth_remove(struct platform_device *pdev)
{
	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);

	unregister_netdev(mp->dev);
	if (mp->phy != NULL)
		phy_disconnect(mp->phy);
	cancel_work_sync(&mp->tx_timeout_task);

	if (!IS_ERR(mp->clk))
		clk_disable_unprepare(mp->clk);

	free_netdev(mp->dev);

	return 0;
}

static void mv643xx_eth_shutdown(struct platform_device *pdev)
{
	struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);

	/* Mask all interrupts on ethernet port */
	wrlp(mp, INT_MASK, 0);
	rdlp(mp, INT_MASK);

	if (netif_running(mp->dev))
		port_reset(mp);
}

static struct platform_driver mv643xx_eth_driver = {
	.probe		= mv643xx_eth_probe,
	.remove		= mv643xx_eth_remove,
	.shutdown	= mv643xx_eth_shutdown,
	.driver = {
		.name	= MV643XX_ETH_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init mv643xx_eth_init_module(void)
{
	int rc;

	rc = platform_driver_register(&mv643xx_eth_shared_driver);
	if (!rc) {
		rc = platform_driver_register(&mv643xx_eth_driver);
		if (rc)
			platform_driver_unregister(&mv643xx_eth_shared_driver);
	}

	return rc;
}
module_init(mv643xx_eth_init_module);

static void __exit mv643xx_eth_cleanup_module(void)
{
	platform_driver_unregister(&mv643xx_eth_driver);
	platform_driver_unregister(&mv643xx_eth_shared_driver);
}
module_exit(mv643xx_eth_cleanup_module);

MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, "
	      "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek");
MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);
MODULE_ALIAS("platform:" MV643XX_ETH_NAME);