fec_main.c 93.1 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4
/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
5
 * Right now, I am very wasteful with the buffers.  I allocate memory
L
Linus Torvalds 已提交
6 7 8 9 10 11 12 13 14
 * pages and then divide them into 2K frame buffers.  This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
15 16
 * Support for FEC controller of ColdFire processors.
 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
17 18
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
19
 * Copyright (c) 2004-2006 Macq Electronique SA.
20
 *
S
Shawn Guo 已提交
21
 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
L
Linus Torvalds 已提交
22 23 24 25 26
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
27
#include <linux/pm_runtime.h>
L
Linus Torvalds 已提交
28 29 30 31 32 33 34 35 36
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
37 38 39
#include <linux/in.h>
#include <linux/ip.h>
#include <net/ip.h>
N
Nimrod Andy 已提交
40
#include <net/tso.h>
41 42 43
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
L
Linus Torvalds 已提交
44 45 46
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
47 48
#include <linux/io.h>
#include <linux/irq.h>
49
#include <linux/clk.h>
50
#include <linux/platform_device.h>
51
#include <linux/mdio.h>
52
#include <linux/phy.h>
53
#include <linux/fec.h>
54 55 56
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
57
#include <linux/of_mdio.h>
58
#include <linux/of_net.h>
59
#include <linux/regulator/consumer.h>
60
#include <linux/if_vlan.h>
F
Fabio Estevam 已提交
61
#include <linux/pinctrl/consumer.h>
62
#include <linux/prefetch.h>
63
#include <soc/imx/cpuidle.h>
L
Linus Torvalds 已提交
64

65
#include <asm/cacheflush.h>
66

L
Linus Torvalds 已提交
67 68
#include "fec.h"

69
static void set_multicast_list(struct net_device *ndev);
70
static void fec_enet_itr_coal_init(struct net_device *ndev);
71

72 73
#define DRIVER_NAME	"fec"

74 75
#define FEC_ENET_GET_QUQUE(_x) ((_x == 0) ? 1 : ((_x == 1) ? 2 : 0))

76 77 78 79 80 81 82
/* Pause frame field and FIFO threshold */
#define FEC_ENET_FCE	(1 << 5)
#define FEC_ENET_RSEM_V	0x84
#define FEC_ENET_RSFL_V	16
#define FEC_ENET_RAEM_V	0x8
#define FEC_ENET_RAFL_V	0x8
#define FEC_ENET_OPD_V	0xFFF0
83
#define FEC_MDIO_PM_TIMEOUT  100 /* ms */
84

85 86
static struct platform_device_id fec_devtype[] = {
	{
87
		/* keep it for coldfire */
88 89
		.name = DRIVER_NAME,
		.driver_data = 0,
90 91
	}, {
		.name = "imx25-fec",
92
		.driver_data = FEC_QUIRK_USE_GASKET,
93 94
	}, {
		.name = "imx27-fec",
95
		.driver_data = 0,
96 97
	}, {
		.name = "imx28-fec",
98
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
99
				FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC,
S
Shawn Guo 已提交
100 101
	}, {
		.name = "imx6q-fec",
102
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
103
				FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
104 105
				FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
				FEC_QUIRK_HAS_RACC,
106
	}, {
107
		.name = "mvf600-fec",
108
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC,
109 110 111 112
	}, {
		.name = "imx6sx-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
				FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
113
				FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
114
				FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
115
				FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
116 117 118 119 120 121
	}, {
		.name = "imx6ul-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
				FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
				FEC_QUIRK_HAS_VLAN | FEC_QUIRK_BUG_CAPTURE |
				FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
122 123 124
	}, {
		/* sentinel */
	}
125
};
126
MODULE_DEVICE_TABLE(platform, fec_devtype);
127

128
/* Indices into fec_devtype[] used by the device-tree match table below.
 * Starts at 1 because entry 0 is the name-matched ColdFire entry.
 */
enum imx_fec_type {
	IMX25_FEC = 1,	/* runs on i.mx25/50/53 */
	IMX27_FEC,	/* runs on i.mx27/35/51 */
	IMX28_FEC,
	IMX6Q_FEC,
	MVF600_FEC,
	IMX6SX_FEC,
	IMX6UL_FEC,
};

/* Device-tree match table: each compatible string points at the
 * corresponding quirk entry in fec_devtype[].
 */
static const struct of_device_id fec_dt_ids[] = {
	{ .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
	{ .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
	{ .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
	{ .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
	{ .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
	{ .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
	{ .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);

150 151 152
/* Optional MAC address override supplied via the "macaddr" module parameter. */
static unsigned char macaddr[ETH_ALEN];
module_param_array(macaddr, byte, NULL, 0);
MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
L
Linus Torvalds 已提交
153

154
#if defined(CONFIG_M5272)
L
Linus Torvalds 已提交
155 156 157 158 159 160 161 162 163 164
/*
 * Some hardware gets it MAC address out of local flash memory.
 * if this is non-zero then assume it is the address to get MAC from.
 */
#if defined(CONFIG_NETtel)
#define	FEC_FLASHMAC	0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define	FEC_FLASHMAC	0xf0006000
#elif defined(CONFIG_CANCam)
#define	FEC_FLASHMAC	0xf0020000
165 166 167
#elif defined (CONFIG_M5272C3)
#define	FEC_FLASHMAC	(0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
L
Lothar Waßmann 已提交
168
#define FEC_FLASHMAC	0xffc0406b
L
Linus Torvalds 已提交
169 170 171
#else
#define	FEC_FLASHMAC	0
#endif
172
#endif /* CONFIG_M5272 */
173

174
/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
L
Linus Torvalds 已提交
175
 */
176
#define PKT_MAXBUF_SIZE		1522
L
Linus Torvalds 已提交
177
#define PKT_MINBUF_SIZE		64
178
#define PKT_MAXBLR_SIZE		1536
L
Linus Torvalds 已提交
179

180 181 182
/* FEC receive acceleration */
#define FEC_RACC_IPDIS		(1 << 1)
#define FEC_RACC_PRODIS		(1 << 2)
183
#define FEC_RACC_SHIFT16	BIT(7)
184 185
#define FEC_RACC_OPTIONS	(FEC_RACC_IPDIS | FEC_RACC_PRODIS)

L
Linus Torvalds 已提交
186
/*
187
 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
L
Linus Torvalds 已提交
188 189 190
 * size bits. Other FEC hardware does not, so we need to take that into
 * account when setting it.
 */
191
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
192
    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
L
Linus Torvalds 已提交
193 194 195 196 197
#define	OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
#else
#define	OPT_FRAME_SIZE	0
#endif

198 199 200 201 202 203 204 205
/* FEC MII MMFR bits definition */
#define FEC_MMFR_ST		(1 << 30)
#define FEC_MMFR_OP_READ	(2 << 28)
#define FEC_MMFR_OP_WRITE	(1 << 28)
#define FEC_MMFR_PA(v)		((v & 0x1f) << 23)
#define FEC_MMFR_RA(v)		((v & 0x1f) << 18)
#define FEC_MMFR_TA		(2 << 16)
#define FEC_MMFR_DATA(v)	(v & 0xffff)
N
Nimrod Andy 已提交
206 207 208
/* FEC ECR bits definition */
#define FEC_ECR_MAGICEN		(1 << 2)
#define FEC_ECR_SLEEP		(1 << 3)
L
Linus Torvalds 已提交
209

210
#define FEC_MII_TIMEOUT		30000 /* us */
L
Linus Torvalds 已提交
211

S
Sascha Hauer 已提交
212 213
/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)
L
Linus Torvalds 已提交
214

215 216
#define FEC_PAUSE_FLAG_AUTONEG	0x1
#define FEC_PAUSE_FLAG_ENABLE	0x2
N
Nimrod Andy 已提交
217 218 219
#define FEC_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define FEC_WOL_FLAG_ENABLE		(0x1 << 1)
#define FEC_WOL_FLAG_SLEEP_ON		(0x1 << 2)
220

221 222
#define COPYBREAK_DEFAULT	256

N
Nimrod Andy 已提交
223 224 225 226 227 228 229
#define TSO_HEADER_SIZE		128
/* Max number of allowed TCP segments for software TSO */
#define FEC_MAX_TSO_SEGS	100
#define FEC_MAX_SKB_DESCS	(FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_dma) && \
T
Troy Kisky 已提交
230
	(addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))
N
Nimrod Andy 已提交
231

L
Lothar Waßmann 已提交
232 233
static int mii_cnt;

T
Troy Kisky 已提交
234 235 236 237 238 239
/* Advance to the next buffer descriptor in the ring, wrapping from the last
 * descriptor back to the base.  bd->dsize is the descriptor stride (legacy
 * vs. extended descriptors differ in size).
 *
 * Fix: cast through unsigned long, not unsigned int — casting a pointer to
 * plain "unsigned" truncates it on 64-bit (LP64) platforms.
 */
static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
					     struct bufdesc_prop *bd)
{
	return (bdp >= bd->last) ? bd->base
			: (struct bufdesc *)(((unsigned long)bdp) + bd->dsize);
}
240

T
Troy Kisky 已提交
241 242 243 244 245
/* Step back to the previous buffer descriptor in the ring, wrapping from the
 * base descriptor to the last one.
 *
 * Fix: cast through unsigned long, not unsigned int — casting a pointer to
 * plain "unsigned" truncates it on 64-bit (LP64) platforms.
 */
static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
					     struct bufdesc_prop *bd)
{
	return (bdp <= bd->base) ? bd->last
			: (struct bufdesc *)(((unsigned long)bdp) - bd->dsize);
}

T
Troy Kisky 已提交
248 249
/* Return the ring index of a descriptor, computed from its byte offset from
 * the ring base shifted by the log2 descriptor size.
 */
static int fec_enet_get_bd_index(struct bufdesc *bdp,
				 struct bufdesc_prop *bd)
{
	return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2;
}

T
Troy Kisky 已提交
254
/* Number of free TX descriptors between the producer (txq->bd.cur) and the
 * cleanup point (txq->dirty_tx), minus one so the ring never fills
 * completely; a negative raw distance is wrapped by the ring size.
 */
static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq)
{
	int entries;

	entries = (((const char *)txq->dirty_tx -
			(const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;

	return entries >= 0 ? entries : entries + txq->bd.ring_size;
}

264
/* Byte-swap a buffer in place, one 32-bit word at a time (used on hardware
 * with the FEC_QUIRK_SWAP_FRAME quirk).  A trailing partial word is swapped
 * as a full word, so callers must provide word-padded buffers.
 */
static void swap_buffer(void *bufaddr, int len)
{
	unsigned int *word = bufaddr;
	int remaining;

	for (remaining = len; remaining > 0; remaining -= 4)
		swab32s(word++);
}

273 274 275 276 277 278 279 280 281 282
/* Copy src_buf to dst_buf while byte-swapping each 32-bit word.  Like
 * swap_buffer(), the length is processed in whole words.
 */
static void swap_buffer2(void *dst_buf, void *src_buf, int len)
{
	unsigned int *out = dst_buf;
	unsigned int *in = src_buf;
	int remaining = len;

	while (remaining > 0) {
		*out = swab32p(in);
		out++;
		in++;
		remaining -= 4;
	}
}

283 284 285
/* Debug helper: print every descriptor of TX queue 0 — status word, DMA
 * address, length and the associated skb pointer.  'S' marks the software
 * producer position (bd.cur), 'H' the cleanup position (dirty_tx).
 */
static void fec_dump(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc *bdp;
	struct fec_enet_priv_tx_q *txq;
	int index = 0;

	netdev_info(ndev, "TX ring dump\n");
	pr_info("Nr     SC     addr       len  SKB\n");

	txq = fep->tx_queue[0];
	bdp = txq->bd.base;

	do {
		pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
			index,
			bdp == txq->bd.cur ? 'S' : ' ',
			bdp == txq->dirty_tx ? 'H' : ' ',
			fec16_to_cpu(bdp->cbd_sc),
			fec32_to_cpu(bdp->cbd_bufaddr),
			fec16_to_cpu(bdp->cbd_datlen),
			txq->tx_skbuff[index]);
		/* walk the whole ring exactly once, wrapping at the end */
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		index++;
	} while (bdp != txq->bd.base);
}

310 311 312 313 314
/* True if the skb carries an IPv4 packet (ETH_P_IP protocol and IP
 * version field equal to 4).
 */
static inline bool is_ipv4_pkt(struct sk_buff *skb)
{
	return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
}

315 316 317 318 319 320 321 322 323 324
/* Prepare a CHECKSUM_PARTIAL skb for hardware checksum insertion: zero the
 * IPv4 header checksum (if IPv4) and the L4 checksum field so the FEC can
 * fill them in.  Returns 0 on success (or nothing to do), -1 if the header
 * could not be made writable.
 */
static int
fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
{
	/* Only run for packets requiring a checksum. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	/* Ensure the headers are private/writable before modifying them. */
	if (unlikely(skb_cow_head(skb, 0)))
		return -1;

	if (is_ipv4_pkt(skb))
		ip_hdr(skb)->check = 0;
	/* Zero the L4 checksum field located via csum_start/csum_offset. */
	*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;

	return 0;
}

332
/* Queue the page fragments of an skb onto the TX ring, one descriptor per
 * fragment, starting after txq->bd.cur (the head descriptor is filled by the
 * caller).  Returns the last descriptor used, or ERR_PTR(-ENOMEM) if a DMA
 * mapping failed — in which case all fragment mappings made so far are
 * unwound.
 */
static struct bufdesc *
fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
			     struct sk_buff *skb,
			     struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc *bdp = txq->bd.cur;
	struct bufdesc_ex *ebdp;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int frag, frag_len;
	unsigned short status;
	unsigned int estatus = 0;
	skb_frag_t *this_frag;
	unsigned int index;
	void *bufaddr;
	dma_addr_t addr;
	int i;

	for (frag = 0; frag < nr_frags; frag++) {
		this_frag = &skb_shinfo(skb)->frags[frag];
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		ebdp = (struct bufdesc_ex *)bdp;

		status = fec16_to_cpu(bdp->cbd_sc);
		status &= ~BD_ENET_TX_STATS;
		status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
		frag_len = skb_shinfo(skb)->frags[frag].size;

		/* Handle the last BD specially */
		if (frag == nr_frags - 1) {
			status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
			if (fep->bufdesc_ex) {
				estatus |= BD_ENET_TX_INT;
				/* request a TX timestamp if enabled */
				if (unlikely(skb_shinfo(skb)->tx_flags &
					SKBTX_HW_TSTAMP && fep->hwts_tx_en))
					estatus |= BD_ENET_TX_TS;
			}
		}

		if (fep->bufdesc_ex) {
			if (fep->quirks & FEC_QUIRK_HAS_AVB)
				estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
			ebdp->cbd_bdu = 0;
			ebdp->cbd_esc = cpu_to_fec32(estatus);
		}

		bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;

		index = fec_enet_get_bd_index(bdp, &txq->bd);
		/* Copy through the bounce buffer when the fragment is not
		 * aligned for this hardware, or when the frame must be
		 * byte-swapped in place (FEC_QUIRK_SWAP_FRAME).
		 */
		if (((unsigned long) bufaddr) & fep->tx_align ||
			fep->quirks & FEC_QUIRK_SWAP_FRAME) {
			memcpy(txq->tx_bounce[index], bufaddr, frag_len);
			bufaddr = txq->tx_bounce[index];

			if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
				swap_buffer(bufaddr, frag_len);
		}

		addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(&fep->pdev->dev, addr)) {
			if (net_ratelimit())
				netdev_err(ndev, "Tx DMA memory map failed\n");
			goto dma_mapping_error;
		}

		bdp->cbd_bufaddr = cpu_to_fec32(addr);
		bdp->cbd_datlen = cpu_to_fec16(frag_len);
		/* Make sure the updates to rest of the descriptor are
		 * performed before transferring ownership.
		 */
		wmb();
		bdp->cbd_sc = cpu_to_fec16(status);
	}

	return bdp;
dma_mapping_error:
	/* Unwind: unmap every fragment mapped before the failure. */
	bdp = txq->bd.cur;
	for (i = 0; i < frag; i++) {
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
				 fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
	}
	return ERR_PTR(-ENOMEM);
}
L
Linus Torvalds 已提交
419

420 421
/* Queue one (possibly scatter-gather) skb on a TX ring and kick the
 * hardware.  The head descriptor holds the linear part; fragments, if any,
 * are appended by fec_enet_txq_submit_frag_skb().  On any error the skb is
 * consumed and NETDEV_TX_OK is returned (the packet is dropped, not
 * requeued).
 */
static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
				   struct sk_buff *skb, struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct bufdesc *bdp, *last_bdp;
	void *bufaddr;
	dma_addr_t addr;
	unsigned short status;
	unsigned short buflen;
	unsigned int estatus = 0;
	unsigned int index;
	int entries_free;

	/* Drop early if the ring cannot hold a maximally fragmented skb. */
	entries_free = fec_enet_get_free_txdesc_num(txq);
	if (entries_free < MAX_SKB_FRAGS + 1) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "NOT enough BD for SG!\n");
		return NETDEV_TX_OK;
	}

	/* Protocol checksum off-load for TCP and UDP. */
	if (fec_enet_clear_csum(skb, ndev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Fill in a Tx ring entry */
	bdp = txq->bd.cur;
	last_bdp = bdp;
	status = fec16_to_cpu(bdp->cbd_sc);
	status &= ~BD_ENET_TX_STATS;

	/* Set buffer length and buffer pointer */
	bufaddr = skb->data;
	buflen = skb_headlen(skb);

	index = fec_enet_get_bd_index(bdp, &txq->bd);
	/* Bounce-copy when the linear data is misaligned for this hardware
	 * or the frame must be byte-swapped (FEC_QUIRK_SWAP_FRAME).
	 */
	if (((unsigned long) bufaddr) & fep->tx_align ||
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], skb->data, buflen);
		bufaddr = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(bufaddr, buflen);
	}

	/* Push the data cache so the CPM does not get stale memory data. */
	addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, addr)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "Tx DMA memory map failed\n");
		return NETDEV_TX_OK;
	}

	if (nr_frags) {
		last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
		if (IS_ERR(last_bdp)) {
			/* fragment mapping failed: undo the head mapping */
			dma_unmap_single(&fep->pdev->dev, addr,
					 buflen, DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	} else {
		/* single-descriptor frame: mark it last + interrupting */
		status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
		if (fep->bufdesc_ex) {
			estatus = BD_ENET_TX_INT;
			if (unlikely(skb_shinfo(skb)->tx_flags &
				SKBTX_HW_TSTAMP && fep->hwts_tx_en))
				estatus |= BD_ENET_TX_TS;
		}
	}
	bdp->cbd_bufaddr = cpu_to_fec32(addr);
	bdp->cbd_datlen = cpu_to_fec16(buflen);

	if (fep->bufdesc_ex) {

		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
			fep->hwts_tx_en))
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;

		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = cpu_to_fec32(estatus);
	}

	/* The skb is recorded at the LAST descriptor of the frame, where TX
	 * completion will find it.
	 */
	index = fec_enet_get_bd_index(last_bdp, &txq->bd);
	/* Save skb pointer */
	txq->tx_skbuff[index] = skb;

	/* Make sure the updates to rest of the descriptor are performed before
	 * transferring ownership.
	 */
	wmb();

	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
	bdp->cbd_sc = cpu_to_fec16(status);

	/* If this was the last BD in the ring, start at the beginning again. */
	bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);

	skb_tx_timestamp(skb);

	/* Make sure the update to bdp and tx_skbuff are performed before
	 * txq->bd.cur.
	 */
	wmb();
	txq->bd.cur = bdp;

	/* Trigger transmission start */
	writel(0, txq->bd.reg_desc_active);

	return 0;
}

N
Nimrod Andy 已提交
547
/* Fill one TX descriptor with a TSO payload chunk.  last_tcp marks the final
 * chunk of a TCP segment; is_last marks the final chunk of the whole skb
 * (requests a completion interrupt).  Returns 0 on success or NETDEV_TX_BUSY
 * if the DMA mapping failed (the skb is freed in that case).
 */
static int
fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
			  struct net_device *ndev,
			  struct bufdesc *bdp, int index, char *data,
			  int size, bool last_tcp, bool is_last)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
	unsigned short status;
	unsigned int estatus = 0;
	dma_addr_t addr;

	status = fec16_to_cpu(bdp->cbd_sc);
	status &= ~BD_ENET_TX_STATS;

	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);

	/* Bounce-copy when misaligned for this hardware or when the frame
	 * must be byte-swapped (FEC_QUIRK_SWAP_FRAME).
	 */
	if (((unsigned long) data) & fep->tx_align ||
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], data, size);
		data = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(data, size);
	}

	addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, addr)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "Tx DMA memory map failed\n");
		return NETDEV_TX_BUSY;
	}

	bdp->cbd_datlen = cpu_to_fec16(size);
	bdp->cbd_bufaddr = cpu_to_fec32(addr);

	if (fep->bufdesc_ex) {
		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = cpu_to_fec32(estatus);
	}

	/* Handle the last BD specially */
	if (last_tcp)
		status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
	if (is_last) {
		status |= BD_ENET_TX_INTR;
		if (fep->bufdesc_ex)
			ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT);
	}

	bdp->cbd_sc = cpu_to_fec16(status);

	return 0;
}

/* Fill one TX descriptor with the (software-built) MAC+IP+TCP header for a
 * TSO segment.  The header normally lives in the pre-mapped tso_hdrs DMA
 * area at a fixed per-descriptor slot; if it is misaligned for this hardware
 * or must be byte-swapped, it is copied to a bounce buffer and mapped
 * separately.  Returns 0 on success or NETDEV_TX_BUSY on mapping failure
 * (the skb is freed).
 */
static int
fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
			 struct sk_buff *skb, struct net_device *ndev,
			 struct bufdesc *bdp, int index)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
	void *bufaddr;
	unsigned long dmabuf;
	unsigned short status;
	unsigned int estatus = 0;

	status = fec16_to_cpu(bdp->cbd_sc);
	status &= ~BD_ENET_TX_STATS;
	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);

	/* Default: use the per-index slot in the pre-mapped header area. */
	bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
	dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE;
	if (((unsigned long)bufaddr) & fep->tx_align ||
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], skb->data, hdr_len);
		bufaddr = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(bufaddr, hdr_len);

		dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
					hdr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
			dev_kfree_skb_any(skb);
			if (net_ratelimit())
				netdev_err(ndev, "Tx DMA memory map failed\n");
			return NETDEV_TX_BUSY;
		}
	}

	bdp->cbd_bufaddr = cpu_to_fec32(dmabuf);
	bdp->cbd_datlen = cpu_to_fec16(hdr_len);

	if (fep->bufdesc_ex) {
		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = cpu_to_fec32(estatus);
	}

	bdp->cbd_sc = cpu_to_fec16(status);

	return 0;
}

661 662 663
/* Software TSO: split a GSO skb into MTU-sized TCP segments, building a
 * header descriptor plus payload descriptors for each segment via the
 * net/tso.h helpers, then kick the hardware.  On errors the skb is consumed
 * and NETDEV_TX_OK is returned (packet dropped).
 */
static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
				   struct sk_buff *skb,
				   struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int total_len, data_left;
	struct bufdesc *bdp = txq->bd.cur;
	struct tso_t tso;
	unsigned int index = 0;
	int ret;

	if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "NOT enough BD for TSO!\n");
		return NETDEV_TX_OK;
	}

	/* Protocol checksum off-load for TCP and UDP. */
	if (fec_enet_clear_csum(skb, ndev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Initialize the TSO handler, and prepare the first payload */
	tso_start(skb, &tso);

	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		index = fec_enet_get_bd_index(bdp, &txq->bd);
		/* one segment carries at most gso_size bytes of payload */
		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;

		/* prepare packet headers: MAC + IP + TCP */
		hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index);
		if (ret)
			goto err_release;

		while (data_left > 0) {
			int size;

			size = min_t(int, tso.size, data_left);
			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
			index = fec_enet_get_bd_index(bdp, &txq->bd);
			ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
							bdp, index,
							tso.data, size,
							size == data_left,
							total_len == 0);
			if (ret)
				goto err_release;

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}

		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
	}

	/* Save skb pointer (at the last descriptor used, where TX
	 * completion will find it).
	 */
	txq->tx_skbuff[index] = skb;

	skb_tx_timestamp(skb);
	txq->bd.cur = bdp;

	/* Trigger transmission start.  Erratum ERR007885 workaround: only
	 * write the "descriptor active" register when it reads back zero in
	 * at least one of several attempts.
	 */
	if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
	    !readl(txq->bd.reg_desc_active) ||
	    !readl(txq->bd.reg_desc_active) ||
	    !readl(txq->bd.reg_desc_active) ||
	    !readl(txq->bd.reg_desc_active))
		writel(0, txq->bd.reg_desc_active);

	return 0;

err_release:
	/* TODO: Release all used data descriptors for TSO */
	return ret;
}

/* ndo_start_xmit handler: dispatch the skb to the TSO or plain submit path
 * for its mapped TX queue, then stop the netdev queue when the ring's free
 * descriptor count drops to the stop threshold.
 */
static netdev_tx_t
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int entries_free;
	unsigned short queue;
	struct fec_enet_priv_tx_q *txq;
	struct netdev_queue *nq;
	int ret;

	queue = skb_get_queue_mapping(skb);
	txq = fep->tx_queue[queue];
	nq = netdev_get_tx_queue(ndev, queue);

	if (skb_is_gso(skb))
		ret = fec_enet_txq_submit_tso(txq, skb, ndev);
	else
		ret = fec_enet_txq_submit_skb(txq, skb, ndev);
	if (ret)
		return ret;

	entries_free = fec_enet_get_free_txdesc_num(txq);
	if (entries_free <= txq->tx_stop_threshold)
		netif_tx_stop_queue(nq);

	return NETDEV_TX_OK;
}

774 775 776 777 778
/* Init RX & TX buffer descriptors.
 *
 * RX: mark every descriptor that already has a buffer attached as EMPTY
 * (owned by hardware), others as 0.  TX: clear all descriptors and free any
 * skbs still pending in the ring.  Both rings get their WRAP bit set on the
 * last descriptor and their software cursors reset.
 */
static void fec_enet_bd_init(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	struct bufdesc *bdp;
	unsigned int i;
	unsigned int q;

	for (q = 0; q < fep->num_rx_queues; q++) {
		/* Initialize the receive buffer descriptors. */
		rxq = fep->rx_queue[q];
		bdp = rxq->bd.base;

		for (i = 0; i < rxq->bd.ring_size; i++) {

			/* Initialize the BD for every fragment in the page. */
			if (bdp->cbd_bufaddr)
				bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
			else
				bdp->cbd_sc = cpu_to_fec16(0);
			bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
		}

		/* Set the last buffer to wrap */
		bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);

		rxq->bd.cur = rxq->bd.base;
	}

	for (q = 0; q < fep->num_tx_queues; q++) {
		/* ...and the same for transmit */
		txq = fep->tx_queue[q];
		bdp = txq->bd.base;
		txq->bd.cur = bdp;

		for (i = 0; i < txq->bd.ring_size; i++) {
			/* Initialize the BD for every fragment in the page. */
			bdp->cbd_sc = cpu_to_fec16(0);
			if (txq->tx_skbuff[i]) {
				dev_kfree_skb_any(txq->tx_skbuff[i]);
				txq->tx_skbuff[i] = NULL;
			}
			bdp->cbd_bufaddr = cpu_to_fec32(0);
			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		}

		/* Set the last buffer to wrap */
		bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
		txq->dirty_tx = bdp;
	}
}
830

F
Frank Li 已提交
831 832 833 834 835 836
/* Poke the "descriptor active" register of every RX queue so the controller
 * resumes scanning its receive rings.
 */
static void fec_enet_active_rxring(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int q = 0;

	while (q < fep->num_rx_queues) {
		writel(0, fep->rx_queue[q]->bd.reg_desc_active);
		q++;
	}
}

840 841 842 843 844 845
/* Program the hardware with each ring's descriptor base address (and the RX
 * max buffer size), and enable the extra DMA channels for queues 1 and 2 on
 * multi-queue hardware.
 */
static void fec_enet_enable_ring(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	int i;

	for (i = 0; i < fep->num_rx_queues; i++) {
		rxq = fep->rx_queue[i];
		writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
		writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));

		/* enable DMA1/2 */
		if (i)
			writel(RCMR_MATCHEN | RCMR_CMP(i),
			       fep->hwp + FEC_RCMR(i));
	}

	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = fep->tx_queue[i];
		writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i));

		/* enable DMA1/2 */
		if (i)
			writel(DMA_CLASS_EN | IDLE_SLOPE(i),
			       fep->hwp + FEC_DMA_CFG(i));
	}
}
868

869 870 871 872 873 874 875 876 877
static void fec_enet_reset_skb(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_tx_q *txq;
	int i, j;

	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = fep->tx_queue[i];

T
Troy Kisky 已提交
878
		for (j = 0; j < txq->bd.ring_size; j++) {
879 880 881 882 883 884
			if (txq->tx_skbuff[j]) {
				dev_kfree_skb_any(txq->tx_skbuff[j]);
				txq->tx_skbuff[j] = NULL;
			}
		}
	}
885 886
}

887 888 889 890
/*
 * This function is called to start or restart the FEC during a link
 * change, transmit timeout, or to reconfigure the FEC.  The network
 * packet processing for this device must be stopped before this call.
891
 */
L
Linus Torvalds 已提交
892
static void
893
fec_restart(struct net_device *ndev)
L
Linus Torvalds 已提交
894
{
895
	struct fec_enet_private *fep = netdev_priv(ndev);
896
	u32 val;
897 898
	u32 temp_mac[2];
	u32 rcntl = OPT_FRAME_SIZE | 0x04;
S
Shawn Guo 已提交
899
	u32 ecntl = 0x2; /* ETHEREN */
L
Linus Torvalds 已提交
900

901 902 903 904
	/* Whack a reset.  We should wait for this.
	 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
	 * instead of reset MAC itself.
	 */
905
	if (fep->quirks & FEC_QUIRK_HAS_AVB) {
906 907 908 909 910
		writel(0, fep->hwp + FEC_ECNTRL);
	} else {
		writel(1, fep->hwp + FEC_ECNTRL);
		udelay(10);
	}
L
Linus Torvalds 已提交
911

912 913 914 915
	/*
	 * enet-mac reset will reset mac address registers too,
	 * so need to reconfigure it.
	 */
916 917 918 919 920
	memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
	writel((__force u32)cpu_to_be32(temp_mac[0]),
	       fep->hwp + FEC_ADDR_LOW);
	writel((__force u32)cpu_to_be32(temp_mac[1]),
	       fep->hwp + FEC_ADDR_HIGH);
L
Linus Torvalds 已提交
921

922
	/* Clear any outstanding interrupt. */
923
	writel(0xffffffff, fep->hwp + FEC_IEVENT);
L
Linus Torvalds 已提交
924

925 926
	fec_enet_bd_init(ndev);

927
	fec_enet_enable_ring(ndev);
928

929 930
	/* Reset tx SKB buffers. */
	fec_enet_reset_skb(ndev);
931

932
	/* Enable MII mode */
933
	if (fep->full_duplex == DUPLEX_FULL) {
934
		/* FD enable */
935 936
		writel(0x04, fep->hwp + FEC_X_CNTRL);
	} else {
937 938
		/* No Rcv on Xmit */
		rcntl |= 0x02;
939 940
		writel(0x0, fep->hwp + FEC_X_CNTRL);
	}
941

942 943 944
	/* Set MII speed */
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

G
Guenter Roeck 已提交
945
#if !defined(CONFIG_M5272)
946 947
	if (fep->quirks & FEC_QUIRK_HAS_RACC) {
		val = readl(fep->hwp + FEC_RACC);
948 949
		/* align IP header */
		val |= FEC_RACC_SHIFT16;
950
		if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
951
			/* set RX checksum */
952 953 954 955
			val |= FEC_RACC_OPTIONS;
		else
			val &= ~FEC_RACC_OPTIONS;
		writel(val, fep->hwp + FEC_RACC);
956
		writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
957
	}
G
Guenter Roeck 已提交
958
#endif
959

960 961 962 963
	/*
	 * The phy interface and speed need to get configured
	 * differently on enet-mac.
	 */
964
	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
965 966
		/* Enable flow control and length check */
		rcntl |= 0x40000000 | 0x00000020;
967

S
Shawn Guo 已提交
968
		/* RGMII, RMII or MII */
M
Markus Pargmann 已提交
969 970 971 972
		if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII ||
		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
S
Shawn Guo 已提交
973 974
			rcntl |= (1 << 6);
		else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
975
			rcntl |= (1 << 8);
976
		else
977
			rcntl &= ~(1 << 8);
978

S
Shawn Guo 已提交
979
		/* 1G, 100M or 10M */
980 981
		if (ndev->phydev) {
			if (ndev->phydev->speed == SPEED_1000)
S
Shawn Guo 已提交
982
				ecntl |= (1 << 5);
983
			else if (ndev->phydev->speed == SPEED_100)
S
Shawn Guo 已提交
984 985 986 987
				rcntl &= ~(1 << 9);
			else
				rcntl |= (1 << 9);
		}
988 989
	} else {
#ifdef FEC_MIIGSK_ENR
990
		if (fep->quirks & FEC_QUIRK_USE_GASKET) {
991
			u32 cfgr;
992 993 994 995 996 997 998 999
			/* disable the gasket and wait */
			writel(0, fep->hwp + FEC_MIIGSK_ENR);
			while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
				udelay(1);

			/*
			 * configure the gasket:
			 *   RMII, 50 MHz, no loopback, no echo
1000
			 *   MII, 25 MHz, no loopback, no echo
1001
			 */
1002 1003
			cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
				? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
1004
			if (ndev->phydev && ndev->phydev->speed == SPEED_10)
1005 1006
				cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
			writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);
1007 1008 1009

			/* re-enable the gasket */
			writel(2, fep->hwp + FEC_MIIGSK_ENR);
1010
		}
1011 1012
#endif
	}
1013

G
Guenter Roeck 已提交
1014
#if !defined(CONFIG_M5272)
1015 1016 1017
	/* enable pause frame*/
	if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
	    ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
1018
	     ndev->phydev && ndev->phydev->pause)) {
1019 1020
		rcntl |= FEC_ENET_FCE;

1021
		/* set FIFO threshold parameter to reduce overrun */
1022 1023 1024 1025 1026 1027 1028 1029 1030 1031
		writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
		writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
		writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
		writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);

		/* OPD */
		writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
	} else {
		rcntl &= ~FEC_ENET_FCE;
	}
G
Guenter Roeck 已提交
1032
#endif /* !defined(CONFIG_M5272) */
1033

1034
	writel(rcntl, fep->hwp + FEC_R_CNTRL);
1035

1036 1037 1038 1039 1040 1041 1042
	/* Setup multicast filter. */
	set_multicast_list(ndev);
#ifndef CONFIG_M5272
	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
#endif

1043
	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
S
Shawn Guo 已提交
1044 1045 1046 1047 1048 1049
		/* enable ENET endian swap */
		ecntl |= (1 << 8);
		/* enable ENET store and forward mode */
		writel(1 << 8, fep->hwp + FEC_X_WMRK);
	}

1050 1051
	if (fep->bufdesc_ex)
		ecntl |= (1 << 4);
1052

1053
#ifndef CONFIG_M5272
1054 1055
	/* Enable the MIB statistic event counters */
	writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT);
1056 1057
#endif

1058
	/* And last, enable the transmit and receive processing */
S
Shawn Guo 已提交
1059
	writel(ecntl, fep->hwp + FEC_ECNTRL);
F
Frank Li 已提交
1060
	fec_enet_active_rxring(ndev);
1061

1062 1063 1064
	if (fep->bufdesc_ex)
		fec_ptp_start_cyclecounter(ndev);

1065
	/* Enable interrupts we wish to service */
1066 1067 1068 1069
	if (fep->link)
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
	else
		writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
1070 1071 1072 1073

	/* Init the interrupt coalescing */
	fec_enet_itr_coal_init(ndev);

1074 1075 1076 1077 1078 1079
}

static void
fec_stop(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
N
Nimrod Andy 已提交
1080
	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
1081
	u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
N
Nimrod Andy 已提交
1082
	u32 val;
1083 1084 1085 1086 1087 1088

	/* We cannot expect a graceful transmit stop without link !!! */
	if (fep->link) {
		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
		udelay(10);
		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
1089
			netdev_err(ndev, "Graceful transmit stop did not complete!\n");
1090 1091
	}

1092 1093 1094 1095
	/* Whack a reset.  We should wait for this.
	 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
	 * instead of reset MAC itself.
	 */
N
Nimrod Andy 已提交
1096 1097 1098 1099 1100 1101 1102 1103
	if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
		if (fep->quirks & FEC_QUIRK_HAS_AVB) {
			writel(0, fep->hwp + FEC_ECNTRL);
		} else {
			writel(1, fep->hwp + FEC_ECNTRL);
			udelay(10);
		}
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1104
	} else {
N
Nimrod Andy 已提交
1105 1106 1107 1108 1109 1110 1111
		writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
		val = readl(fep->hwp + FEC_ECNTRL);
		val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
		writel(val, fep->hwp + FEC_ECNTRL);

		if (pdata && pdata->sleep_mode_enable)
			pdata->sleep_mode_enable(true);
1112
	}
1113
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
S
Shawn Guo 已提交
1114 1115

	/* We have to keep ENET enabled to have MII interrupt stay working */
N
Nimrod Andy 已提交
1116 1117
	if (fep->quirks & FEC_QUIRK_ENET_MAC &&
		!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
S
Shawn Guo 已提交
1118
		writel(2, fep->hwp + FEC_ECNTRL);
1119 1120
		writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
	}
L
Linus Torvalds 已提交
1121 1122 1123
}


1124 1125 1126 1127 1128
static void
fec_timeout(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

1129 1130
	fec_dump(ndev);

1131 1132
	ndev->stats.tx_errors++;

1133
	schedule_work(&fep->tx_timeout_work);
1134 1135
}

1136
static void fec_enet_timeout_work(struct work_struct *work)
1137 1138
{
	struct fec_enet_private *fep =
1139
		container_of(work, struct fec_enet_private, tx_timeout_work);
1140
	struct net_device *ndev = fep->netdev;
1141

1142 1143 1144 1145 1146 1147 1148 1149
	rtnl_lock();
	if (netif_device_present(ndev) || netif_running(ndev)) {
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		fec_restart(ndev);
		netif_wake_queue(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
1150
	}
1151
	rtnl_unlock();
1152 1153
}

1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168
/*
 * Convert a raw hardware timestamp from a buffer descriptor into a
 * skb_shared_hwtstamps, using the driver's timecounter (tmreg_lock
 * serializes access to the counter state).
 */
static void
fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
	struct skb_shared_hwtstamps *hwtstamps)
{
	unsigned long flags;
	u64 ns;

	spin_lock_irqsave(&fep->tmreg_lock, flags);
	ns = timecounter_cyc2time(&fep->tc, ts);
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ns_to_ktime(ns);
}

L
Linus Torvalds 已提交
1169
static void
1170
fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
L
Linus Torvalds 已提交
1171 1172
{
	struct	fec_enet_private *fep;
1173
	struct bufdesc *bdp;
1174
	unsigned short status;
L
Linus Torvalds 已提交
1175
	struct	sk_buff	*skb;
1176 1177
	struct fec_enet_priv_tx_q *txq;
	struct netdev_queue *nq;
1178
	int	index = 0;
N
Nimrod Andy 已提交
1179
	int	entries_free;
L
Linus Torvalds 已提交
1180

1181
	fep = netdev_priv(ndev);
1182 1183 1184 1185 1186 1187 1188

	queue_id = FEC_ENET_GET_QUQUE(queue_id);

	txq = fep->tx_queue[queue_id];
	/* get next bdp of dirty_tx */
	nq = netdev_get_tx_queue(ndev, queue_id);
	bdp = txq->dirty_tx;
L
Linus Torvalds 已提交
1189

1190
	/* get next bdp of dirty_tx */
T
Troy Kisky 已提交
1191
	bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
1192

T
Troy Kisky 已提交
1193 1194
	while (bdp != READ_ONCE(txq->bd.cur)) {
		/* Order the load of bd.cur and cbd_sc */
1195
		rmb();
1196
		status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
1197
		if (status & BD_ENET_TX_READY)
S
Sascha Hauer 已提交
1198 1199
			break;

T
Troy Kisky 已提交
1200
		index = fec_enet_get_bd_index(bdp, &txq->bd);
1201

1202
		skb = txq->tx_skbuff[index];
1203
		txq->tx_skbuff[index] = NULL;
1204 1205 1206 1207 1208 1209
		if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
			dma_unmap_single(&fep->pdev->dev,
					 fec32_to_cpu(bdp->cbd_bufaddr),
					 fec16_to_cpu(bdp->cbd_datlen),
					 DMA_TO_DEVICE);
		bdp->cbd_bufaddr = cpu_to_fec32(0);
1210 1211
		if (!skb)
			goto skb_done;
1212

L
Linus Torvalds 已提交
1213
		/* Check for errors. */
1214
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
L
Linus Torvalds 已提交
1215 1216
				   BD_ENET_TX_RL | BD_ENET_TX_UN |
				   BD_ENET_TX_CSL)) {
1217
			ndev->stats.tx_errors++;
1218
			if (status & BD_ENET_TX_HB)  /* No heartbeat */
1219
				ndev->stats.tx_heartbeat_errors++;
1220
			if (status & BD_ENET_TX_LC)  /* Late collision */
1221
				ndev->stats.tx_window_errors++;
1222
			if (status & BD_ENET_TX_RL)  /* Retrans limit */
1223
				ndev->stats.tx_aborted_errors++;
1224
			if (status & BD_ENET_TX_UN)  /* Underrun */
1225
				ndev->stats.tx_fifo_errors++;
1226
			if (status & BD_ENET_TX_CSL) /* Carrier lost */
1227
				ndev->stats.tx_carrier_errors++;
L
Linus Torvalds 已提交
1228
		} else {
1229
			ndev->stats.tx_packets++;
1230
			ndev->stats.tx_bytes += skb->len;
L
Linus Torvalds 已提交
1231 1232
		}

1233 1234
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
			fep->bufdesc_ex) {
1235
			struct skb_shared_hwtstamps shhwtstamps;
1236
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
1237

1238
			fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
1239 1240
			skb_tstamp_tx(skb, &shhwtstamps);
		}
1241

L
Linus Torvalds 已提交
1242 1243 1244
		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
1245
		if (status & BD_ENET_TX_DEF)
1246
			ndev->stats.collisions++;
1247

S
Sascha Hauer 已提交
1248
		/* Free the sk buffer associated with this last transmit */
L
Linus Torvalds 已提交
1249
		dev_kfree_skb_any(skb);
1250
skb_done:
1251 1252 1253 1254
		/* Make sure the update to bdp and tx_skbuff are performed
		 * before dirty_tx
		 */
		wmb();
1255
		txq->dirty_tx = bdp;
1256

S
Sascha Hauer 已提交
1257
		/* Update pointer to next buffer descriptor to be transmitted */
T
Troy Kisky 已提交
1258
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
1259

S
Sascha Hauer 已提交
1260
		/* Since we have freed up a buffer, the ring is no longer full
L
Linus Torvalds 已提交
1261
		 */
N
Nimrod Andy 已提交
1262
		if (netif_queue_stopped(ndev)) {
T
Troy Kisky 已提交
1263
			entries_free = fec_enet_get_free_txdesc_num(txq);
1264 1265
			if (entries_free >= txq->tx_wake_threshold)
				netif_tx_wake_queue(nq);
N
Nimrod Andy 已提交
1266
		}
L
Linus Torvalds 已提交
1267
	}
1268 1269

	/* ERR006538: Keep the transmitter going */
T
Troy Kisky 已提交
1270
	if (bdp != txq->bd.cur &&
1271 1272
	    readl(txq->bd.reg_desc_active) == 0)
		writel(0, txq->bd.reg_desc_active);
1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285
}

/*
 * Reclaim completed transmissions on every TX queue flagged in work_tx.
 * Class A is processed first, then Class B and Best Effort (queue bit
 * ordering is established by fec_enet_collect_events()).
 */
static void
fec_enet_tx(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	u16 queue_id;

	/* First process class A queue, then Class B and Best Effort queue */
	for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) {
		clear_bit(queue_id, &fep->work_tx);
		fec_enet_tx_queue(ndev, queue_id);
	}
}

1288 1289 1290 1291 1292 1293 1294 1295 1296 1297
static int
fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb)
{
	struct  fec_enet_private *fep = netdev_priv(ndev);
	int off;

	off = ((unsigned long)skb->data) & fep->rx_align;
	if (off)
		skb_reserve(skb, fep->rx_align + 1 - off);

1298 1299
	bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE));
	if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) {
1300 1301 1302 1303 1304 1305 1306 1307 1308
		if (net_ratelimit())
			netdev_err(ndev, "Rx DMA memory map failed\n");
		return -ENOMEM;
	}

	return 0;
}

/*
 * Copybreak path for small received frames: if the frame is short enough
 * (<= rx_copybreak), copy it into a new skb so the original DMA buffer
 * can stay mapped and be reused.  On success *skb is replaced with the
 * copy and true is returned; false means the caller must take the
 * buffer-swap path.  'swap' selects byte-swapping copy for controllers
 * with the FEC_QUIRK_SWAP_FRAME quirk.
 */
static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
			       struct bufdesc *bdp, u32 length, bool swap)
{
	struct  fec_enet_private *fep = netdev_priv(ndev);
	struct sk_buff *new_skb;

	if (length > fep->rx_copybreak)
		return false;

	new_skb = netdev_alloc_skb(ndev, length);
	if (!new_skb)
		return false;

	dma_sync_single_for_cpu(&fep->pdev->dev,
				fec32_to_cpu(bdp->cbd_bufaddr),
				FEC_ENET_RX_FRSIZE - fep->rx_align,
				DMA_FROM_DEVICE);
	if (!swap)
		memcpy(new_skb->data, (*skb)->data, length);
	else
		swap_buffer2(new_skb->data, (*skb)->data, length);
	*skb = new_skb;

	return true;
}

T
Troy Kisky 已提交
1334
/* During a receive, the bd_rx.cur points to the current incoming buffer.
L
Linus Torvalds 已提交
1335 1336 1337 1338
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
1339
static int
1340
fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
L
Linus Torvalds 已提交
1341
{
1342
	struct fec_enet_private *fep = netdev_priv(ndev);
1343
	struct fec_enet_priv_rx_q *rxq;
S
Sascha Hauer 已提交
1344
	struct bufdesc *bdp;
1345
	unsigned short status;
1346 1347
	struct  sk_buff *skb_new = NULL;
	struct  sk_buff *skb;
L
Linus Torvalds 已提交
1348 1349
	ushort	pkt_len;
	__u8 *data;
1350
	int	pkt_received = 0;
1351 1352 1353
	struct	bufdesc_ex *ebdp = NULL;
	bool	vlan_packet_rcvd = false;
	u16	vlan_tag;
1354
	int	index = 0;
1355
	bool	is_copybreak;
1356
	bool	need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
1357

1358 1359
#ifdef CONFIG_M532x
	flush_cache_all();
1360
#endif
1361 1362
	queue_id = FEC_ENET_GET_QUQUE(queue_id);
	rxq = fep->rx_queue[queue_id];
L
Linus Torvalds 已提交
1363 1364 1365 1366

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
T
Troy Kisky 已提交
1367
	bdp = rxq->bd.cur;
L
Linus Torvalds 已提交
1368

1369
	while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
L
Linus Torvalds 已提交
1370

1371 1372 1373 1374
		if (pkt_received >= budget)
			break;
		pkt_received++;

1375
		writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);
1376

S
Sascha Hauer 已提交
1377
		/* Check for errors. */
T
Troy Kisky 已提交
1378
		status ^= BD_ENET_RX_LAST;
S
Sascha Hauer 已提交
1379
		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
T
Troy Kisky 已提交
1380 1381
			   BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST |
			   BD_ENET_RX_CL)) {
1382
			ndev->stats.rx_errors++;
T
Troy Kisky 已提交
1383 1384 1385 1386 1387 1388 1389
			if (status & BD_ENET_RX_OV) {
				/* FIFO overrun */
				ndev->stats.rx_fifo_errors++;
				goto rx_processing_done;
			}
			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH
						| BD_ENET_RX_LAST)) {
S
Sascha Hauer 已提交
1390
				/* Frame too long or too short. */
1391
				ndev->stats.rx_length_errors++;
T
Troy Kisky 已提交
1392 1393
				if (status & BD_ENET_RX_LAST)
					netdev_err(ndev, "rcv is not +last\n");
S
Sascha Hauer 已提交
1394 1395
			}
			if (status & BD_ENET_RX_CR)	/* CRC Error */
1396
				ndev->stats.rx_crc_errors++;
T
Troy Kisky 已提交
1397 1398 1399
			/* Report late collisions as a frame error. */
			if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				ndev->stats.rx_frame_errors++;
S
Sascha Hauer 已提交
1400 1401
			goto rx_processing_done;
		}
L
Linus Torvalds 已提交
1402

S
Sascha Hauer 已提交
1403
		/* Process the incoming frame. */
1404
		ndev->stats.rx_packets++;
1405
		pkt_len = fec16_to_cpu(bdp->cbd_datlen);
1406
		ndev->stats.rx_bytes += pkt_len;
L
Linus Torvalds 已提交
1407

T
Troy Kisky 已提交
1408
		index = fec_enet_get_bd_index(bdp, &rxq->bd);
1409
		skb = rxq->rx_skbuff[index];
1410

1411 1412 1413 1414
		/* The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
1415 1416
		is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4,
						  need_swap);
1417 1418 1419 1420 1421 1422
		if (!is_copybreak) {
			skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
			if (unlikely(!skb_new)) {
				ndev->stats.rx_dropped++;
				goto rx_processing_done;
			}
1423 1424
			dma_unmap_single(&fep->pdev->dev,
					 fec32_to_cpu(bdp->cbd_bufaddr),
1425 1426 1427 1428 1429 1430 1431
					 FEC_ENET_RX_FRSIZE - fep->rx_align,
					 DMA_FROM_DEVICE);
		}

		prefetch(skb->data - NET_IP_ALIGN);
		skb_put(skb, pkt_len - 4);
		data = skb->data;
1432

1433 1434 1435
		if (!is_copybreak && need_swap)
			swap_buffer(data, pkt_len);

1436 1437 1438 1439 1440
#if !defined(CONFIG_M5272)
		if (fep->quirks & FEC_QUIRK_HAS_RACC)
			data = skb_pull_inline(skb, 2);
#endif

1441 1442 1443 1444 1445 1446 1447 1448
		/* Extract the enhanced buffer descriptor */
		ebdp = NULL;
		if (fep->bufdesc_ex)
			ebdp = (struct bufdesc_ex *)bdp;

		/* If this is a VLAN packet remove the VLAN Tag */
		vlan_packet_rcvd = false;
		if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1449 1450
		    fep->bufdesc_ex &&
		    (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
1451 1452 1453 1454 1455 1456
			/* Push and remove the vlan tag */
			struct vlan_hdr *vlan_header =
					(struct vlan_hdr *) (data + ETH_HLEN);
			vlan_tag = ntohs(vlan_header->h_vlan_TCI);

			vlan_packet_rcvd = true;
1457

1458
			memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
1459
			skb_pull(skb, VLAN_HLEN);
1460 1461
		}

1462
		skb->protocol = eth_type_trans(skb, ndev);
L
Linus Torvalds 已提交
1463

1464 1465
		/* Get receive timestamp from the skb */
		if (fep->hwts_rx_en && fep->bufdesc_ex)
1466
			fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
1467 1468 1469 1470
					  skb_hwtstamps(skb));

		if (fep->bufdesc_ex &&
		    (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
1471
			if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) {
1472 1473 1474 1475
				/* don't check it */
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				skb_checksum_none_assert(skb);
1476
			}
1477
		}
1478

1479 1480 1481 1482 1483
		/* Handle received VLAN packets */
		if (vlan_packet_rcvd)
			__vlan_hwaccel_put_tag(skb,
					       htons(ETH_P_8021Q),
					       vlan_tag);
1484

1485 1486 1487
		napi_gro_receive(&fep->napi, skb);

		if (is_copybreak) {
1488 1489
			dma_sync_single_for_device(&fep->pdev->dev,
						   fec32_to_cpu(bdp->cbd_bufaddr),
1490 1491 1492 1493 1494
						   FEC_ENET_RX_FRSIZE - fep->rx_align,
						   DMA_FROM_DEVICE);
		} else {
			rxq->rx_skbuff[index] = skb_new;
			fec_enet_new_rxbdp(ndev, bdp, skb_new);
S
Sascha Hauer 已提交
1495
		}
S
Sascha Hauer 已提交
1496

S
Sascha Hauer 已提交
1497 1498 1499
rx_processing_done:
		/* Clear the status flags for this buffer */
		status &= ~BD_ENET_RX_STATS;
L
Linus Torvalds 已提交
1500

S
Sascha Hauer 已提交
1501 1502
		/* Mark the buffer empty */
		status |= BD_ENET_RX_EMPTY;
1503

1504 1505 1506
		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

1507
			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
1508 1509 1510
			ebdp->cbd_prot = 0;
			ebdp->cbd_bdu = 0;
		}
1511 1512 1513 1514 1515
		/* Make sure the updates to rest of the descriptor are
		 * performed before transferring ownership.
		 */
		wmb();
		bdp->cbd_sc = cpu_to_fec16(status);
1516

S
Sascha Hauer 已提交
1517
		/* Update BD pointer to next entry */
T
Troy Kisky 已提交
1518
		bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
1519

S
Sascha Hauer 已提交
1520 1521 1522 1523
		/* Doing this here will keep the FEC running while we process
		 * incoming frames.  On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
1524
		writel(0, rxq->bd.reg_desc_active);
S
Sascha Hauer 已提交
1525
	}
T
Troy Kisky 已提交
1526
	rxq->bd.cur = bdp;
1527 1528
	return pkt_received;
}
L
Linus Torvalds 已提交
1529

1530 1531 1532 1533 1534 1535 1536 1537
/*
 * Poll every RX queue flagged in work_rx, spreading the NAPI budget
 * across queues.  A queue's work bit is cleared only when it was fully
 * drained within its share of the budget.  Returns packets received.
 */
static int
fec_enet_rx(struct net_device *ndev, int budget)
{
	int     pkt_received = 0;
	u16	queue_id;
	struct fec_enet_private *fep = netdev_priv(ndev);

	for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
		int ret;

		ret = fec_enet_rx_queue(ndev,
					budget - pkt_received, queue_id);

		if (ret < budget - pkt_received)
			clear_bit(queue_id, &fep->work_rx);

		pkt_received += ret;
	}
	return pkt_received;
}

1551 1552 1553 1554 1555 1556 1557 1558
/*
 * Translate interrupt-event bits into per-queue work bits.  The hardware
 * event bits do not match queue priority order, so queue 0 events map to
 * work bit 2 and queues 1/2 to bits 0/1 (processed first by the TX/RX
 * loops).  Returns true when any RX/TX event was pending.
 */
static bool
fec_enet_collect_events(struct fec_enet_private *fep, uint int_events)
{
	if (int_events == 0)
		return false;

	if (int_events & FEC_ENET_RXF)
		fep->work_rx |= (1 << 2);
	if (int_events & FEC_ENET_RXF_1)
		fep->work_rx |= (1 << 0);
	if (int_events & FEC_ENET_RXF_2)
		fep->work_rx |= (1 << 1);

	if (int_events & FEC_ENET_TXF)
		fep->work_tx |= (1 << 2);
	if (int_events & FEC_ENET_TXF_1)
		fep->work_tx |= (1 << 0);
	if (int_events & FEC_ENET_TXF_2)
		fep->work_tx |= (1 << 1);

	return true;
}

1574 1575 1576 1577 1578 1579 1580 1581
static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct fec_enet_private *fep = netdev_priv(ndev);
	uint int_events;
	irqreturn_t ret = IRQ_NONE;

1582
	int_events = readl(fep->hwp + FEC_IEVENT);
N
Nimrod Andy 已提交
1583
	writel(int_events, fep->hwp + FEC_IEVENT);
1584
	fec_enet_collect_events(fep, int_events);
1585

1586
	if ((fep->work_tx || fep->work_rx) && fep->link) {
1587
		ret = IRQ_HANDLED;
1588

N
Nimrod Andy 已提交
1589 1590
		if (napi_schedule_prep(&fep->napi)) {
			/* Disable the NAPI interrupts */
1591
			writel(FEC_NAPI_IMASK, fep->hwp + FEC_IMASK);
N
Nimrod Andy 已提交
1592 1593
			__napi_schedule(&fep->napi);
		}
1594
	}
1595

1596 1597 1598 1599
	if (int_events & FEC_ENET_MII) {
		ret = IRQ_HANDLED;
		complete(&fep->mdio_done);
	}
1600

1601 1602
	if (fep->ptp_clock)
		fec_ptp_check_pps_event(fep);
1603

1604 1605 1606
	return ret;
}

1607 1608 1609 1610
static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct fec_enet_private *fep = netdev_priv(ndev);
1611 1612 1613
	int pkts;

	pkts = fec_enet_rx(ndev, budget);
1614

1615 1616
	fec_enet_tx(ndev);

1617
	if (pkts < budget) {
1618
		napi_complete_done(napi, pkts);
1619 1620 1621 1622
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
	}
	return pkts;
}
1623

1624
/* ------------------------------------------------------------------------- */
1625
static void fec_get_mac(struct net_device *ndev)
L
Linus Torvalds 已提交
1626
{
1627
	struct fec_enet_private *fep = netdev_priv(ndev);
J
Jingoo Han 已提交
1628
	struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
1629
	unsigned char *iap, tmpaddr[ETH_ALEN];
L
Linus Torvalds 已提交
1630

1631 1632 1633 1634 1635 1636 1637 1638
	/*
	 * try to get mac address in following order:
	 *
	 * 1) module parameter via kernel command line in form
	 *    fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
	 */
	iap = macaddr;

1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650
	/*
	 * 2) from device tree data
	 */
	if (!is_valid_ether_addr(iap)) {
		struct device_node *np = fep->pdev->dev.of_node;
		if (np) {
			const char *mac = of_get_mac_address(np);
			if (mac)
				iap = (unsigned char *) mac;
		}
	}

1651
	/*
1652
	 * 3) from flash or fuse (via platform data)
1653 1654 1655 1656 1657 1658 1659
	 */
	if (!is_valid_ether_addr(iap)) {
#ifdef CONFIG_M5272
		if (FEC_FLASHMAC)
			iap = (unsigned char *)FEC_FLASHMAC;
#else
		if (pdata)
1660
			iap = (unsigned char *)&pdata->mac;
1661 1662 1663 1664
#endif
	}

	/*
1665
	 * 4) FEC mac registers set by bootloader
1666 1667
	 */
	if (!is_valid_ether_addr(iap)) {
1668 1669 1670 1671
		*((__be32 *) &tmpaddr[0]) =
			cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
		*((__be16 *) &tmpaddr[4]) =
			cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
1672
		iap = &tmpaddr[0];
L
Linus Torvalds 已提交
1673 1674
	}

1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686
	/*
	 * 5) random mac address
	 */
	if (!is_valid_ether_addr(iap)) {
		/* Report it and use a random ethernet address instead */
		netdev_err(ndev, "Invalid MAC address: %pM\n", iap);
		eth_hw_addr_random(ndev);
		netdev_info(ndev, "Using random MAC address: %pM\n",
			    ndev->dev_addr);
		return;
	}

1687
	memcpy(ndev->dev_addr, iap, ETH_ALEN);
L
Linus Torvalds 已提交
1688

1689 1690
	/* Adjust MAC if using macaddr */
	if (iap == macaddr)
S
Shawn Guo 已提交
1691
		 ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
L
Linus Torvalds 已提交
1692 1693
}

1694
/* ------------------------------------------------------------------------- */
L
Linus Torvalds 已提交
1695

1696 1697 1698
/*
 * Phy section
 */
1699
static void fec_enet_adjust_link(struct net_device *ndev)
L
Linus Torvalds 已提交
1700
{
1701
	struct fec_enet_private *fep = netdev_priv(ndev);
1702
	struct phy_device *phy_dev = ndev->phydev;
1703
	int status_change = 0;
L
Linus Torvalds 已提交
1704

1705 1706 1707
	/* Prevent a state halted on mii error */
	if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
		phy_dev->state = PHY_RESUMING;
1708
		return;
1709
	}
L
Linus Torvalds 已提交
1710

1711 1712 1713 1714 1715 1716 1717 1718
	/*
	 * If the netdev is down, or is going down, we're not interested
	 * in link state events, so just mark our idea of the link as down
	 * and ignore the event.
	 */
	if (!netif_running(ndev) || !netif_device_present(ndev)) {
		fep->link = 0;
	} else if (phy_dev->link) {
1719
		if (!fep->link) {
1720
			fep->link = phy_dev->link;
1721 1722
			status_change = 1;
		}
L
Linus Torvalds 已提交
1723

1724 1725
		if (fep->full_duplex != phy_dev->duplex) {
			fep->full_duplex = phy_dev->duplex;
1726
			status_change = 1;
1727
		}
1728 1729 1730 1731 1732 1733 1734

		if (phy_dev->speed != fep->speed) {
			fep->speed = phy_dev->speed;
			status_change = 1;
		}

		/* if any of the above changed restart the FEC */
1735 1736 1737
		if (status_change) {
			napi_disable(&fep->napi);
			netif_tx_lock_bh(ndev);
1738
			fec_restart(ndev);
1739
			netif_wake_queue(ndev);
1740
			netif_tx_unlock_bh(ndev);
1741 1742
			napi_enable(&fep->napi);
		}
1743 1744
	} else {
		if (fep->link) {
1745 1746
			napi_disable(&fep->napi);
			netif_tx_lock_bh(ndev);
1747
			fec_stop(ndev);
1748 1749
			netif_tx_unlock_bh(ndev);
			napi_enable(&fep->napi);
1750
			fep->link = phy_dev->link;
1751 1752
			status_change = 1;
		}
L
Linus Torvalds 已提交
1753
	}
1754

1755 1756 1757
	if (status_change)
		phy_print_status(phy_dev);
}
L
Linus Torvalds 已提交
1758

1759
static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
L
Linus Torvalds 已提交
1760
{
1761
	struct fec_enet_private *fep = bus->priv;
1762
	struct device *dev = &fep->pdev->dev;
1763
	unsigned long time_left;
1764 1765 1766
	int ret = 0;

	ret = pm_runtime_get_sync(dev);
1767
	if (ret < 0)
1768
		return ret;
L
Linus Torvalds 已提交
1769

1770
	fep->mii_timeout = 0;
1771
	reinit_completion(&fep->mdio_done);
1772 1773 1774 1775 1776 1777 1778

	/* start a read op */
	writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
		FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
1779 1780 1781 1782
	time_left = wait_for_completion_timeout(&fep->mdio_done,
			usecs_to_jiffies(FEC_MII_TIMEOUT));
	if (time_left == 0) {
		fep->mii_timeout = 1;
1783
		netdev_err(fep->netdev, "MDIO read timeout\n");
1784 1785
		ret = -ETIMEDOUT;
		goto out;
L
Linus Torvalds 已提交
1786 1787
	}

1788 1789 1790 1791 1792 1793 1794
	ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));

out:
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return ret;
1795
}
1796

1797 1798
/*
 * MDIO bus write: issue a write frame on the MII management interface
 * and wait for completion, mirroring fec_enet_mdio_read().  Returns 0 on
 * success or a negative errno on PM failure or timeout.
 */
static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct fec_enet_private *fep = bus->priv;
	struct device *dev = &fep->pdev->dev;
	unsigned long time_left;
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		return ret;
	else
		ret = 0;

	fep->mii_timeout = 0;
	reinit_completion(&fep->mdio_done);

	/* start a write op */
	writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
		FEC_MMFR_TA | FEC_MMFR_DATA(value),
		fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	time_left = wait_for_completion_timeout(&fep->mdio_done,
			usecs_to_jiffies(FEC_MII_TIMEOUT));
	if (time_left == 0) {
		fep->mii_timeout = 1;
		netdev_err(fep->netdev, "MDIO write timeout\n");
		ret  = -ETIMEDOUT;
	}

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return ret;
}
L
Linus Torvalds 已提交
1834

1835 1836 1837 1838 1839 1840 1841 1842 1843
/*
 * Enable or disable the FEC clocks (ahb, enet_out, ptp, ref) as a group.
 * The ptp clock is guarded by ptp_clk_mutex so the PTP code can observe
 * a consistent ptp_clk_on state.  On an enable failure, every clock that
 * was successfully enabled is unwound.  Returns 0 or a negative errno.
 */
static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;

	if (enable) {
		ret = clk_prepare_enable(fep->clk_ahb);
		if (ret)
			return ret;

		ret = clk_prepare_enable(fep->clk_enet_out);
		if (ret)
			goto failed_clk_enet_out;

		if (fep->clk_ptp) {
			mutex_lock(&fep->ptp_clk_mutex);
			ret = clk_prepare_enable(fep->clk_ptp);
			if (ret) {
				mutex_unlock(&fep->ptp_clk_mutex);
				goto failed_clk_ptp;
			} else {
				fep->ptp_clk_on = true;
			}
			mutex_unlock(&fep->ptp_clk_mutex);
		}

		ret = clk_prepare_enable(fep->clk_ref);
		if (ret)
			goto failed_clk_ref;
	} else {
		clk_disable_unprepare(fep->clk_ahb);
		clk_disable_unprepare(fep->clk_enet_out);
		if (fep->clk_ptp) {
			mutex_lock(&fep->ptp_clk_mutex);
			clk_disable_unprepare(fep->clk_ptp);
			fep->ptp_clk_on = false;
			mutex_unlock(&fep->ptp_clk_mutex);
		}
		clk_disable_unprepare(fep->clk_ref);
	}

	return 0;

failed_clk_ref:
	/* clk_ref itself failed to enable: unwind the ptp clock, not ref */
	if (fep->clk_ptp) {
		mutex_lock(&fep->ptp_clk_mutex);
		clk_disable_unprepare(fep->clk_ptp);
		fep->ptp_clk_on = false;
		mutex_unlock(&fep->ptp_clk_mutex);
	}
failed_clk_ptp:
	if (fep->clk_enet_out)
		clk_disable_unprepare(fep->clk_enet_out);
failed_clk_enet_out:
		clk_disable_unprepare(fep->clk_ahb);

	return ret;
}

1890
/* Find and attach the PHY for this interface.
 *
 * If the device tree supplied a phandle (fep->phy_node), connect to that
 * PHY directly.  Otherwise scan the MDIO bus for the dev_id'th attached
 * PHY; if none is found, fall back to the "fixed-0" bus and assume a
 * direct MAC-to-switch connection.
 *
 * Returns 0 on success or a negative errno if no PHY could be attached.
 */
static int fec_enet_mii_probe(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phy_dev = NULL;
	char mdio_bus_id[MII_BUS_ID_SIZE];
	char phy_name[MII_BUS_ID_SIZE + 3];
	int phy_id;
	int dev_id = fep->dev_id;

	if (fep->phy_node) {
		phy_dev = of_phy_connect(ndev, fep->phy_node,
					 &fec_enet_adjust_link, 0,
					 fep->phy_interface);
		if (!phy_dev)
			return -ENODEV;
	} else {
		/* check for attached phy */
		for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
			if (!mdiobus_is_registered_device(fep->mii_bus, phy_id))
				continue;
			/* skip PHYs claimed by lower-numbered FEC instances */
			if (dev_id--)
				continue;
			strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
			break;
		}

		if (phy_id >= PHY_MAX_ADDR) {
			netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
			strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
			phy_id = 0;
		}

		snprintf(phy_name, sizeof(phy_name),
			 PHY_ID_FMT, mdio_bus_id, phy_id);
		phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
				      fep->phy_interface);
	}

	if (IS_ERR(phy_dev)) {
		netdev_err(ndev, "could not attach to PHY\n");
		return PTR_ERR(phy_dev);
	}

	/* mask with MAC supported features */
	if (fep->quirks & FEC_QUIRK_HAS_GBIT) {
		phy_dev->supported &= PHY_GBIT_FEATURES;
		/* half duplex is not supported at gigabit speed by this MAC */
		phy_dev->supported &= ~SUPPORTED_1000baseT_Half;
#if !defined(CONFIG_M5272)
		phy_dev->supported |= SUPPORTED_Pause;
#endif
	}
	else
		phy_dev->supported &= PHY_BASIC_FEATURES;

	phy_dev->advertising = phy_dev->supported;

	/* link state is (re)established by fec_enet_adjust_link() */
	fep->link = 0;
	fep->full_duplex = 0;

	phy_attached_info(phy_dev);

	return 0;
}

1954
static int fec_enet_mii_init(struct platform_device *pdev)
1955
{
1956
	static struct mii_bus *fec0_mii_bus;
1957 1958
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);
1959
	struct device_node *node;
1960
	int err = -ENXIO;
1961
	u32 mii_speed, holdtime;
1962

1963
	/*
1964
	 * The i.MX28 dual fec interfaces are not equal.
1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 1978
	 * Here are the differences:
	 *
	 *  - fec0 supports MII & RMII modes while fec1 only supports RMII
	 *  - fec0 acts as the 1588 time master while fec1 is slave
	 *  - external phys can only be configured by fec0
	 *
	 * That is to say fec1 can not work independently. It only works
	 * when fec0 is working. The reason behind this design is that the
	 * second interface is added primarily for Switch mode.
	 *
	 * Because of the last point above, both phys are attached on fec0
	 * mdio interface in board design, and need to be configured by
	 * fec0 mii_bus.
	 */
1979
	if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
1980
		/* fec1 uses fec0 mii_bus */
L
Lothar Waßmann 已提交
1981 1982 1983 1984 1985 1986
		if (mii_cnt && fec0_mii_bus) {
			fep->mii_bus = fec0_mii_bus;
			mii_cnt++;
			return 0;
		}
		return -ENOENT;
1987 1988
	}

1989
	fep->mii_timeout = 0;
L
Linus Torvalds 已提交
1990

1991 1992
	/*
	 * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
S
Shawn Guo 已提交
1993 1994 1995 1996 1997
	 *
	 * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
	 * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'.  The i.MX28
	 * Reference Manual has an error on this, and gets fixed on i.MX6Q
	 * document.
1998
	 */
1999
	mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
2000
	if (fep->quirks & FEC_QUIRK_ENET_MAC)
2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025
		mii_speed--;
	if (mii_speed > 63) {
		dev_err(&pdev->dev,
			"fec clock (%lu) to fast to get right mii speed\n",
			clk_get_rate(fep->clk_ipg));
		err = -EINVAL;
		goto err_out;
	}

	/*
	 * The i.MX28 and i.MX6 types have another filed in the MSCR (aka
	 * MII_SPEED) register that defines the MDIO output hold time. Earlier
	 * versions are RAZ there, so just ignore the difference and write the
	 * register always.
	 * The minimal hold time according to IEE802.3 (clause 22) is 10 ns.
	 * HOLDTIME + 1 is the number of clk cycles the fec is holding the
	 * output.
	 * The HOLDTIME bitfield takes values between 0 and 7 (inclusive).
	 * Given that ceil(clkrate / 5000000) <= 64, the calculation for
	 * holdtime cannot result in a value greater than 3.
	 */
	holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1;

	fep->phy_speed = mii_speed << 1 | holdtime << 8;

2026
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
L
Linus Torvalds 已提交
2027

2028 2029 2030 2031
	fep->mii_bus = mdiobus_alloc();
	if (fep->mii_bus == NULL) {
		err = -ENOMEM;
		goto err_out;
L
Linus Torvalds 已提交
2032 2033
	}

2034 2035 2036
	fep->mii_bus->name = "fec_enet_mii_bus";
	fep->mii_bus->read = fec_enet_mdio_read;
	fep->mii_bus->write = fec_enet_mdio_write;
2037 2038
	snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		pdev->name, fep->dev_id + 1);
2039 2040 2041
	fep->mii_bus->priv = fep;
	fep->mii_bus->parent = &pdev->dev;

2042 2043 2044 2045 2046 2047 2048 2049 2050
	node = of_get_child_by_name(pdev->dev.of_node, "mdio");
	if (node) {
		err = of_mdiobus_register(fep->mii_bus, node);
		of_node_put(node);
	} else {
		err = mdiobus_register(fep->mii_bus);
	}

	if (err)
2051
		goto err_out_free_mdiobus;
L
Linus Torvalds 已提交
2052

L
Lothar Waßmann 已提交
2053 2054
	mii_cnt++;

2055
	/* save fec0 mii_bus */
2056
	if (fep->quirks & FEC_QUIRK_SINGLE_MDIO)
2057 2058
		fec0_mii_bus = fep->mii_bus;

2059
	return 0;
L
Linus Torvalds 已提交
2060

2061 2062 2063 2064
err_out_free_mdiobus:
	mdiobus_free(fep->mii_bus);
err_out:
	return err;
L
Linus Torvalds 已提交
2065 2066
}

2067
/* Drop one reference to the (possibly shared) MDIO bus; unregister and
 * free it only when the last FEC instance using it goes away.
 * mii_cnt is the file-global user count maintained by fec_enet_mii_init().
 */
static void fec_enet_mii_remove(struct fec_enet_private *fep)
{
	if (--mii_cnt == 0) {
		mdiobus_unregister(fep->mii_bus);
		mdiobus_free(fep->mii_bus);
	}
}

2075
/* ethtool -i: report driver name, a fixed version string, and bus info. */
static void fec_enet_get_drvinfo(struct net_device *ndev,
				 struct ethtool_drvinfo *info)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	strlcpy(info->driver, fep->pdev->dev.driver->name,
		sizeof(info->driver));
	strlcpy(info->version, "Revision: 1.0", sizeof(info->version));
	strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
}

2086 2087 2088 2089 2090 2091 2092 2093 2094 2095 2096 2097 2098 2099 2100
/* ethtool register-dump size: the full MMIO window of the controller,
 * or 0 if the platform resource is missing.
 */
static int fec_enet_get_regs_len(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct resource *r;
	int s = 0;

	r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0);
	if (r)
		s = resource_size(r);

	return s;
}

/* List of registers that can safely be read to dump them with ethtool.
 * Only these offsets are touched by fec_enet_get_regs(); reading other
 * addresses in the window could have side effects or fault.
 * The two variants reflect the different register layouts: ENET-class
 * MACs (ColdFire MCF52xx/MCF532x and ARM i.MX) vs. the older FEC block.
 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
	defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
static u32 fec_enet_register_offset[] = {
	FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
	FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
	FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1,
	FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH,
	FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW,
	FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1,
	FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2,
	FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0,
	FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
	FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2,
	FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1,
	FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME,
	RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
	RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
	RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
	RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
	RMON_T_P_GTE2048, RMON_T_OCTETS,
	IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
	IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
	IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
	RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
	RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
	RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
	RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
	RMON_R_P_GTE2048, RMON_R_OCTETS,
	IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
	IEEE_R_FDXFC, IEEE_R_OCTETS_OK
};
#else
static u32 fec_enet_register_offset[] = {
	FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0,
	FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0,
	FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED,
	FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL,
	FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH,
	FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0,
	FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0,
	FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0,
	FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2
};
#endif

/* ethtool register dump: zero the whole buffer, then fill in only the
 * offsets listed in fec_enet_register_offset[] (safe-to-read registers).
 * Unlisted offsets therefore read back as 0 in the dump.
 */
static void fec_enet_get_regs(struct net_device *ndev,
			      struct ethtool_regs *regs, void *regbuf)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
	u32 *buf = (u32 *)regbuf;
	u32 i, off;

	memset(buf, 0, regs->len);

	for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
		/* byte offset -> u32 index into both the MMIO window and buf */
		off = fec_enet_register_offset[i] / 4;
		buf[off] = readl(&theregs[off]);
	}
}

2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183 2184 2185 2186 2187 2188 2189
/* ethtool -T: advertise hardware timestamping only when the controller
 * uses extended buffer descriptors (bufdesc_ex carries timestamps);
 * otherwise fall back to the generic software-only capabilities.
 */
static int fec_enet_get_ts_info(struct net_device *ndev,
				struct ethtool_ts_info *info)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (fep->bufdesc_ex) {

		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
					SOF_TIMESTAMPING_RX_SOFTWARE |
					SOF_TIMESTAMPING_SOFTWARE |
					SOF_TIMESTAMPING_TX_HARDWARE |
					SOF_TIMESTAMPING_RX_HARDWARE |
					SOF_TIMESTAMPING_RAW_HARDWARE;
		if (fep->ptp_clock)
			info->phc_index = ptp_clock_index(fep->ptp_clock);
		else
			info->phc_index = -1;

		info->tx_types = (1 << HWTSTAMP_TX_OFF) |
				 (1 << HWTSTAMP_TX_ON);

		info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
				   (1 << HWTSTAMP_FILTER_ALL);
		return 0;
	} else {
		return ethtool_op_get_ts_info(ndev, info);
	}
}

G
Guenter Roeck 已提交
2190 2191
#if !defined(CONFIG_M5272)

2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206
/* ethtool -a: report flow-control state.  The hardware can only enable
 * or disable pause for both directions together, so rx mirrors tx.
 */
static void fec_enet_get_pauseparam(struct net_device *ndev,
				    struct ethtool_pauseparam *pause)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0;
	pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0;
	pause->rx_pause = pause->tx_pause;
}

/* ethtool -A: configure flow control.  tx and rx pause must match
 * (hardware limitation); updates the PHY advertisement and restarts
 * the MAC so the new setting takes effect.
 */
static int fec_enet_set_pauseparam(struct net_device *ndev,
				   struct ethtool_pauseparam *pause)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (!ndev->phydev)
		return -ENODEV;

	if (pause->tx_pause != pause->rx_pause) {
		netdev_info(ndev,
			"hardware only support enable/disable both tx and rx");
		return -EINVAL;
	}

	fep->pause_flag = 0;

	/* tx pause must be same as rx pause */
	fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
	fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;

	/* NOTE(review): ADVERTISED_Pause is used on both the supported and
	 * advertising masks here; the SUPPORTED_/ADVERTISED_ Pause bits have
	 * the same value, so this works — confirm against ethtool.h.
	 */
	if (pause->rx_pause || pause->autoneg) {
		ndev->phydev->supported |= ADVERTISED_Pause;
		ndev->phydev->advertising |= ADVERTISED_Pause;
	} else {
		ndev->phydev->supported &= ~ADVERTISED_Pause;
		ndev->phydev->advertising &= ~ADVERTISED_Pause;
	}

	if (pause->autoneg) {
		/* stop the MAC before renegotiating so the link comes back
		 * up with the new pause advertisement */
		if (netif_running(ndev))
			fec_stop(ndev);
		phy_start_aneg(ndev->phydev);
	}
	if (netif_running(ndev)) {
		/* quiesce NAPI and the tx path around the MAC restart */
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		fec_restart(ndev);
		netif_wake_queue(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
	}

	return 0;
}

2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312
/* Map of ethtool statistics names to the hardware MIB counter register
 * offsets they are read from (see fec_enet_update_ethtool_stats()).
 */
static const struct fec_stat {
	char name[ETH_GSTRING_LEN];	/* name shown by "ethtool -S" */
	u16 offset;			/* register offset from fep->hwp */
} fec_stats[] = {
	/* RMON TX */
	{ "tx_dropped", RMON_T_DROP },
	{ "tx_packets", RMON_T_PACKETS },
	{ "tx_broadcast", RMON_T_BC_PKT },
	{ "tx_multicast", RMON_T_MC_PKT },
	{ "tx_crc_errors", RMON_T_CRC_ALIGN },
	{ "tx_undersize", RMON_T_UNDERSIZE },
	{ "tx_oversize", RMON_T_OVERSIZE },
	{ "tx_fragment", RMON_T_FRAG },
	{ "tx_jabber", RMON_T_JAB },
	{ "tx_collision", RMON_T_COL },
	{ "tx_64byte", RMON_T_P64 },
	{ "tx_65to127byte", RMON_T_P65TO127 },
	{ "tx_128to255byte", RMON_T_P128TO255 },
	{ "tx_256to511byte", RMON_T_P256TO511 },
	{ "tx_512to1023byte", RMON_T_P512TO1023 },
	{ "tx_1024to2047byte", RMON_T_P1024TO2047 },
	{ "tx_GTE2048byte", RMON_T_P_GTE2048 },
	{ "tx_octets", RMON_T_OCTETS },

	/* IEEE TX */
	{ "IEEE_tx_drop", IEEE_T_DROP },
	{ "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
	{ "IEEE_tx_1col", IEEE_T_1COL },
	{ "IEEE_tx_mcol", IEEE_T_MCOL },
	{ "IEEE_tx_def", IEEE_T_DEF },
	{ "IEEE_tx_lcol", IEEE_T_LCOL },
	{ "IEEE_tx_excol", IEEE_T_EXCOL },
	{ "IEEE_tx_macerr", IEEE_T_MACERR },
	{ "IEEE_tx_cserr", IEEE_T_CSERR },
	{ "IEEE_tx_sqe", IEEE_T_SQE },
	{ "IEEE_tx_fdxfc", IEEE_T_FDXFC },
	{ "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },

	/* RMON RX */
	{ "rx_packets", RMON_R_PACKETS },
	{ "rx_broadcast", RMON_R_BC_PKT },
	{ "rx_multicast", RMON_R_MC_PKT },
	{ "rx_crc_errors", RMON_R_CRC_ALIGN },
	{ "rx_undersize", RMON_R_UNDERSIZE },
	{ "rx_oversize", RMON_R_OVERSIZE },
	{ "rx_fragment", RMON_R_FRAG },
	{ "rx_jabber", RMON_R_JAB },
	{ "rx_64byte", RMON_R_P64 },
	{ "rx_65to127byte", RMON_R_P65TO127 },
	{ "rx_128to255byte", RMON_R_P128TO255 },
	{ "rx_256to511byte", RMON_R_P256TO511 },
	{ "rx_512to1023byte", RMON_R_P512TO1023 },
	{ "rx_1024to2047byte", RMON_R_P1024TO2047 },
	{ "rx_GTE2048byte", RMON_R_P_GTE2048 },
	{ "rx_octets", RMON_R_OCTETS },

	/* IEEE RX */
	{ "IEEE_rx_drop", IEEE_R_DROP },
	{ "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
	{ "IEEE_rx_crc", IEEE_R_CRC },
	{ "IEEE_rx_align", IEEE_R_ALIGN },
	{ "IEEE_rx_macerr", IEEE_R_MACERR },
	{ "IEEE_rx_fdxfc", IEEE_R_FDXFC },
	{ "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
};

2313 2314
/* Byte size of the cached statistics snapshot (one u64 per counter). */
#define FEC_STATS_SIZE		(ARRAY_SIZE(fec_stats) * sizeof(u64))

/* Snapshot every hardware MIB counter listed in fec_stats[] into the
 * fep->ethtool_stats cache, which fec_enet_get_ethtool_stats() reports.
 */
static void fec_enet_update_ethtool_stats(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
		fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
}

/* ethtool -S: refresh the counter cache from hardware only while the
 * interface is up (registers may be unclocked otherwise), then copy the
 * cached snapshot to userspace.
 */
static void fec_enet_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats, u64 *data)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	if (netif_running(dev))
		fec_enet_update_ethtool_stats(dev);

	memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
}

/* Provide the statistic names for ETH_SS_STATS, in fec_stats[] order. */
static void fec_enet_get_strings(struct net_device *netdev,
	u32 stringset, u8 *data)
{
	int i;
	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
				fec_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

/* Number of strings/statistics in the requested string set. */
static int fec_enet_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(fec_stats);
	default:
		return -EOPNOTSUPP;
	}
}
2357 2358 2359 2360 2361 2362

#else	/* !defined(CONFIG_M5272) */
/* The M5272 FEC exposes no MIB counter block: keep a zero-size stats
 * snapshot and a no-op updater so common code can call it unconditionally.
 */
#define FEC_STATS_SIZE	0
static inline void fec_enet_update_ethtool_stats(struct net_device *dev)
{
}
#endif /* !defined(CONFIG_M5272) */
2364

2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403
/* ITR clock source is enet system clock (clk_ahb).
 * TCTT unit is cycle_ns * 64 cycle
 * So, the ICTT value = X us / (cycle_ns * 64)
 *
 * Convert a microsecond interrupt-coalescing delay into ITR timer ticks.
 */
static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	return us * (fep->itr_clk_rate / 64000) / 1000;
}

/* Set threshold for interrupt coalescing.
 * Programs the frame-count (ICFT) and timer (ICTT) thresholds from the
 * values cached in fep, enabling coalescing on queue 0 and, on AVB-capable
 * controllers, queues 1 and 2 as well.  All four cached values must be
 * non-zero; otherwise the registers are left untouched.
 */
static void fec_enet_itr_coal_set(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int rx_itr, tx_itr;

	/* Must be greater than zero to avoid unpredictable behavior */
	if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
	    !fep->tx_time_itr || !fep->tx_pkts_itr)
		return;

	/* Select enet system clock as Interrupt Coalescing
	 * timer Clock Source
	 */
	rx_itr = FEC_ITR_CLK_SEL;
	tx_itr = FEC_ITR_CLK_SEL;

	/* set ICFT and ICTT */
	rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
	rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr));
	tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
	tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr));

	rx_itr |= FEC_ITR_EN;
	tx_itr |= FEC_ITR_EN;

	writel(tx_itr, fep->hwp + FEC_TXIC0);
	writel(rx_itr, fep->hwp + FEC_RXIC0);
	if (fep->quirks & FEC_QUIRK_HAS_AVB) {
		writel(tx_itr, fep->hwp + FEC_TXIC1);
		writel(rx_itr, fep->hwp + FEC_RXIC1);
		writel(tx_itr, fep->hwp + FEC_TXIC2);
		writel(rx_itr, fep->hwp + FEC_RXIC2);
	}
}

/* ethtool -c: report the cached interrupt-coalescing settings.
 * Only controllers with FEC_QUIRK_HAS_COALESCE support this.
 */
static int
fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
		return -EOPNOTSUPP;

	ec->rx_coalesce_usecs = fep->rx_time_itr;
	ec->rx_max_coalesced_frames = fep->rx_pkts_itr;

	ec->tx_coalesce_usecs = fep->tx_time_itr;
	ec->tx_max_coalesced_frames = fep->tx_pkts_itr;

	return 0;
}

static int
fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int cycle;

2435
	if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
2436 2437 2438
		return -EOPNOTSUPP;

	if (ec->rx_max_coalesced_frames > 255) {
2439
		pr_err("Rx coalesced frames exceed hardware limitation\n");
2440 2441 2442 2443
		return -EINVAL;
	}

	if (ec->tx_max_coalesced_frames > 255) {
2444
		pr_err("Tx coalesced frame exceed hardware limitation\n");
2445 2446 2447 2448 2449
		return -EINVAL;
	}

	cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
	if (cycle > 0xFFFF) {
2450
		pr_err("Rx coalesced usec exceed hardware limitation\n");
2451 2452 2453 2454 2455
		return -EINVAL;
	}

	cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
	if (cycle > 0xFFFF) {
2456
		pr_err("Rx coalesced usec exceed hardware limitation\n");
2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483
		return -EINVAL;
	}

	fep->rx_time_itr = ec->rx_coalesce_usecs;
	fep->rx_pkts_itr = ec->rx_max_coalesced_frames;

	fep->tx_time_itr = ec->tx_coalesce_usecs;
	fep->tx_pkts_itr = ec->tx_max_coalesced_frames;

	fec_enet_itr_coal_set(ndev);

	return 0;
}

/* Apply the driver's default coalescing parameters at init time by
 * funnelling them through the normal ethtool setter (which validates
 * and programs the hardware).
 */
static void fec_enet_itr_coal_init(struct net_device *ndev)
{
	struct ethtool_coalesce ec;

	ec.rx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
	ec.rx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;

	ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
	ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;

	fec_enet_set_coalesce(ndev, &ec);
}

2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521
/* ethtool tunable getter: the only tunable this MAC exposes is the
 * receive copybreak threshold.  Any other id is rejected with -EINVAL.
 */
static int fec_enet_get_tunable(struct net_device *netdev,
				const struct ethtool_tunable *tuna,
				void *data)
{
	struct fec_enet_private *fep = netdev_priv(netdev);

	if (tuna->id != ETHTOOL_RX_COPYBREAK)
		return -EINVAL;

	*(u32 *)data = fep->rx_copybreak;
	return 0;
}

/* ethtool tunable setter: only the receive copybreak threshold is
 * settable; everything else returns -EINVAL.
 */
static int fec_enet_set_tunable(struct net_device *netdev,
				const struct ethtool_tunable *tuna,
				const void *data)
{
	struct fec_enet_private *fep = netdev_priv(netdev);

	if (tuna->id != ETHTOOL_RX_COPYBREAK)
		return -EINVAL;

	fep->rx_copybreak = *(u32 *)data;
	return 0;
}

N
Nimrod Andy 已提交
2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559
/* ethtool Wake-on-LAN getter: only magic-packet wake is possibly
 * supported, and only when the platform flagged it (wol_flag).
 */
static void
fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) {
		wol->supported = WAKE_MAGIC;
		wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0;
	} else {
		wol->supported = wol->wolopts = 0;
	}
}

/* ethtool Wake-on-LAN setter: accept only WAKE_MAGIC (or none), mirror
 * the choice into the device wakeup flag, and arm/disarm IRQ 0 as the
 * wake source accordingly.
 */
static int
fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET))
		return -EINVAL;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
	if (device_may_wakeup(&ndev->dev)) {
		fep->wol_flag |= FEC_WOL_FLAG_ENABLE;
		if (fep->irq[0] > 0)
			enable_irq_wake(fep->irq[0]);
	} else {
		fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE);
		if (fep->irq[0] > 0)
			disable_irq_wake(fep->irq[0]);
	}

	return 0;
}

S
stephen hemminger 已提交
2560
/* ethtool operations table.  Pause-frame and MIB-statistics callbacks
 * are compiled out on M5272, which lacks the corresponding hardware.
 */
static const struct ethtool_ops fec_enet_ethtool_ops = {
	.get_drvinfo		= fec_enet_get_drvinfo,
	.get_regs_len		= fec_enet_get_regs_len,
	.get_regs		= fec_enet_get_regs,
	.nway_reset		= phy_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= fec_enet_get_coalesce,
	.set_coalesce		= fec_enet_set_coalesce,
#ifndef CONFIG_M5272
	.get_pauseparam		= fec_enet_get_pauseparam,
	.set_pauseparam		= fec_enet_set_pauseparam,
	.get_strings		= fec_enet_get_strings,
	.get_ethtool_stats	= fec_enet_get_ethtool_stats,
	.get_sset_count		= fec_enet_get_sset_count,
#endif
	.get_ts_info		= fec_enet_get_ts_info,
	.get_tunable		= fec_enet_get_tunable,
	.set_tunable		= fec_enet_set_tunable,
	.get_wol		= fec_enet_get_wol,
	.set_wol		= fec_enet_set_wol,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
L
Linus Torvalds 已提交
2583

2584
/* Netdev ioctl handler: hardware-timestamp requests are handled by the
 * PTP code when extended descriptors are in use; everything else is
 * forwarded to the attached PHY.
 */
static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	if (fep->bufdesc_ex) {
		if (cmd == SIOCSHWTSTAMP)
			return fec_ptp_set(ndev, rq);
		if (cmd == SIOCGHWTSTAMP)
			return fec_ptp_get(ndev, rq);
	}

	return phy_mii_ioctl(phydev, rq, cmd);
}

2605
/* Release all RX skbs (with their DMA mappings) and all TX bounce
 * buffers/skbs for every queue.  Safe to call on partially populated
 * rings: NULL entries are skipped (kfree/dev_kfree_skb accept NULL).
 */
static void fec_enet_free_buffers(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int i;
	struct sk_buff *skb;
	struct bufdesc	*bdp;
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	unsigned int q;

	for (q = 0; q < fep->num_rx_queues; q++) {
		rxq = fep->rx_queue[q];
		bdp = rxq->bd.base;
		for (i = 0; i < rxq->bd.ring_size; i++) {
			skb = rxq->rx_skbuff[i];
			rxq->rx_skbuff[i] = NULL;
			if (skb) {
				/* unmap the receive buffer before freeing */
				dma_unmap_single(&fep->pdev->dev,
						 fec32_to_cpu(bdp->cbd_bufaddr),
						 FEC_ENET_RX_FRSIZE - fep->rx_align,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(skb);
			}
			bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
		}
	}

	for (q = 0; q < fep->num_tx_queues; q++) {
		txq = fep->tx_queue[q];
		bdp = txq->bd.base;
		for (i = 0; i < txq->bd.ring_size; i++) {
			kfree(txq->tx_bounce[i]);
			txq->tx_bounce[i] = NULL;
			skb = txq->tx_skbuff[i];
			txq->tx_skbuff[i] = NULL;
			dev_kfree_skb(skb);
		}
	}
}
S
Sascha Hauer 已提交
2644

2645 2646 2647 2648 2649 2650 2651 2652 2653 2654
static void fec_enet_free_queue(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;
	struct fec_enet_priv_tx_q *txq;

	for (i = 0; i < fep->num_tx_queues; i++)
		if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
			txq = fep->tx_queue[i];
			dma_free_coherent(NULL,
T
Troy Kisky 已提交
2655
					  txq->bd.ring_size * TSO_HEADER_SIZE,
2656 2657 2658 2659 2660
					  txq->tso_hdrs,
					  txq->tso_hdrs_dma);
		}

	for (i = 0; i < fep->num_rx_queues; i++)
2661
		kfree(fep->rx_queue[i]);
2662
	for (i = 0; i < fep->num_tx_queues; i++)
2663
		kfree(fep->tx_queue[i]);
2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680
}

static int fec_enet_alloc_queue(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;
	int ret = 0;
	struct fec_enet_priv_tx_q *txq;

	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = kzalloc(sizeof(*txq), GFP_KERNEL);
		if (!txq) {
			ret = -ENOMEM;
			goto alloc_failed;
		}

		fep->tx_queue[i] = txq;
T
Troy Kisky 已提交
2681 2682
		txq->bd.ring_size = TX_RING_SIZE;
		fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;
2683 2684 2685

		txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
		txq->tx_wake_threshold =
T
Troy Kisky 已提交
2686
			(txq->bd.ring_size - txq->tx_stop_threshold) / 2;
2687 2688

		txq->tso_hdrs = dma_alloc_coherent(NULL,
T
Troy Kisky 已提交
2689
					txq->bd.ring_size * TSO_HEADER_SIZE,
2690 2691 2692 2693 2694 2695
					&txq->tso_hdrs_dma,
					GFP_KERNEL);
		if (!txq->tso_hdrs) {
			ret = -ENOMEM;
			goto alloc_failed;
		}
2696
	}
2697 2698 2699 2700 2701 2702 2703 2704 2705

	for (i = 0; i < fep->num_rx_queues; i++) {
		fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]),
					   GFP_KERNEL);
		if (!fep->rx_queue[i]) {
			ret = -ENOMEM;
			goto alloc_failed;
		}

T
Troy Kisky 已提交
2706 2707
		fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE;
		fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size;
2708 2709 2710 2711 2712 2713
	}
	return ret;

alloc_failed:
	fec_enet_free_queue(ndev);
	return ret;
S
Sascha Hauer 已提交
2714 2715
}

2716 2717
/* Populate one RX ring with freshly allocated, DMA-mapped skbs and mark
 * every descriptor empty (owned by hardware).  The last descriptor gets
 * the WRAP bit.  On failure all buffers of all queues are released.
 *
 * Returns 0 on success, -ENOMEM on allocation/mapping failure.
 */
static int
fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int i;
	struct sk_buff *skb;
	struct bufdesc	*bdp;
	struct fec_enet_priv_rx_q *rxq;

	rxq = fep->rx_queue[queue];
	bdp = rxq->bd.base;
	for (i = 0; i < rxq->bd.ring_size; i++) {
		skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
		if (!skb)
			goto err_alloc;

		/* fec_enet_new_rxbdp() DMA-maps skb->data into the bdp */
		if (fec_enet_new_rxbdp(ndev, bdp, skb)) {
			dev_kfree_skb(skb);
			goto err_alloc;
		}

		rxq->rx_skbuff[i] = skb;
		bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
		}

		bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
	}

	/* Set the last buffer to wrap. */
	bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
	bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
	return 0;

 err_alloc:
	fec_enet_free_buffers(ndev);
	return -ENOMEM;
}

/* Prepare one TX ring: allocate a bounce buffer per descriptor (used for
 * badly aligned transmit data) and clear each descriptor.  The last
 * descriptor gets the WRAP bit.  On failure all buffers are released.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int
fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int i;
	struct bufdesc  *bdp;
	struct fec_enet_priv_tx_q *txq;

	txq = fep->tx_queue[queue];
	bdp = txq->bd.base;
	for (i = 0; i < txq->bd.ring_size; i++) {
		txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
		if (!txq->tx_bounce[i])
			goto err_alloc;

		bdp->cbd_sc = cpu_to_fec16(0);
		bdp->cbd_bufaddr = cpu_to_fec32(0);

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
		}

		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
	}

	/* Set the last buffer to wrap. */
	bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
	bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);

	return 0;

 err_alloc:
	fec_enet_free_buffers(ndev);
	return -ENOMEM;
}

2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809
/* Populate every RX and TX ring for the device.  The per-queue helpers
 * already tear down all buffers (fec_enet_free_buffers()) on failure,
 * so a failing queue simply propagates -ENOMEM.
 */
static int fec_enet_alloc_buffers(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int q;

	for (q = 0; q < fep->num_rx_queues; q++)
		if (fec_enet_alloc_rxq_buffers(ndev, q))
			return -ENOMEM;

	for (q = 0; q < fep->num_tx_queues; q++)
		if (fec_enet_alloc_txq_buffers(ndev, q))
			return -ENOMEM;

	return 0;
}

L
Linus Torvalds 已提交
2810
/* ndo_open: bring the interface up.
 *
 * Sequence: take a runtime-PM reference, select the default pinctrl
 * state, enable clocks, allocate ring buffers, reset/program the MAC
 * (fec_restart) *before* probing the PHY, then attach the PHY and start
 * NAPI and the transmit queues.  Error paths unwind in reverse order.
 */
static int
fec_enet_open(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;

	ret = pm_runtime_get_sync(&fep->pdev->dev);
	if (ret < 0)
		return ret;

	pinctrl_pm_select_default_state(&fep->pdev->dev);
	ret = fec_enet_clk_enable(ndev, true);
	if (ret)
		goto clk_enable;

	/* I should reset the ring buffers here, but I don't yet know
	 * a simple way to do that.
	 */

	ret = fec_enet_alloc_buffers(ndev);
	if (ret)
		goto err_enet_alloc;

	/* Init MAC prior to mii bus probe */
	fec_restart(ndev);

	/* Probe and connect to PHY when open the interface */
	ret = fec_enet_mii_probe(ndev);
	if (ret)
		goto err_enet_mii_probe;

	/* errata workaround: keep cpuidle out of states that break FEC IRQs */
	if (fep->quirks & FEC_QUIRK_ERR006687)
		imx6q_cpuidle_fec_irqs_used();

	napi_enable(&fep->napi);
	phy_start(ndev->phydev);
	netif_tx_start_all_queues(ndev);

	device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
				 FEC_WOL_FLAG_ENABLE);

	return 0;

err_enet_mii_probe:
	fec_enet_free_buffers(ndev);
err_enet_alloc:
	fec_enet_clk_enable(ndev, false);
clk_enable:
	pm_runtime_mark_last_busy(&fep->pdev->dev);
	pm_runtime_put_autosuspend(&fep->pdev->dev);
	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
	return ret;
}

/* ndo_stop: bring the interface down.
 *
 * Stops the PHY, quiesces NAPI/TX and halts the MAC (only if the netdev
 * is still present), detaches the PHY, snapshots the MIB counters
 * (they are lost once the block is unclocked), then drops clocks,
 * pinctrl state and the runtime-PM reference before freeing buffers.
 */
static int
fec_enet_close(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	phy_stop(ndev->phydev);

	if (netif_device_present(ndev)) {
		napi_disable(&fep->napi);
		netif_tx_disable(ndev);
		fec_stop(ndev);
	}

	phy_disconnect(ndev->phydev);

	/* release the errata cpuidle constraint taken in fec_enet_open() */
	if (fep->quirks & FEC_QUIRK_ERR006687)
		imx6q_cpuidle_fec_irqs_unused();

	fec_enet_update_ethtool_stats(ndev);

	fec_enet_clk_enable(ndev, false);
	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
	pm_runtime_mark_last_busy(&fep->pdev->dev);
	pm_runtime_put_autosuspend(&fep->pdev->dev);

	fec_enet_free_buffers(ndev);

	return 0;
}

/* Set or clear the multicast filter for this adaptor.
 * Skeleton taken from sunlance driver.
 * The CPM Ethernet implementation allows Multicast as well as individual
 * MAC address filtering.  Some of the drivers check to make sure it is
 * a group multicast address, and discard those that are not.  I guess I
 * will do the same for now, but just remove the test if you want
 * individual filtering as well (do the upper net layers want or support
 * this kind of feature?).
 */

#define FEC_HASH_BITS	6		/* #bits in hash */
#define CRC32_POLY	0xEDB88320	/* reflected CRC-32 polynomial */

/* ndo_set_rx_mode: program promiscuous mode, all-multicast, or the
 * 64-bit group hash filter built from the CRC-32 of each multicast
 * address.  Bit 0x8 of FEC_R_CNTRL is the promiscuous-mode enable.
 */
static void set_multicast_list(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	unsigned int i, bit, data, crc, tmp;
	unsigned char hash;
	unsigned int hash_high = 0, hash_low = 0;

	if (ndev->flags & IFF_PROMISC) {
		tmp = readl(fep->hwp + FEC_R_CNTRL);
		tmp |= 0x8;
		writel(tmp, fep->hwp + FEC_R_CNTRL);
		return;
	}

	tmp = readl(fep->hwp + FEC_R_CNTRL);
	tmp &= ~0x8;
	writel(tmp, fep->hwp + FEC_R_CNTRL);

	if (ndev->flags & IFF_ALLMULTI) {
		/* Catch all multicast addresses, so set the
		 * filter to all 1's
		 */
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);

		return;
	}

	/* Add the addresses in hash register */
	netdev_for_each_mc_addr(ha, ndev) {
		/* calculate crc32 value of mac address (bit-serial,
		 * LSB-first, reflected polynomial) */
		crc = 0xffffffff;

		for (i = 0; i < ndev->addr_len; i++) {
			data = ha->addr[i];
			for (bit = 0; bit < 8; bit++, data >>= 1) {
				crc = (crc >> 1) ^
				(((crc ^ data) & 1) ? CRC32_POLY : 0);
			}
		}

		/* only upper 6 bits (FEC_HASH_BITS) are used
		 * which point to specific bit in the hash registers
		 */
		hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;

		if (hash > 31)
			hash_high |= 1 << (hash - 32);
		else
			hash_low |= 1 << hash;
	}

	writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
	writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
}

/* Set a MAC change in hardware. */
static int
fec_set_mac_address(struct net_device *ndev, void *p)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct sockaddr *addr = p;

	/* p may be NULL when called internally (fec_enet_init) just to
	 * program the already-stored dev_addr into the hardware.
	 */
	if (addr) {
		if (!is_valid_ether_addr(addr->sa_data))
			return -EADDRNOTAVAIL;
		memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	}

	/* Add netif status check here to avoid system hang in below case:
	 * ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx;
	 * After ethx down, fec all clocks are gated off and then register
	 * access causes system hang.
	 */
	if (!netif_running(ndev))
		return 0;

	/* Bytes 0-3 of the MAC go in ADDR_LOW (byte 0 in the MSB),
	 * bytes 4-5 in the upper half of ADDR_HIGH.
	 */
	writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
		(ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
		fep->hwp + FEC_ADDR_LOW);
	writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
		fep->hwp + FEC_ADDR_HIGH);
	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * fec_poll_controller - FEC Poll controller function
 * @dev: The FEC network adapter
 *
 * Polled functionality used by netconsole and others in non interrupt mode
 *
 */
static void fec_poll_controller(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int i;

	/* Run the interrupt handler by hand for every wired-up IRQ line,
	 * with that line masked so the real handler cannot re-enter.
	 */
	for (i = 0; i < FEC_IRQ_NUM; i++) {
		int irq = fep->irq[i];

		if (irq <= 0)
			continue;

		disable_irq(irq);
		fec_enet_interrupt(irq, dev);
		enable_irq(irq);
	}
}
#endif

3016
static inline void fec_enet_set_netdev_features(struct net_device *netdev,
3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029
	netdev_features_t features)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	netdev->features = features;

	/* Receive checksum has been changed */
	if (changed & NETIF_F_RXCSUM) {
		if (features & NETIF_F_RXCSUM)
			fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
		else
			fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
3030
	}
3031 3032 3033 3034 3035 3036 3037
}

/* .ndo_set_features: changing RX checksum offload on a running interface
 * requires a full MAC stop/restart; the NAPI/TX-lock bracketing below is
 * order-sensitive and must mirror the suspend/timeout paths.
 */
static int fec_set_features(struct net_device *netdev,
	netdev_features_t features)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (netif_running(netdev) && changed & NETIF_F_RXCSUM) {
		napi_disable(&fep->napi);
		netif_tx_lock_bh(netdev);
		fec_stop(netdev);
		fec_enet_set_netdev_features(netdev, features);
		fec_restart(netdev);
		netif_tx_wake_all_queues(netdev);
		netif_tx_unlock_bh(netdev);
		napi_enable(&fep->napi);
	} else {
		/* Not running, or no RXCSUM change: update bookkeeping only */
		fec_enet_set_netdev_features(netdev, features);
	}

	return 0;
}

/* net_device_ops for the FEC: standard open/stop/xmit plus multicast,
 * MAC-address, ioctl (incl. PTP timestamping) and feature hooks.
 */
static const struct net_device_ops fec_netdev_ops = {
	.ndo_open		= fec_enet_open,
	.ndo_stop		= fec_enet_close,
	.ndo_start_xmit		= fec_enet_start_xmit,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= fec_timeout,
	.ndo_set_mac_address	= fec_set_mac_address,
	.ndo_do_ioctl		= fec_enet_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= fec_poll_controller,
#endif
	.ndo_set_features	= fec_set_features,
};

/* Per-queue "descriptor active" doorbell register offsets, indexed by
 * queue id (up to 3 RX and 3 TX queues on AVB-capable parts).
 */
static const unsigned short offset_des_active_rxq[] = {
	FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2
};

static const unsigned short offset_des_active_txq[] = {
	FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2
};

 /*
  * XXX:  We need to clean up on failure exits here.
  *
  */
static int fec_enet_init(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc *cbd_base;
	dma_addr_t bd_dma;
	int bd_size;
	unsigned int i;
	/* Descriptor stride depends on whether extended (timestamping)
	 * buffer descriptors are in use; dsize must be a power of two so
	 * index math can use shifts (dsize_log2).
	 */
	unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
			sizeof(struct bufdesc);
	unsigned dsize_log2 = __fls(dsize);

	WARN_ON(dsize != (1 << dsize_log2));
	/* DMA alignment requirements differ per architecture */
#if defined(CONFIG_ARM)
	fep->rx_align = 0xf;
	fep->tx_align = 0xf;
#else
	fep->rx_align = 0x3;
	fep->tx_align = 0x3;
#endif

	fec_enet_alloc_queue(ndev);

	/* One contiguous DMA area holds all TX and RX rings */
	bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;

	/* Allocate memory for buffer descriptors. */
	cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
				       GFP_KERNEL);
	if (!cbd_base) {
		return -ENOMEM;
	}

	memset(cbd_base, 0, bd_size);

	/* Get the Ethernet address */
	fec_get_mac(ndev);
	/* make sure MAC we just acquired is programmed into the hw */
	fec_set_mac_address(ndev, NULL);

	/* Set receive and transmit descriptor base. */
	for (i = 0; i < fep->num_rx_queues; i++) {
		struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
		unsigned size = dsize * rxq->bd.ring_size;

		rxq->bd.qid = i;
		rxq->bd.base = cbd_base;
		rxq->bd.cur = cbd_base;
		rxq->bd.dma = bd_dma;
		rxq->bd.dsize = dsize;
		rxq->bd.dsize_log2 = dsize_log2;
		rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i];
		/* Advance past this ring; 'last' points at its final BD */
		bd_dma += size;
		cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
		rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
	}

	for (i = 0; i < fep->num_tx_queues; i++) {
		struct fec_enet_priv_tx_q *txq = fep->tx_queue[i];
		unsigned size = dsize * txq->bd.ring_size;

		txq->bd.qid = i;
		txq->bd.base = cbd_base;
		txq->bd.cur = cbd_base;
		txq->bd.dma = bd_dma;
		txq->bd.dsize = dsize;
		txq->bd.dsize_log2 = dsize_log2;
		txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i];
		bd_dma += size;
		cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
		txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
	}


	/* The FEC Ethernet specific entries in the device structure */
	ndev->watchdog_timeo = TX_TIMEOUT;
	ndev->netdev_ops = &fec_netdev_ops;
	ndev->ethtool_ops = &fec_enet_ethtool_ops;

	/* Mask RX interrupts until NAPI is ready */
	writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
	netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);

	if (fep->quirks & FEC_QUIRK_HAS_VLAN)
		/* enable hw VLAN support */
		ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;

	if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
		ndev->gso_max_segs = FEC_MAX_TSO_SEGS;

		/* enable hw accelerator */
		ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
				| NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO);
		fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
	}

	/* AVB-capable controllers have different alignment needs */
	if (fep->quirks & FEC_QUIRK_HAS_AVB) {
		fep->tx_align = 0;
		fep->rx_align = 0x3f;
	}

	ndev->hw_features = ndev->features;

	fec_restart(ndev);

	/* Seed the ethtool counters so the first read is consistent */
	fec_enet_update_ethtool_stats(ndev);

	return 0;
}

#ifdef CONFIG_OF
/* Pulse the optional "phy-reset-gpios" line described in the device tree.
 * All failures are non-fatal: boards without a reset GPIO simply return.
 */
static void fec_reset_phy(struct platform_device *pdev)
{
	int err, phy_reset;
	bool active_high = false;
	int msec = 1;		/* default reset pulse width in ms */
	struct device_node *np = pdev->dev.of_node;

	if (!np)
		return;

	of_property_read_u32(np, "phy-reset-duration", &msec);
	/* A sane reset duration should not be longer than 1s */
	if (msec > 1000)
		msec = 1;

	phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
	if (!gpio_is_valid(phy_reset))
		return;

	active_high = of_property_read_bool(np, "phy-reset-active-high");

	/* Request the GPIO already driven to its asserted (reset) level */
	err = devm_gpio_request_one(&pdev->dev, phy_reset,
			active_high ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
			"phy-reset");
	if (err) {
		dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
		return;
	}

	/* Hold reset for the requested time; busy-sleeping is only
	 * acceptable for short (<= 20 ms) pulses.
	 */
	if (msec > 20)
		msleep(msec);
	else
		usleep_range(msec * 1000, msec * 1000 + 1000);

	/* Deassert reset */
	gpio_set_value_cansleep(phy_reset, !active_high);
}
#else /* CONFIG_OF */
static void fec_reset_phy(struct platform_device *pdev)
{
	/*
	 * In case of platform probe, the reset has been done
	 * by machine code.
	 */
}
#endif /* CONFIG_OF */

3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246
static void
fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
{
	struct device_node *np = pdev->dev.of_node;

	*num_tx = *num_rx = 1;

	if (!np || !of_device_is_available(np))
		return;

	/* parse the num of tx and rx queues */
3247
	of_property_read_u32(np, "fsl,num-tx-queues", num_tx);
3248

3249
	of_property_read_u32(np, "fsl,num-rx-queues", num_rx);
3250 3251

	if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) {
3252 3253
		dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n",
			 *num_tx);
3254 3255 3256 3257 3258
		*num_tx = 1;
		return;
	}

	if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) {
3259 3260
		dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n",
			 *num_rx);
3261 3262 3263 3264 3265 3266
		*num_rx = 1;
		return;
	}

}

/* Platform probe: map registers, acquire clocks/regulator, set up PTP,
 * rings, IRQs and the MDIO bus, then register the netdev.  The goto
 * cleanup chain at the bottom must unwind in exact reverse order of
 * acquisition.
 */
static int
fec_probe(struct platform_device *pdev)
{
	struct fec_enet_private *fep;
	struct fec_platform_data *pdata;
	struct net_device *ndev;
	int i, irq, ret = 0;
	struct resource *r;
	const struct of_device_id *of_id;
	static int dev_id;	/* monotonically increasing FEC instance id */
	struct device_node *np = pdev->dev.of_node, *phy_node;
	int num_tx_qs;
	int num_rx_qs;

	fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);

	/* Init network device */
	ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) +
				  FEC_STATS_SIZE, num_tx_qs, num_rx_qs);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* setup board info structure */
	fep = netdev_priv(ndev);

	/* DT match data carries the per-SoC quirk mask */
	of_id = of_match_device(fec_dt_ids, &pdev->dev);
	if (of_id)
		pdev->id_entry = of_id->data;
	fep->quirks = pdev->id_entry->driver_data;

	fep->netdev = ndev;
	fep->num_rx_queues = num_rx_qs;
	fep->num_tx_queues = num_tx_qs;

#if !defined(CONFIG_M5272)
	/* default enable pause frame auto negotiation */
	if (fep->quirks & FEC_QUIRK_HAS_GBIT)
		fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
#endif

	/* Select default pin state */
	pinctrl_pm_select_default_state(&pdev->dev);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	fep->hwp = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(fep->hwp)) {
		ret = PTR_ERR(fep->hwp);
		goto failed_ioremap;
	}

	fep->pdev = pdev;
	fep->dev_id = dev_id++;

	platform_set_drvdata(pdev, ndev);

	/* ERR006687 applies to i.MX6Q/DL unless the DT says the board
	 * already carries the hardware workaround.
	 */
	if ((of_machine_is_compatible("fsl,imx6q") ||
	     of_machine_is_compatible("fsl,imx6dl")) &&
	    !of_property_read_bool(np, "fsl,err006687-workaround-present"))
		fep->quirks |= FEC_QUIRK_ERR006687;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;

	/* Resolve the PHY: explicit phandle, or a fixed-link node */
	phy_node = of_parse_phandle(np, "phy-handle", 0);
	if (!phy_node && of_phy_is_fixed_link(np)) {
		ret = of_phy_register_fixed_link(np);
		if (ret < 0) {
			dev_err(&pdev->dev,
				"broken fixed-link specification\n");
			goto failed_phy;
		}
		phy_node = of_node_get(np);
	}
	fep->phy_node = phy_node;

	ret = of_get_phy_mode(pdev->dev.of_node);
	if (ret < 0) {
		/* No DT phy-mode: fall back to platform data, then MII */
		pdata = dev_get_platdata(&pdev->dev);
		if (pdata)
			fep->phy_interface = pdata->phy;
		else
			fep->phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		fep->phy_interface = ret;
	}

	/* ipg and ahb clocks are mandatory */
	fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(fep->clk_ipg)) {
		ret = PTR_ERR(fep->clk_ipg);
		goto failed_clk;
	}

	fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(fep->clk_ahb)) {
		ret = PTR_ERR(fep->clk_ahb);
		goto failed_clk;
	}

	fep->itr_clk_rate = clk_get_rate(fep->clk_ahb);

	/* enet_out is optional, depends on board */
	fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out");
	if (IS_ERR(fep->clk_enet_out))
		fep->clk_enet_out = NULL;

	fep->ptp_clk_on = false;
	mutex_init(&fep->ptp_clk_mutex);

	/* clk_ref is optional, depends on board */
	fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref");
	if (IS_ERR(fep->clk_ref))
		fep->clk_ref = NULL;

	/* Extended buffer descriptors need the PTP clock to be useful */
	fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX;
	fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
	if (IS_ERR(fep->clk_ptp)) {
		fep->clk_ptp = NULL;
		fep->bufdesc_ex = false;
	}

	ret = fec_enet_clk_enable(ndev, true);
	if (ret)
		goto failed_clk;

	ret = clk_prepare_enable(fep->clk_ipg);
	if (ret)
		goto failed_clk_ipg;

	/* Optional PHY supply regulator */
	fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
	if (!IS_ERR(fep->reg_phy)) {
		ret = regulator_enable(fep->reg_phy);
		if (ret) {
			dev_err(&pdev->dev,
				"Failed to enable phy regulator: %d\n", ret);
			goto failed_regulator;
		}
	} else {
		fep->reg_phy = NULL;
	}

	/* Runtime PM: start active with a reference held for probe */
	pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	fec_reset_phy(pdev);

	if (fep->bufdesc_ex)
		fec_ptp_init(pdev);

	ret = fec_enet_init(ndev);
	if (ret)
		goto failed_init;

	/* Request all IRQ lines; only the first one is mandatory */
	for (i = 0; i < FEC_IRQ_NUM; i++) {
		irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			if (i)
				break;
			ret = irq;
			goto failed_irq;
		}
		ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
				       0, pdev->name, ndev);
		if (ret)
			goto failed_irq;

		fep->irq[i] = irq;
	}

	init_completion(&fep->mdio_done);
	ret = fec_enet_mii_init(pdev);
	if (ret)
		goto failed_mii_init;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(ndev);
	/* Clocks stay gated until the interface is opened */
	fec_enet_clk_enable(ndev, false);
	pinctrl_pm_select_sleep_state(&pdev->dev);

	ret = register_netdev(ndev);
	if (ret)
		goto failed_register;

	device_init_wakeup(&ndev->dev, fep->wol_flag &
			   FEC_WOL_HAS_MAGIC_PACKET);

	if (fep->bufdesc_ex && fep->ptp_clock)
		netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);

	fep->rx_copybreak = COPYBREAK_DEFAULT;
	INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);

	/* Drop the probe-time runtime PM reference */
	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

failed_register:
	fec_enet_mii_remove(fep);
failed_mii_init:
failed_irq:
failed_init:
	fec_ptp_stop(pdev);
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
failed_regulator:
	clk_disable_unprepare(fep->clk_ipg);
failed_clk_ipg:
	fec_enet_clk_enable(ndev, false);
failed_clk:
	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
failed_phy:
	of_node_put(phy_node);
failed_ioremap:
	free_netdev(ndev);

	return ret;
}

/* Platform remove: undo fec_probe() in reverse order.  devm-managed
 * resources (IRQs, clocks, MMIO) are released automatically.
 */
static int
fec_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct device_node *np = pdev->dev.of_node;

	/* Make sure the timeout worker cannot run during teardown */
	cancel_work_sync(&fep->tx_timeout_work);
	fec_ptp_stop(pdev);
	unregister_netdev(ndev);
	fec_enet_mii_remove(fep);
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	of_node_put(fep->phy_node);
	free_netdev(ndev);

	return 0;
}

/* System suspend: detach and stop a running interface under the RTNL
 * lock.  When Wake-on-LAN is armed, the PHY regulator and pinctrl sleep
 * state are left alone so the magic packet can still be detected.
 */
static int __maybe_unused fec_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	rtnl_lock();
	if (netif_running(ndev)) {
		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
			fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
		phy_stop(ndev->phydev);
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		netif_device_detach(ndev);
		netif_tx_unlock_bh(ndev);
		fec_stop(ndev);
		fec_enet_clk_enable(ndev, false);
		if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
			pinctrl_pm_select_sleep_state(&fep->pdev->dev);
	}
	rtnl_unlock();

	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
		regulator_disable(fep->reg_phy);

	/* SOC supply clock to phy, when clock is disabled, phy link down
	 * SOC control phy regulator, when regulator is disabled, phy link down
	 */
	if (fep->clk_enet_out || fep->reg_phy)
		fep->link = 0;

	return 0;
}

/* System resume: re-power the PHY (unless WoL kept it powered), then
 * restart and re-attach a running interface under the RTNL lock.
 */
static int __maybe_unused fec_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
	int ret;
	int val;

	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
		ret = regulator_enable(fep->reg_phy);
		if (ret)
			return ret;
	}

	rtnl_lock();
	if (netif_running(ndev)) {
		ret = fec_enet_clk_enable(ndev, true);
		if (ret) {
			rtnl_unlock();
			goto failed_clk;
		}
		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
			/* Leave stop mode and clear the magic-packet/sleep
			 * bits that were set on the way down.
			 */
			if (pdata && pdata->sleep_mode_enable)
				pdata->sleep_mode_enable(false);
			val = readl(fep->hwp + FEC_ECNTRL);
			val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
			writel(val, fep->hwp + FEC_ECNTRL);
			fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON;
		} else {
			pinctrl_pm_select_default_state(&fep->pdev->dev);
		}
		fec_restart(ndev);
		netif_tx_lock_bh(ndev);
		netif_device_attach(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
		phy_start(ndev->phydev);
	}
	rtnl_unlock();

	return 0;

failed_clk:
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
	return ret;
}

3593 3594 3595 3596 3597 3598 3599 3600 3601 3602 3603 3604 3605 3606 3607 3608 3609 3610 3611 3612 3613 3614
static int __maybe_unused fec_runtime_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	clk_disable_unprepare(fep->clk_ipg);

	return 0;
}

static int __maybe_unused fec_runtime_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	return clk_prepare_enable(fep->clk_ipg);
}

static const struct dev_pm_ops fec_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
	SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
};
/* Platform driver glue: matches via DT (fec_dt_ids) or the legacy
 * platform id table (fec_devtype).
 */
static struct platform_driver fec_driver = {
	.driver	= {
		.name	= DRIVER_NAME,
		.pm	= &fec_pm_ops,
		.of_match_table = fec_dt_ids,
	},
	.id_table = fec_devtype,
	.probe	= fec_probe,
	.remove	= fec_drv_remove,
};

/* Standard module init/exit boilerplate for a platform driver */
module_platform_driver(fec_driver);

MODULE_ALIAS("platform:"DRIVER_NAME);
MODULE_LICENSE("GPL");