/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * Right now, I am very wasteful with the buffers.  I allocate memory
 * pages and then divide them into 2K frame buffers.  This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
 * Support for FEC controller of ColdFire processors.
 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
 * Copyright (c) 2004-2006 Macq Electronique SA.
 *
 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/pm_runtime.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/tso.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/regulator/consumer.h>
#include <linux/if_vlan.h>
#include <linux/pinctrl/consumer.h>
#include <linux/prefetch.h>
#include <soc/imx/cpuidle.h>

#include <asm/cacheflush.h>

#include "fec.h"

69
/* Forward declarations for functions defined later in this file */
static void set_multicast_list(struct net_device *ndev);
static void fec_enet_itr_coal_init(struct net_device *ndev);

#define DRIVER_NAME	"fec"

/* Map a queue index to the next queue polled by the interrupt handler */
#define FEC_ENET_GET_QUQUE(_x) ((_x == 0) ? 1 : ((_x == 1) ? 2 : 0))

/* Pause frame field and FIFO threshold */
#define FEC_ENET_FCE	(1 << 5)
#define FEC_ENET_RSEM_V	0x84
#define FEC_ENET_RSFL_V	16
#define FEC_ENET_RAEM_V	0x8
#define FEC_ENET_RAFL_V	0x8
#define FEC_ENET_OPD_V	0xFFF0
#define FEC_MDIO_PM_TIMEOUT  100 /* ms */
84

85 86
static struct platform_device_id fec_devtype[] = {
	{
87
		/* keep it for coldfire */
88 89
		.name = DRIVER_NAME,
		.driver_data = 0,
90 91
	}, {
		.name = "imx25-fec",
92
		.driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR,
93 94
	}, {
		.name = "imx27-fec",
95
		.driver_data = FEC_QUIRK_MIB_CLEAR,
96 97
	}, {
		.name = "imx28-fec",
98
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
99
				FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC,
S
Shawn Guo 已提交
100 101
	}, {
		.name = "imx6q-fec",
102
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
103
				FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
104 105
				FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
				FEC_QUIRK_HAS_RACC,
106
	}, {
107
		.name = "mvf600-fec",
108
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC,
109 110 111 112
	}, {
		.name = "imx6sx-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
				FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
113
				FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
114
				FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
115
				FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
116 117 118 119
	}, {
		.name = "imx6ul-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
				FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
120 121 122
				FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
				FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
				FEC_QUIRK_HAS_COALESCE,
123 124 125
	}, {
		/* sentinel */
	}
126
};
127
MODULE_DEVICE_TABLE(platform, fec_devtype);
128

129
enum imx_fec_type {
L
Lothar Waßmann 已提交
130
	IMX25_FEC = 1,	/* runs on i.mx25/50/53 */
131 132
	IMX27_FEC,	/* runs on i.mx27/35/51 */
	IMX28_FEC,
S
Shawn Guo 已提交
133
	IMX6Q_FEC,
134
	MVF600_FEC,
135
	IMX6SX_FEC,
136
	IMX6UL_FEC,
137 138 139 140 141 142
};

static const struct of_device_id fec_dt_ids[] = {
	{ .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
	{ .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
	{ .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
S
Shawn Guo 已提交
143
	{ .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
144
	{ .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
145
	{ .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
146
	{ .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], },
147 148 149 150
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);

151 152 153
static unsigned char macaddr[ETH_ALEN];
module_param_array(macaddr, byte, NULL, 0);
MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
L
Linus Torvalds 已提交
154

155
#if defined(CONFIG_M5272)
/*
 * Some hardware gets it MAC address out of local flash memory.
 * if this is non-zero then assume it is the address to get MAC from.
 */
#if defined(CONFIG_NETtel)
#define	FEC_FLASHMAC	0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define	FEC_FLASHMAC	0xf0006000
#elif defined(CONFIG_CANCam)
#define	FEC_FLASHMAC	0xf0020000
#elif defined (CONFIG_M5272C3)
#define	FEC_FLASHMAC	(0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
#define FEC_FLASHMAC	0xffc0406b
#else
#define	FEC_FLASHMAC	0
#endif
#endif /* CONFIG_M5272 */
174

175
/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
 */
#define PKT_MAXBUF_SIZE		1522
#define PKT_MINBUF_SIZE		64
#define PKT_MAXBLR_SIZE		1536

/* FEC receive acceleration */
#define FEC_RACC_IPDIS		(1 << 1)
#define FEC_RACC_PRODIS		(1 << 2)
#define FEC_RACC_SHIFT16	BIT(7)
#define FEC_RACC_OPTIONS	(FEC_RACC_IPDIS | FEC_RACC_PRODIS)

/* MIB Control Register */
#define FEC_MIB_CTRLSTAT_DISABLE	BIT(31)

/*
 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
 * size bits. Other FEC hardware does not, so we need to take that into
 * account when setting it.
 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
#define	OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
#else
#define	OPT_FRAME_SIZE	0
#endif

202 203 204 205 206 207 208 209
/* FEC MII MMFR bits definition */
#define FEC_MMFR_ST		(1 << 30)
#define FEC_MMFR_OP_READ	(2 << 28)
#define FEC_MMFR_OP_WRITE	(1 << 28)
#define FEC_MMFR_PA(v)		((v & 0x1f) << 23)
#define FEC_MMFR_RA(v)		((v & 0x1f) << 18)
#define FEC_MMFR_TA		(2 << 16)
#define FEC_MMFR_DATA(v)	(v & 0xffff)
/* FEC ECR bits definition */
#define FEC_ECR_MAGICEN		(1 << 2)
#define FEC_ECR_SLEEP		(1 << 3)

#define FEC_MII_TIMEOUT		30000 /* us */

/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)

#define FEC_PAUSE_FLAG_AUTONEG	0x1
#define FEC_PAUSE_FLAG_ENABLE	0x2

/* Wake-on-LAN capability/state flags */
#define FEC_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define FEC_WOL_FLAG_ENABLE		(0x1 << 1)
#define FEC_WOL_FLAG_SLEEP_ON		(0x1 << 2)

#define COPYBREAK_DEFAULT	256

#define TSO_HEADER_SIZE		128
/* Max number of allowed TCP segments for software TSO */
#define FEC_MAX_TSO_SEGS	100
#define FEC_MAX_SKB_DESCS	(FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

/* True when @addr lies inside the queue's DMA region of TSO headers */
#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_dma) && \
	(addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))

/* Number of MDIO buses registered by this driver */
static int mii_cnt;

T
Troy Kisky 已提交
238 239 240 241
/* Advance to the next buffer descriptor in the ring, wrapping back to
 * the base once the last descriptor has been passed.
 */
static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
					     struct bufdesc_prop *bd)
{
	return (bdp >= bd->last) ? bd->base
			: (struct bufdesc *)(((void *)bdp) + bd->dsize);
}
244

T
Troy Kisky 已提交
245 246 247 248
/* Step back to the previous buffer descriptor in the ring, wrapping to
 * the last descriptor when currently at the base.
 */
static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
					     struct bufdesc_prop *bd)
{
	return (bdp <= bd->base) ? bd->last
			: (struct bufdesc *)(((void *)bdp) - bd->dsize);
}

T
Troy Kisky 已提交
252 253
static int fec_enet_get_bd_index(struct bufdesc *bdp,
				 struct bufdesc_prop *bd)
254
{
T
Troy Kisky 已提交
255
	return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2;
256 257
}

T
Troy Kisky 已提交
258
static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq)
259 260 261
{
	int entries;

T
Troy Kisky 已提交
262 263
	entries = (((const char *)txq->dirty_tx -
			(const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;
264

T
Troy Kisky 已提交
265
	return entries >= 0 ? entries : entries + txq->bd.ring_size;
266 267
}

268
/* Byte-swap @bufaddr in place, one 32-bit word at a time; @len is in
 * bytes. Used on controllers with the FEC_QUIRK_SWAP_FRAME erratum.
 */
static void swap_buffer(void *bufaddr, int len)
{
	int i;
	unsigned int *buf = bufaddr;

	for (i = 0; i < len; i += 4, buf++)
		swab32s(buf);
}

277 278 279 280 281 282 283 284 285 286
/* Copy @len bytes from @src_buf to @dst_buf, byte-swapping each 32-bit
 * word on the way (non-destructive counterpart of swap_buffer()).
 */
static void swap_buffer2(void *dst_buf, void *src_buf, int len)
{
	unsigned int *out = dst_buf;
	unsigned int *in = src_buf;
	int off;

	for (off = 0; off < len; off += 4)
		*out++ = swab32p(in++);
}

287 288 289
static void fec_dump(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
290 291 292
	struct bufdesc *bdp;
	struct fec_enet_priv_tx_q *txq;
	int index = 0;
293 294 295 296

	netdev_info(ndev, "TX ring dump\n");
	pr_info("Nr     SC     addr       len  SKB\n");

297
	txq = fep->tx_queue[0];
T
Troy Kisky 已提交
298
	bdp = txq->bd.base;
299

300
	do {
301
		pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
302
			index,
T
Troy Kisky 已提交
303
			bdp == txq->bd.cur ? 'S' : ' ',
304
			bdp == txq->dirty_tx ? 'H' : ' ',
305 306 307
			fec16_to_cpu(bdp->cbd_sc),
			fec32_to_cpu(bdp->cbd_bufaddr),
			fec16_to_cpu(bdp->cbd_datlen),
308
			txq->tx_skbuff[index]);
T
Troy Kisky 已提交
309
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
310
		index++;
T
Troy Kisky 已提交
311
	} while (bdp != txq->bd.base);
312 313
}

314 315 316 317 318
static inline bool is_ipv4_pkt(struct sk_buff *skb)
{
	return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
}

319 320 321 322 323 324 325 326 327 328
static int
fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
{
	/* Only run for packets requiring a checksum. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (unlikely(skb_cow_head(skb, 0)))
		return -1;

329 330
	if (is_ipv4_pkt(skb))
		ip_hdr(skb)->check = 0;
331 332 333 334 335
	*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;

	return 0;
}

336
static struct bufdesc *
337 338 339
fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
			     struct sk_buff *skb,
			     struct net_device *ndev)
L
Linus Torvalds 已提交
340
{
341
	struct fec_enet_private *fep = netdev_priv(ndev);
T
Troy Kisky 已提交
342
	struct bufdesc *bdp = txq->bd.cur;
343 344 345 346 347 348
	struct bufdesc_ex *ebdp;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int frag, frag_len;
	unsigned short status;
	unsigned int estatus = 0;
	skb_frag_t *this_frag;
349
	unsigned int index;
350
	void *bufaddr;
351
	dma_addr_t addr;
352
	int i;
L
Linus Torvalds 已提交
353

354 355
	for (frag = 0; frag < nr_frags; frag++) {
		this_frag = &skb_shinfo(skb)->frags[frag];
T
Troy Kisky 已提交
356
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
357 358
		ebdp = (struct bufdesc_ex *)bdp;

359
		status = fec16_to_cpu(bdp->cbd_sc);
360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375
		status &= ~BD_ENET_TX_STATS;
		status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
		frag_len = skb_shinfo(skb)->frags[frag].size;

		/* Handle the last BD specially */
		if (frag == nr_frags - 1) {
			status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
			if (fep->bufdesc_ex) {
				estatus |= BD_ENET_TX_INT;
				if (unlikely(skb_shinfo(skb)->tx_flags &
					SKBTX_HW_TSTAMP && fep->hwts_tx_en))
					estatus |= BD_ENET_TX_TS;
			}
		}

		if (fep->bufdesc_ex) {
376
			if (fep->quirks & FEC_QUIRK_HAS_AVB)
377
				estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
378 379 380
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
			ebdp->cbd_bdu = 0;
381
			ebdp->cbd_esc = cpu_to_fec32(estatus);
382 383 384 385
		}

		bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;

T
Troy Kisky 已提交
386
		index = fec_enet_get_bd_index(bdp, &txq->bd);
387
		if (((unsigned long) bufaddr) & fep->tx_align ||
388
			fep->quirks & FEC_QUIRK_SWAP_FRAME) {
389 390
			memcpy(txq->tx_bounce[index], bufaddr, frag_len);
			bufaddr = txq->tx_bounce[index];
391

392
			if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
393 394 395
				swap_buffer(bufaddr, frag_len);
		}

396 397 398
		addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(&fep->pdev->dev, addr)) {
399 400 401 402 403
			if (net_ratelimit())
				netdev_err(ndev, "Tx DMA memory map failed\n");
			goto dma_mapping_error;
		}

404 405
		bdp->cbd_bufaddr = cpu_to_fec32(addr);
		bdp->cbd_datlen = cpu_to_fec16(frag_len);
406 407 408 409
		/* Make sure the updates to rest of the descriptor are
		 * performed before transferring ownership.
		 */
		wmb();
410
		bdp->cbd_sc = cpu_to_fec16(status);
411 412
	}

413
	return bdp;
414
dma_mapping_error:
T
Troy Kisky 已提交
415
	bdp = txq->bd.cur;
416
	for (i = 0; i < frag; i++) {
T
Troy Kisky 已提交
417
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
418 419
		dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
				 fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
420
	}
421
	return ERR_PTR(-ENOMEM);
422
}
L
Linus Torvalds 已提交
423

424 425
static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
				   struct sk_buff *skb, struct net_device *ndev)
426 427 428 429 430
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct bufdesc *bdp, *last_bdp;
	void *bufaddr;
431
	dma_addr_t addr;
432 433 434 435
	unsigned short status;
	unsigned short buflen;
	unsigned int estatus = 0;
	unsigned int index;
N
Nimrod Andy 已提交
436
	int entries_free;
S
Sascha Hauer 已提交
437

T
Troy Kisky 已提交
438
	entries_free = fec_enet_get_free_txdesc_num(txq);
N
Nimrod Andy 已提交
439 440 441 442 443 444 445
	if (entries_free < MAX_SKB_FRAGS + 1) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "NOT enough BD for SG!\n");
		return NETDEV_TX_OK;
	}

446 447
	/* Protocol checksum off-load for TCP and UDP. */
	if (fec_enet_clear_csum(skb, ndev)) {
448
		dev_kfree_skb_any(skb);
449 450 451
		return NETDEV_TX_OK;
	}

452
	/* Fill in a Tx ring entry */
T
Troy Kisky 已提交
453
	bdp = txq->bd.cur;
454
	last_bdp = bdp;
455
	status = fec16_to_cpu(bdp->cbd_sc);
456
	status &= ~BD_ENET_TX_STATS;
L
Linus Torvalds 已提交
457

S
Sascha Hauer 已提交
458
	/* Set buffer length and buffer pointer */
459
	bufaddr = skb->data;
460
	buflen = skb_headlen(skb);
L
Linus Torvalds 已提交
461

T
Troy Kisky 已提交
462
	index = fec_enet_get_bd_index(bdp, &txq->bd);
463
	if (((unsigned long) bufaddr) & fep->tx_align ||
464
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
465 466
		memcpy(txq->tx_bounce[index], skb->data, buflen);
		bufaddr = txq->tx_bounce[index];
L
Linus Torvalds 已提交
467

468
		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
469 470
			swap_buffer(bufaddr, buflen);
	}
471

472 473 474
	/* Push the data cache so the CPM does not get stale memory data. */
	addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, addr)) {
475 476 477 478 479
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "Tx DMA memory map failed\n");
		return NETDEV_TX_OK;
	}
L
Linus Torvalds 已提交
480

481
	if (nr_frags) {
482
		last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
T
Troy Kisky 已提交
483 484 485 486
		if (IS_ERR(last_bdp)) {
			dma_unmap_single(&fep->pdev->dev, addr,
					 buflen, DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
487
			return NETDEV_TX_OK;
T
Troy Kisky 已提交
488
		}
489 490 491 492 493 494 495 496 497
	} else {
		status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
		if (fep->bufdesc_ex) {
			estatus = BD_ENET_TX_INT;
			if (unlikely(skb_shinfo(skb)->tx_flags &
				SKBTX_HW_TSTAMP && fep->hwts_tx_en))
				estatus |= BD_ENET_TX_TS;
		}
	}
T
Troy Kisky 已提交
498 499
	bdp->cbd_bufaddr = cpu_to_fec32(addr);
	bdp->cbd_datlen = cpu_to_fec16(buflen);
500

501 502 503
	if (fep->bufdesc_ex) {

		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
504

505
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
506
			fep->hwts_tx_en))
507
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
508

509
		if (fep->quirks & FEC_QUIRK_HAS_AVB)
510
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
511

512 513 514 515
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;

		ebdp->cbd_bdu = 0;
516
		ebdp->cbd_esc = cpu_to_fec32(estatus);
517
	}
518

T
Troy Kisky 已提交
519
	index = fec_enet_get_bd_index(last_bdp, &txq->bd);
520
	/* Save skb pointer */
521
	txq->tx_skbuff[index] = skb;
522

523 524 525 526
	/* Make sure the updates to rest of the descriptor are performed before
	 * transferring ownership.
	 */
	wmb();
527

528 529 530
	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
531
	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
532
	bdp->cbd_sc = cpu_to_fec16(status);
533

S
Sascha Hauer 已提交
534
	/* If this was the last BD in the ring, start at the beginning again. */
T
Troy Kisky 已提交
535
	bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);
L
Linus Torvalds 已提交
536

537 538
	skb_tx_timestamp(skb);

539
	/* Make sure the update to bdp and tx_skbuff are performed before
T
Troy Kisky 已提交
540
	 * txq->bd.cur.
541 542
	 */
	wmb();
T
Troy Kisky 已提交
543
	txq->bd.cur = bdp;
544 545

	/* Trigger transmission start */
546
	writel(0, txq->bd.reg_desc_active);
L
Linus Torvalds 已提交
547

548
	return 0;
L
Linus Torvalds 已提交
549 550
}

N
Nimrod Andy 已提交
551
static int
552 553 554 555
fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
			  struct net_device *ndev,
			  struct bufdesc *bdp, int index, char *data,
			  int size, bool last_tcp, bool is_last)
556 557
{
	struct fec_enet_private *fep = netdev_priv(ndev);
558
	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
N
Nimrod Andy 已提交
559 560
	unsigned short status;
	unsigned int estatus = 0;
561
	dma_addr_t addr;
562

563
	status = fec16_to_cpu(bdp->cbd_sc);
N
Nimrod Andy 已提交
564
	status &= ~BD_ENET_TX_STATS;
565

N
Nimrod Andy 已提交
566 567
	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);

568
	if (((unsigned long) data) & fep->tx_align ||
569
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
570 571
		memcpy(txq->tx_bounce[index], data, size);
		data = txq->tx_bounce[index];
N
Nimrod Andy 已提交
572

573
		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
N
Nimrod Andy 已提交
574 575 576
			swap_buffer(data, size);
	}

577 578
	addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, addr)) {
N
Nimrod Andy 已提交
579
		dev_kfree_skb_any(skb);
580
		if (net_ratelimit())
N
Nimrod Andy 已提交
581
			netdev_err(ndev, "Tx DMA memory map failed\n");
582 583 584
		return NETDEV_TX_BUSY;
	}

585 586
	bdp->cbd_datlen = cpu_to_fec16(size);
	bdp->cbd_bufaddr = cpu_to_fec32(addr);
587

N
Nimrod Andy 已提交
588
	if (fep->bufdesc_ex) {
589
		if (fep->quirks & FEC_QUIRK_HAS_AVB)
590
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
N
Nimrod Andy 已提交
591 592 593
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
		ebdp->cbd_bdu = 0;
594
		ebdp->cbd_esc = cpu_to_fec32(estatus);
N
Nimrod Andy 已提交
595 596 597 598 599 600 601 602
	}

	/* Handle the last BD specially */
	if (last_tcp)
		status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
	if (is_last) {
		status |= BD_ENET_TX_INTR;
		if (fep->bufdesc_ex)
603
			ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT);
N
Nimrod Andy 已提交
604 605
	}

606
	bdp->cbd_sc = cpu_to_fec16(status);
N
Nimrod Andy 已提交
607 608 609 610 611

	return 0;
}

static int
612 613 614
fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
			 struct sk_buff *skb, struct net_device *ndev,
			 struct bufdesc *bdp, int index)
N
Nimrod Andy 已提交
615 616 617
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
618
	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
N
Nimrod Andy 已提交
619 620 621 622 623
	void *bufaddr;
	unsigned long dmabuf;
	unsigned short status;
	unsigned int estatus = 0;

624
	status = fec16_to_cpu(bdp->cbd_sc);
N
Nimrod Andy 已提交
625 626 627
	status &= ~BD_ENET_TX_STATS;
	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);

628 629
	bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
	dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE;
630
	if (((unsigned long)bufaddr) & fep->tx_align ||
631
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
632 633
		memcpy(txq->tx_bounce[index], skb->data, hdr_len);
		bufaddr = txq->tx_bounce[index];
N
Nimrod Andy 已提交
634

635
		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
N
Nimrod Andy 已提交
636 637 638 639 640 641 642 643 644 645 646 647
			swap_buffer(bufaddr, hdr_len);

		dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
					hdr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
			dev_kfree_skb_any(skb);
			if (net_ratelimit())
				netdev_err(ndev, "Tx DMA memory map failed\n");
			return NETDEV_TX_BUSY;
		}
	}

648 649
	bdp->cbd_bufaddr = cpu_to_fec32(dmabuf);
	bdp->cbd_datlen = cpu_to_fec16(hdr_len);
N
Nimrod Andy 已提交
650 651

	if (fep->bufdesc_ex) {
652
		if (fep->quirks & FEC_QUIRK_HAS_AVB)
653
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
N
Nimrod Andy 已提交
654 655 656
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
		ebdp->cbd_bdu = 0;
657
		ebdp->cbd_esc = cpu_to_fec32(estatus);
N
Nimrod Andy 已提交
658 659
	}

660
	bdp->cbd_sc = cpu_to_fec16(status);
N
Nimrod Andy 已提交
661 662 663 664

	return 0;
}

665 666 667
static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
				   struct sk_buff *skb,
				   struct net_device *ndev)
N
Nimrod Andy 已提交
668 669 670 671
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int total_len, data_left;
T
Troy Kisky 已提交
672
	struct bufdesc *bdp = txq->bd.cur;
N
Nimrod Andy 已提交
673 674 675 676
	struct tso_t tso;
	unsigned int index = 0;
	int ret;

T
Troy Kisky 已提交
677
	if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) {
N
Nimrod Andy 已提交
678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "NOT enough BD for TSO!\n");
		return NETDEV_TX_OK;
	}

	/* Protocol checksum off-load for TCP and UDP. */
	if (fec_enet_clear_csum(skb, ndev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Initialize the TSO handler, and prepare the first payload */
	tso_start(skb, &tso);

	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

T
Troy Kisky 已提交
697
		index = fec_enet_get_bd_index(bdp, &txq->bd);
N
Nimrod Andy 已提交
698 699 700 701
		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;

		/* prepare packet headers: MAC + IP + TCP */
702
		hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
N
Nimrod Andy 已提交
703
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
704
		ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index);
N
Nimrod Andy 已提交
705 706 707 708 709 710 711
		if (ret)
			goto err_release;

		while (data_left > 0) {
			int size;

			size = min_t(int, tso.size, data_left);
T
Troy Kisky 已提交
712 713
			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
			index = fec_enet_get_bd_index(bdp, &txq->bd);
714 715 716 717
			ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
							bdp, index,
							tso.data, size,
							size == data_left,
N
Nimrod Andy 已提交
718 719 720 721 722 723 724 725
							total_len == 0);
			if (ret)
				goto err_release;

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}

T
Troy Kisky 已提交
726
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
N
Nimrod Andy 已提交
727 728 729
	}

	/* Save skb pointer */
730
	txq->tx_skbuff[index] = skb;
N
Nimrod Andy 已提交
731 732

	skb_tx_timestamp(skb);
T
Troy Kisky 已提交
733
	txq->bd.cur = bdp;
N
Nimrod Andy 已提交
734 735

	/* Trigger transmission start */
736
	if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
737 738 739 740 741
	    !readl(txq->bd.reg_desc_active) ||
	    !readl(txq->bd.reg_desc_active) ||
	    !readl(txq->bd.reg_desc_active) ||
	    !readl(txq->bd.reg_desc_active))
		writel(0, txq->bd.reg_desc_active);
N
Nimrod Andy 已提交
742 743 744 745 746 747 748 749 750 751 752 753 754

	return 0;

err_release:
	/* TODO: Release all used data descriptors for TSO */
	return ret;
}

static netdev_tx_t
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int entries_free;
755 756 757
	unsigned short queue;
	struct fec_enet_priv_tx_q *txq;
	struct netdev_queue *nq;
N
Nimrod Andy 已提交
758 759
	int ret;

760 761 762 763
	queue = skb_get_queue_mapping(skb);
	txq = fep->tx_queue[queue];
	nq = netdev_get_tx_queue(ndev, queue);

N
Nimrod Andy 已提交
764
	if (skb_is_gso(skb))
765
		ret = fec_enet_txq_submit_tso(txq, skb, ndev);
N
Nimrod Andy 已提交
766
	else
767
		ret = fec_enet_txq_submit_skb(txq, skb, ndev);
768 769
	if (ret)
		return ret;
770

T
Troy Kisky 已提交
771
	entries_free = fec_enet_get_free_txdesc_num(txq);
772 773
	if (entries_free <= txq->tx_stop_threshold)
		netif_tx_stop_queue(nq);
774 775 776 777

	return NETDEV_TX_OK;
}

778 779 780 781 782
/* Init RX & TX buffer descriptors
 */
static void fec_enet_bd_init(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
783 784
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
785 786
	struct bufdesc *bdp;
	unsigned int i;
787
	unsigned int q;
788

789 790 791
	for (q = 0; q < fep->num_rx_queues; q++) {
		/* Initialize the receive buffer descriptors. */
		rxq = fep->rx_queue[q];
T
Troy Kisky 已提交
792
		bdp = rxq->bd.base;
793

T
Troy Kisky 已提交
794
		for (i = 0; i < rxq->bd.ring_size; i++) {
795

796 797
			/* Initialize the BD for every fragment in the page. */
			if (bdp->cbd_bufaddr)
798
				bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
799
			else
800
				bdp->cbd_sc = cpu_to_fec16(0);
T
Troy Kisky 已提交
801
			bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
802 803 804
		}

		/* Set the last buffer to wrap */
T
Troy Kisky 已提交
805
		bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
806
		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
807

T
Troy Kisky 已提交
808
		rxq->bd.cur = rxq->bd.base;
809 810 811 812 813
	}

	for (q = 0; q < fep->num_tx_queues; q++) {
		/* ...and the same for transmit */
		txq = fep->tx_queue[q];
T
Troy Kisky 已提交
814 815
		bdp = txq->bd.base;
		txq->bd.cur = bdp;
816

T
Troy Kisky 已提交
817
		for (i = 0; i < txq->bd.ring_size; i++) {
818
			/* Initialize the BD for every fragment in the page. */
819
			bdp->cbd_sc = cpu_to_fec16(0);
820 821 822 823
			if (txq->tx_skbuff[i]) {
				dev_kfree_skb_any(txq->tx_skbuff[i]);
				txq->tx_skbuff[i] = NULL;
			}
824
			bdp->cbd_bufaddr = cpu_to_fec32(0);
T
Troy Kisky 已提交
825
			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
826 827 828
		}

		/* Set the last buffer to wrap */
T
Troy Kisky 已提交
829
		bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
830
		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
831
		txq->dirty_tx = bdp;
832
	}
833
}
834

F
Frank Li 已提交
835 836 837 838 839 840
/* Tell the controller that every RX ring has descriptors available by
 * writing each ring's "receive descriptor active" register.
 */
static void fec_enet_active_rxring(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;

	for (i = 0; i < fep->num_rx_queues; i++)
		writel(0, fep->rx_queue[i]->bd.reg_desc_active);
}

844 845 846 847 848 849
static void fec_enet_enable_ring(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	int i;
850

851 852
	for (i = 0; i < fep->num_rx_queues; i++) {
		rxq = fep->rx_queue[i];
T
Troy Kisky 已提交
853
		writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
854
		writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
855

856 857 858 859 860
		/* enable DMA1/2 */
		if (i)
			writel(RCMR_MATCHEN | RCMR_CMP(i),
			       fep->hwp + FEC_RCMR(i));
	}
861

862 863
	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = fep->tx_queue[i];
T
Troy Kisky 已提交
864
		writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i));
865 866 867 868 869

		/* enable DMA1/2 */
		if (i)
			writel(DMA_CLASS_EN | IDLE_SLOPE(i),
			       fep->hwp + FEC_DMA_CFG(i));
870
	}
871
}
872

873 874 875 876 877 878 879 880 881
static void fec_enet_reset_skb(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_tx_q *txq;
	int i, j;

	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = fep->tx_queue[i];

T
Troy Kisky 已提交
882
		for (j = 0; j < txq->bd.ring_size; j++) {
883 884 885 886 887 888
			if (txq->tx_skbuff[j]) {
				dev_kfree_skb_any(txq->tx_skbuff[j]);
				txq->tx_skbuff[j] = NULL;
			}
		}
	}
889 890
}

891 892 893 894
/*
 * This function is called to start or restart the FEC during a link
 * change, transmit timeout, or to reconfigure the FEC.  The network
 * packet processing for this device must be stopped before this call.
 */
static void
fec_restart(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 val;
	u32 temp_mac[2];
	u32 rcntl = OPT_FRAME_SIZE | 0x04;	/* RCR: MII mode enable */
	u32 ecntl = 0x2; /* ETHEREN */

	/* Whack a reset.  We should wait for this.
	 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
	 * instead of reset MAC itself.
	 */
	if (fep->quirks & FEC_QUIRK_HAS_AVB) {
		writel(0, fep->hwp + FEC_ECNTRL);
	} else {
		writel(1, fep->hwp + FEC_ECNTRL);
		udelay(10);
	}

	/*
	 * enet-mac reset will reset mac address registers too,
	 * so need to reconfigure it.
	 */
	memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
	writel((__force u32)cpu_to_be32(temp_mac[0]),
	       fep->hwp + FEC_ADDR_LOW);
	writel((__force u32)cpu_to_be32(temp_mac[1]),
	       fep->hwp + FEC_ADDR_HIGH);

	/* Clear any outstanding interrupt. */
	writel(0xffffffff, fep->hwp + FEC_IEVENT);

	/* Re-initialise the buffer descriptor rings and ring registers */
	fec_enet_bd_init(ndev);

	fec_enet_enable_ring(ndev);

	/* Reset tx SKB buffers. */
	fec_enet_reset_skb(ndev);

	/* Enable MII mode */
	if (fep->full_duplex == DUPLEX_FULL) {
		/* FD enable */
		writel(0x04, fep->hwp + FEC_X_CNTRL);
	} else {
		/* No Rcv on Xmit */
		rcntl |= 0x02;
		writel(0x0, fep->hwp + FEC_X_CNTRL);
	}

	/* Set MII speed */
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

#if !defined(CONFIG_M5272)
	if (fep->quirks & FEC_QUIRK_HAS_RACC) {
		val = readl(fep->hwp + FEC_RACC);
		/* align IP header */
		val |= FEC_RACC_SHIFT16;
		if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
			/* set RX checksum */
			val |= FEC_RACC_OPTIONS;
		else
			val &= ~FEC_RACC_OPTIONS;
		writel(val, fep->hwp + FEC_RACC);
		writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
	}
#endif

	/*
	 * The phy interface and speed need to get configured
	 * differently on enet-mac.
	 */
	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
		/* Enable flow control and length check */
		rcntl |= 0x40000000 | 0x00000020;

		/* RGMII, RMII or MII */
		if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII ||
		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
			rcntl |= (1 << 6);	/* RGMII mode */
		else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
			rcntl |= (1 << 8);	/* RMII mode */
		else
			rcntl &= ~(1 << 8);	/* plain MII */

		/* 1G, 100M or 10M */
		if (ndev->phydev) {
			if (ndev->phydev->speed == SPEED_1000)
				ecntl |= (1 << 5);
			else if (ndev->phydev->speed == SPEED_100)
				rcntl &= ~(1 << 9);
			else
				rcntl |= (1 << 9);	/* 10M bit in RCR */
		}
	} else {
#ifdef FEC_MIIGSK_ENR
		if (fep->quirks & FEC_QUIRK_USE_GASKET) {
			u32 cfgr;
			/* disable the gasket and wait */
			writel(0, fep->hwp + FEC_MIIGSK_ENR);
			while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
				udelay(1);

			/*
			 * configure the gasket:
			 *   RMII, 50 MHz, no loopback, no echo
			 *   MII, 25 MHz, no loopback, no echo
			 */
			cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
				? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
			if (ndev->phydev && ndev->phydev->speed == SPEED_10)
				cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
			writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);

			/* re-enable the gasket */
			writel(2, fep->hwp + FEC_MIIGSK_ENR);
		}
#endif
	}

#if !defined(CONFIG_M5272)
	/* enable pause frame*/
	if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
	    ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
	     ndev->phydev && ndev->phydev->pause)) {
		rcntl |= FEC_ENET_FCE;

		/* set FIFO threshold parameter to reduce overrun */
		writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
		writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
		writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
		writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);

		/* OPD */
		writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
	} else {
		rcntl &= ~FEC_ENET_FCE;
	}
#endif /* !defined(CONFIG_M5272) */

	writel(rcntl, fep->hwp + FEC_R_CNTRL);

	/* Setup multicast filter. */
	set_multicast_list(ndev);
#ifndef CONFIG_M5272
	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
#endif

	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
		/* enable ENET endian swap */
		ecntl |= (1 << 8);
		/* enable ENET store and forward mode */
		writel(1 << 8, fep->hwp + FEC_X_WMRK);
	}

	/* Extended (1588) buffer descriptors */
	if (fep->bufdesc_ex)
		ecntl |= (1 << 4);

#ifndef CONFIG_M5272
	/* Enable the MIB statistic event counters
	 * (0 << 31 == 0: presumably clears a disable bit — TODO confirm)
	 */
	writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT);
#endif

	/* And last, enable the transmit and receive processing */
	writel(ecntl, fep->hwp + FEC_ECNTRL);
	fec_enet_active_rxring(ndev);

	if (fep->bufdesc_ex)
		fec_ptp_start_cyclecounter(ndev);

	/* Enable interrupts we wish to service */
	if (fep->link)
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
	else
		writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);

	/* Init the interrupt coalescing */
	fec_enet_itr_coal_init(ndev);
}

/* Stop the controller: gracefully halt transmission, then either reset
 * the MAC or — when Wake-on-LAN sleep is armed — put it into magic-packet
 * sleep mode instead.
 */
static void
fec_stop(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
	/* preserve the current RMII mode bit across the reset below */
	u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
	u32 val;

	/* We cannot expect a graceful transmit stop without link !!! */
	if (fep->link) {
		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
		udelay(10);
		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
			netdev_err(ndev, "Graceful transmit stop did not complete!\n");
	}

	/* Whack a reset.  We should wait for this.
	 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
	 * instead of reset MAC itself.
	 */
	if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
		if (fep->quirks & FEC_QUIRK_HAS_AVB) {
			writel(0, fep->hwp + FEC_ECNTRL);
		} else {
			writel(1, fep->hwp + FEC_ECNTRL);
			udelay(10);
		}
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
	} else {
		/* WoL: keep the MAC alive, enable magic packet + sleep */
		writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
		val = readl(fep->hwp + FEC_ECNTRL);
		val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
		writel(val, fep->hwp + FEC_ECNTRL);

		if (pdata && pdata->sleep_mode_enable)
			pdata->sleep_mode_enable(true);
	}
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

	/* We have to keep ENET enabled to have MII interrupt stay working */
	if (fep->quirks & FEC_QUIRK_ENET_MAC &&
		!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
		writel(2, fep->hwp + FEC_ECNTRL);
		writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
	}
}


1128 1129 1130 1131 1132
static void
fec_timeout(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

1133 1134
	fec_dump(ndev);

1135 1136
	ndev->stats.tx_errors++;

1137
	schedule_work(&fep->tx_timeout_work);
1138 1139
}

1140
/* Deferred TX-timeout recovery, scheduled by fec_timeout().  Restarts
 * the controller under RTNL with NAPI and the TX queues quiesced.
 */
static void fec_enet_timeout_work(struct work_struct *work)
{
	struct fec_enet_private *fep =
		container_of(work, struct fec_enet_private, tx_timeout_work);
	struct net_device *ndev = fep->netdev;

	rtnl_lock();
	if (netif_device_present(ndev) || netif_running(ndev)) {
		/* quiesce NAPI and TX before poking the hardware */
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		fec_restart(ndev);
		netif_wake_queue(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
	}
	rtnl_unlock();
}

1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172
/* Convert a raw hardware timestamp (@ts) to nanoseconds via the
 * driver's timecounter and store it in @hwtstamps for the stack.
 */
static void
fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
	struct skb_shared_hwtstamps *hwtstamps)
{
	unsigned long irq_flags;
	u64 nsec;

	/* the timecounter state is shared with the PTP code; take the lock */
	spin_lock_irqsave(&fep->tmreg_lock, irq_flags);
	nsec = timecounter_cyc2time(&fep->tc, ts);
	spin_unlock_irqrestore(&fep->tmreg_lock, irq_flags);

	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ns_to_ktime(nsec);
}

L
Linus Torvalds 已提交
1173
/* Reclaim completed transmit buffer descriptors on one TX queue:
 * walk from dirty_tx towards bd.cur, unmap DMA, account errors/stats,
 * hand back timestamps, free skbs and wake the queue when enough
 * descriptors are free again.
 */
static void
fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
{
	struct	fec_enet_private *fep;
	struct bufdesc *bdp;
	unsigned short status;
	struct	sk_buff	*skb;
	struct fec_enet_priv_tx_q *txq;
	struct netdev_queue *nq;
	int	index = 0;
	int	entries_free;

	fep = netdev_priv(ndev);

	queue_id = FEC_ENET_GET_QUQUE(queue_id);

	txq = fep->tx_queue[queue_id];
	nq = netdev_get_tx_queue(ndev, queue_id);
	bdp = txq->dirty_tx;

	/* get next bdp of dirty_tx */
	bdp = fec_enet_get_nextdesc(bdp, &txq->bd);

	while (bdp != READ_ONCE(txq->bd.cur)) {
		/* Order the load of bd.cur and cbd_sc */
		rmb();
		status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
		if (status & BD_ENET_TX_READY)
			break;	/* hardware still owns this descriptor */

		index = fec_enet_get_bd_index(bdp, &txq->bd);

		skb = txq->tx_skbuff[index];
		txq->tx_skbuff[index] = NULL;
		/* TSO headers live in a driver-owned bounce area and are
		 * not DMA-mapped per descriptor
		 */
		if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
			dma_unmap_single(&fep->pdev->dev,
					 fec32_to_cpu(bdp->cbd_bufaddr),
					 fec16_to_cpu(bdp->cbd_datlen),
					 DMA_TO_DEVICE);
		bdp->cbd_bufaddr = cpu_to_fec32(0);
		/* only the last descriptor of a frame carries the skb */
		if (!skb)
			goto skb_done;

		/* Check for errors. */
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
				   BD_ENET_TX_RL | BD_ENET_TX_UN |
				   BD_ENET_TX_CSL)) {
			ndev->stats.tx_errors++;
			if (status & BD_ENET_TX_HB)  /* No heartbeat */
				ndev->stats.tx_heartbeat_errors++;
			if (status & BD_ENET_TX_LC)  /* Late collision */
				ndev->stats.tx_window_errors++;
			if (status & BD_ENET_TX_RL)  /* Retrans limit */
				ndev->stats.tx_aborted_errors++;
			if (status & BD_ENET_TX_UN)  /* Underrun */
				ndev->stats.tx_fifo_errors++;
			if (status & BD_ENET_TX_CSL) /* Carrier lost */
				ndev->stats.tx_carrier_errors++;
		} else {
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += skb->len;
		}

		/* Deliver the TX hardware timestamp if one was requested */
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
			fep->bufdesc_ex) {
			struct skb_shared_hwtstamps shhwtstamps;
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

			fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
			skb_tstamp_tx(skb, &shhwtstamps);
		}

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (status & BD_ENET_TX_DEF)
			ndev->stats.collisions++;

		/* Free the sk buffer associated with this last transmit */
		dev_kfree_skb_any(skb);
skb_done:
		/* Make sure the update to bdp and tx_skbuff are performed
		 * before dirty_tx
		 */
		wmb();
		txq->dirty_tx = bdp;

		/* Update pointer to next buffer descriptor to be transmitted */
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);

		/* Since we have freed up a buffer, the ring is no longer full
		 */
		if (netif_queue_stopped(ndev)) {
			entries_free = fec_enet_get_free_txdesc_num(txq);
			if (entries_free >= txq->tx_wake_threshold)
				netif_tx_wake_queue(nq);
		}
	}

	/* ERR006358: Keep the transmitter going */
	if (bdp != txq->bd.cur &&
	    readl(txq->bd.reg_desc_active) == 0)
		writel(0, txq->bd.reg_desc_active);
}

/* Reap completed transmit descriptors on every TX queue that flagged
 * work in fep->work_tx (bits set by fec_enet_collect_events()).
 */
static void
fec_enet_tx(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	u16 queue_id;

	/* First process class A queue, then Class B and Best Effort queue */
	for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) {
		clear_bit(queue_id, &fep->work_tx);
		fec_enet_tx_queue(ndev, queue_id);
	}
	/* redundant trailing "return;" removed (void function) */
}

1292 1293 1294 1295 1296 1297 1298 1299 1300 1301
static int
fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb)
{
	struct  fec_enet_private *fep = netdev_priv(ndev);
	int off;

	off = ((unsigned long)skb->data) & fep->rx_align;
	if (off)
		skb_reserve(skb, fep->rx_align + 1 - off);

1302 1303
	bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE));
	if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) {
1304 1305 1306 1307 1308 1309 1310 1311 1312
		if (net_ratelimit())
			netdev_err(ndev, "Rx DMA memory map failed\n");
		return -ENOMEM;
	}

	return 0;
}

/* Copybreak: for small frames (<= fep->rx_copybreak), copy the payload
 * into a freshly allocated skb so the original DMA buffer can stay on
 * the ring.  On success *skb is replaced with the copy and true is
 * returned; false means the caller must recycle the buffer itself.
 */
static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
			       struct bufdesc *bdp, u32 length, bool swap)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct sk_buff *copy;

	if (length > fep->rx_copybreak)
		return false;

	copy = netdev_alloc_skb(ndev, length);
	if (!copy)
		return false;

	/* make the DMA'd data visible to the CPU before copying */
	dma_sync_single_for_cpu(&fep->pdev->dev,
				fec32_to_cpu(bdp->cbd_bufaddr),
				FEC_ENET_RX_FRSIZE - fep->rx_align,
				DMA_FROM_DEVICE);
	if (swap)
		swap_buffer2(copy->data, (*skb)->data, length);
	else
		memcpy(copy->data, (*skb)->data, length);
	*skb = copy;

	return true;
}

T
Troy Kisky 已提交
1338
/* During a receive, the bd_rx.cur points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 *
 * Processes at most @budget frames on RX queue @queue_id and returns the
 * number of frames handled (NAPI accounting).
 */
static int
fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_rx_q *rxq;
	struct bufdesc *bdp;
	unsigned short status;
	struct  sk_buff *skb_new = NULL;
	struct  sk_buff *skb;
	ushort	pkt_len;
	__u8 *data;
	int	pkt_received = 0;
	struct	bufdesc_ex *ebdp = NULL;
	bool	vlan_packet_rcvd = false;
	u16	vlan_tag;
	int	index = 0;
	bool	is_copybreak;
	bool	need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;

#ifdef CONFIG_M532x
	flush_cache_all();
#endif
	queue_id = FEC_ENET_GET_QUQUE(queue_id);
	rxq = fep->rx_queue[queue_id];

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = rxq->bd.cur;

	/* loop while the hardware has handed descriptors back to us */
	while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {

		if (pkt_received >= budget)
			break;
		pkt_received++;

		/* ack the RX-frame event for this pass */
		writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);

		/* Check for errors. */
		status ^= BD_ENET_RX_LAST;
		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
			   BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST |
			   BD_ENET_RX_CL)) {
			ndev->stats.rx_errors++;
			if (status & BD_ENET_RX_OV) {
				/* FIFO overrun */
				ndev->stats.rx_fifo_errors++;
				goto rx_processing_done;
			}
			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH
						| BD_ENET_RX_LAST)) {
				/* Frame too long or too short. */
				ndev->stats.rx_length_errors++;
				if (status & BD_ENET_RX_LAST)
					netdev_err(ndev, "rcv is not +last\n");
			}
			if (status & BD_ENET_RX_CR)	/* CRC Error */
				ndev->stats.rx_crc_errors++;
			/* Report late collisions as a frame error. */
			if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				ndev->stats.rx_frame_errors++;
			goto rx_processing_done;
		}

		/* Process the incoming frame. */
		ndev->stats.rx_packets++;
		pkt_len = fec16_to_cpu(bdp->cbd_datlen);
		ndev->stats.rx_bytes += pkt_len;

		index = fec_enet_get_bd_index(bdp, &rxq->bd);
		skb = rxq->rx_skbuff[index];

		/* The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4,
						  need_swap);
		if (!is_copybreak) {
			/* hand the ring buffer upstream; replace it below */
			skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
			if (unlikely(!skb_new)) {
				ndev->stats.rx_dropped++;
				goto rx_processing_done;
			}
			dma_unmap_single(&fep->pdev->dev,
					 fec32_to_cpu(bdp->cbd_bufaddr),
					 FEC_ENET_RX_FRSIZE - fep->rx_align,
					 DMA_FROM_DEVICE);
		}

		prefetch(skb->data - NET_IP_ALIGN);
		skb_put(skb, pkt_len - 4);
		data = skb->data;

		if (!is_copybreak && need_swap)
			swap_buffer(data, pkt_len);

#if !defined(CONFIG_M5272)
		/* drop the two pad bytes inserted by FEC_RACC_SHIFT16 */
		if (fep->quirks & FEC_QUIRK_HAS_RACC)
			data = skb_pull_inline(skb, 2);
#endif

		/* Extract the enhanced buffer descriptor */
		ebdp = NULL;
		if (fep->bufdesc_ex)
			ebdp = (struct bufdesc_ex *)bdp;

		/* If this is a VLAN packet remove the VLAN Tag */
		vlan_packet_rcvd = false;
		if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    fep->bufdesc_ex &&
		    (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
			/* Push and remove the vlan tag */
			struct vlan_hdr *vlan_header =
					(struct vlan_hdr *) (data + ETH_HLEN);
			vlan_tag = ntohs(vlan_header->h_vlan_TCI);

			vlan_packet_rcvd = true;

			memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
			skb_pull(skb, VLAN_HLEN);
		}

		skb->protocol = eth_type_trans(skb, ndev);

		/* Get receive timestamp from the skb */
		if (fep->hwts_rx_en && fep->bufdesc_ex)
			fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
					  skb_hwtstamps(skb));

		if (fep->bufdesc_ex &&
		    (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
			if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) {
				/* don't check it */
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				skb_checksum_none_assert(skb);
			}
		}

		/* Handle received VLAN packets */
		if (vlan_packet_rcvd)
			__vlan_hwaccel_put_tag(skb,
					       htons(ETH_P_8021Q),
					       vlan_tag);

		napi_gro_receive(&fep->napi, skb);

		if (is_copybreak) {
			/* buffer stays on the ring; hand it back to the device */
			dma_sync_single_for_device(&fep->pdev->dev,
						   fec32_to_cpu(bdp->cbd_bufaddr),
						   FEC_ENET_RX_FRSIZE - fep->rx_align,
						   DMA_FROM_DEVICE);
		} else {
			rxq->rx_skbuff[index] = skb_new;
			fec_enet_new_rxbdp(ndev, bdp, skb_new);
		}

rx_processing_done:
		/* Clear the status flags for this buffer */
		status &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty */
		status |= BD_ENET_RX_EMPTY;

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
			ebdp->cbd_prot = 0;
			ebdp->cbd_bdu = 0;
		}
		/* Make sure the updates to rest of the descriptor are
		 * performed before transferring ownership.
		 */
		wmb();
		bdp->cbd_sc = cpu_to_fec16(status);

		/* Update BD pointer to next entry */
		bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);

		/* Doing this here will keep the FEC running while we process
		 * incoming frames.  On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
		writel(0, rxq->bd.reg_desc_active);
	}
	rxq->bd.cur = bdp;
	return pkt_received;
}
L
Linus Torvalds 已提交
1533

1534 1535 1536 1537 1538 1539 1540 1541
/* Run RX processing for every queue flagged in fep->work_rx, sharing
 * the NAPI @budget between them.  A queue's work bit is cleared only
 * when it was drained below its share of the budget.
 * Returns the total number of packets received.
 */
static int
fec_enet_rx(struct net_device *ndev, int budget)
{
	int     pkt_received = 0;
	u16	queue_id;
	struct fec_enet_private *fep = netdev_priv(ndev);

	for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
		int ret;

		ret = fec_enet_rx_queue(ndev,
					budget - pkt_received, queue_id);

		/* queue fully drained within its remaining budget */
		if (ret < budget - pkt_received)
			clear_bit(queue_id, &fep->work_rx);

		pkt_received += ret;
	}
	return pkt_received;
}

1555 1556 1557 1558 1559 1560 1561 1562
/* Translate interrupt event bits into per-queue work bits.
 * Queue mapping (as encoded below): the base RXF/TXF events map to work
 * bit 2, RXF_1/TXF_1 to bit 0 and RXF_2/TXF_2 to bit 1.
 * Returns false when @int_events carried no events at all.
 */
static bool
fec_enet_collect_events(struct fec_enet_private *fep, uint int_events)
{
	if (int_events == 0)
		return false;

	if (int_events & FEC_ENET_RXF)
		fep->work_rx |= (1 << 2);
	if (int_events & FEC_ENET_RXF_1)
		fep->work_rx |= (1 << 0);
	if (int_events & FEC_ENET_RXF_2)
		fep->work_rx |= (1 << 1);

	if (int_events & FEC_ENET_TXF)
		fep->work_tx |= (1 << 2);
	if (int_events & FEC_ENET_TXF_1)
		fep->work_tx |= (1 << 0);
	if (int_events & FEC_ENET_TXF_2)
		fep->work_tx |= (1 << 1);

	return true;
}

1578 1579 1580 1581 1582 1583 1584 1585
/* Main interrupt handler: acknowledge all pending events, record RX/TX
 * work, hand packet processing off to NAPI, and complete any pending
 * MDIO transfer.
 */
static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct fec_enet_private *fep = netdev_priv(ndev);
	uint int_events;
	irqreturn_t ret = IRQ_NONE;

	int_events = readl(fep->hwp + FEC_IEVENT);
	/* writing the events back acknowledges them */
	writel(int_events, fep->hwp + FEC_IEVENT);
	fec_enet_collect_events(fep, int_events);

	if ((fep->work_tx || fep->work_rx) && fep->link) {
		ret = IRQ_HANDLED;

		if (napi_schedule_prep(&fep->napi)) {
			/* Disable the NAPI interrupts */
			writel(FEC_NAPI_IMASK, fep->hwp + FEC_IMASK);
			__napi_schedule(&fep->napi);
		}
	}

	/* MDIO transfer finished: wake up fec_enet_mdio_read/write() */
	if (int_events & FEC_ENET_MII) {
		ret = IRQ_HANDLED;
		complete(&fep->mdio_done);
	}

	if (fep->ptp_clock)
		fec_ptp_check_pps_event(fep);

	return ret;
}

1611 1612 1613 1614
/* NAPI poll callback: receive up to @budget packets, reap completed TX
 * descriptors, and re-enable interrupts once all work is done.
 * Returns the number of RX packets processed.
 */
static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct fec_enet_private *fep = netdev_priv(ndev);
	int pkts;

	pkts = fec_enet_rx(ndev, budget);

	fec_enet_tx(ndev);

	if (pkts < budget) {
		/* all work done: leave polling mode, unmask interrupts */
		napi_complete_done(napi, pkts);
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
	}
	return pkts;
}
1627

1628
/* ------------------------------------------------------------------------- */
1629
/* Determine the interface MAC address, trying sources in priority
 * order (module parameter, device tree, platform data/flash, MAC
 * registers programmed by the bootloader, random fallback) and store
 * the result in ndev->dev_addr.
 */
static void fec_get_mac(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
	unsigned char *iap, tmpaddr[ETH_ALEN];

	/*
	 * try to get mac address in following order:
	 *
	 * 1) module parameter via kernel command line in form
	 *    fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
	 */
	iap = macaddr;

	/*
	 * 2) from device tree data
	 */
	if (!is_valid_ether_addr(iap)) {
		struct device_node *np = fep->pdev->dev.of_node;
		if (np) {
			const char *mac = of_get_mac_address(np);
			if (mac)
				iap = (unsigned char *) mac;
		}
	}

	/*
	 * 3) from flash or fuse (via platform data)
	 */
	if (!is_valid_ether_addr(iap)) {
#ifdef CONFIG_M5272
		if (FEC_FLASHMAC)
			iap = (unsigned char *)FEC_FLASHMAC;
#else
		if (pdata)
			iap = (unsigned char *)&pdata->mac;
#endif
	}

	/*
	 * 4) FEC mac registers set by bootloader
	 */
	if (!is_valid_ether_addr(iap)) {
		*((__be32 *) &tmpaddr[0]) =
			cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
		*((__be16 *) &tmpaddr[4]) =
			cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
		iap = &tmpaddr[0];
	}

	/*
	 * 5) random mac address
	 */
	if (!is_valid_ether_addr(iap)) {
		/* Report it and use a random ethernet address instead */
		netdev_err(ndev, "Invalid MAC address: %pM\n", iap);
		eth_hw_addr_random(ndev);
		netdev_info(ndev, "Using random MAC address: %pM\n",
			    ndev->dev_addr);
		return;
	}

	memcpy(ndev->dev_addr, iap, ETH_ALEN);

	/* Adjust MAC if using macaddr: make the second interface unique
	 * by adding the device id to the last address byte
	 */
	if (iap == macaddr)
		 ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
}

1698
/* ------------------------------------------------------------------------- */
L
Linus Torvalds 已提交
1699

1700 1701 1702
/*
 * Phy section
 */
1703
/* phylib link-change callback: mirror the PHY's link/duplex/speed state
 * into the driver and restart (or stop) the controller when any of them
 * changed.
 */
static void fec_enet_adjust_link(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phy_dev = ndev->phydev;
	int status_change = 0;

	/* Prevent a state halted on mii error */
	if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
		phy_dev->state = PHY_RESUMING;
		return;
	}

	/*
	 * If the netdev is down, or is going down, we're not interested
	 * in link state events, so just mark our idea of the link as down
	 * and ignore the event.
	 */
	if (!netif_running(ndev) || !netif_device_present(ndev)) {
		fep->link = 0;
	} else if (phy_dev->link) {
		if (!fep->link) {
			fep->link = phy_dev->link;
			status_change = 1;
		}

		if (fep->full_duplex != phy_dev->duplex) {
			fep->full_duplex = phy_dev->duplex;
			status_change = 1;
		}

		if (phy_dev->speed != fep->speed) {
			fep->speed = phy_dev->speed;
			status_change = 1;
		}

		/* if any of the above changed restart the FEC */
		if (status_change) {
			napi_disable(&fep->napi);
			netif_tx_lock_bh(ndev);
			fec_restart(ndev);
			netif_wake_queue(ndev);
			netif_tx_unlock_bh(ndev);
			napi_enable(&fep->napi);
		}
	} else {
		/* link went down: quiesce and stop the controller */
		if (fep->link) {
			napi_disable(&fep->napi);
			netif_tx_lock_bh(ndev);
			fec_stop(ndev);
			netif_tx_unlock_bh(ndev);
			napi_enable(&fep->napi);
			fep->link = phy_dev->link;
			status_change = 1;
		}
	}

	if (status_change)
		phy_print_status(phy_dev);
}
L
Linus Torvalds 已提交
1762

1763
/* MDIO bus read: start a read frame on the MII management interface and
 * wait (interrupt-driven, via fep->mdio_done) for its completion.
 * Returns the register value, or a negative errno on PM or timeout
 * failure.
 */
static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct fec_enet_private *fep = bus->priv;
	struct device *dev = &fep->pdev->dev;
	unsigned long time_left;
	int ret = 0;

	/* the MDIO block is clocked via runtime PM; wake it up first */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		return ret;

	fep->mii_timeout = 0;
	reinit_completion(&fep->mdio_done);

	/* start a read op */
	writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
		FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	time_left = wait_for_completion_timeout(&fep->mdio_done,
			usecs_to_jiffies(FEC_MII_TIMEOUT));
	if (time_left == 0) {
		fep->mii_timeout = 1;
		netdev_err(fep->netdev, "MDIO read timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));

out:
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return ret;
}
1800

1801 1802
/* MDIO bus write: start a write frame on the MII management interface
 * and wait (interrupt-driven, via fep->mdio_done) for its completion.
 * Returns 0 on success or a negative errno on PM or timeout failure.
 */
static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct fec_enet_private *fep = bus->priv;
	struct device *dev = &fep->pdev->dev;
	unsigned long time_left;
	int ret;

	/* the MDIO block is clocked via runtime PM; wake it up first */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		return ret;
	else
		ret = 0;

	fep->mii_timeout = 0;
	reinit_completion(&fep->mdio_done);

	/* start a write op */
	writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
		FEC_MMFR_TA | FEC_MMFR_DATA(value),
		fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	time_left = wait_for_completion_timeout(&fep->mdio_done,
			usecs_to_jiffies(FEC_MII_TIMEOUT));
	if (time_left == 0) {
		fep->mii_timeout = 1;
		netdev_err(fep->netdev, "MDIO write timeout\n");
		ret  = -ETIMEDOUT;
	}

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return ret;
}
L
Linus Torvalds 已提交
1838

1839 1840 1841 1842 1843 1844 1845 1846 1847
/* Enable (@enable == true) or disable the FEC clocks in order:
 * ahb, enet_out, ptp, ref.  On a partial enable failure every clock
 * that was already brought up is rolled back before returning.
 * Returns 0 on success or the negative errno from clk_prepare_enable().
 *
 * Fix: the original failed_clk_ref path disabled clk_ref — which had
 * just FAILED to enable (unbalanced disable) — and never rolled back
 * clk_ptp, leaking its enable count.  Roll back clk_ptp (under the
 * ptp_clk_mutex, keeping ptp_clk_on consistent) instead.
 */
static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;

	if (enable) {
		ret = clk_prepare_enable(fep->clk_ahb);
		if (ret)
			return ret;

		ret = clk_prepare_enable(fep->clk_enet_out);
		if (ret)
			goto failed_clk_enet_out;

		if (fep->clk_ptp) {
			mutex_lock(&fep->ptp_clk_mutex);
			ret = clk_prepare_enable(fep->clk_ptp);
			if (ret) {
				mutex_unlock(&fep->ptp_clk_mutex);
				goto failed_clk_ptp;
			} else {
				fep->ptp_clk_on = true;
			}
			mutex_unlock(&fep->ptp_clk_mutex);
		}

		ret = clk_prepare_enable(fep->clk_ref);
		if (ret)
			goto failed_clk_ref;
	} else {
		clk_disable_unprepare(fep->clk_ahb);
		clk_disable_unprepare(fep->clk_enet_out);
		if (fep->clk_ptp) {
			mutex_lock(&fep->ptp_clk_mutex);
			clk_disable_unprepare(fep->clk_ptp);
			fep->ptp_clk_on = false;
			mutex_unlock(&fep->ptp_clk_mutex);
		}
		clk_disable_unprepare(fep->clk_ref);
	}

	return 0;

failed_clk_ref:
	/* clk_ref never got enabled; roll back the PTP clock instead */
	if (fep->clk_ptp) {
		mutex_lock(&fep->ptp_clk_mutex);
		clk_disable_unprepare(fep->clk_ptp);
		fep->ptp_clk_on = false;
		mutex_unlock(&fep->ptp_clk_mutex);
	}
failed_clk_ptp:
	/* clk_disable_unprepare() is a no-op on a NULL clk, but keep the
	 * original explicit check for optional clocks
	 */
	if (fep->clk_enet_out)
		clk_disable_unprepare(fep->clk_enet_out);
failed_clk_enet_out:
	clk_disable_unprepare(fep->clk_ahb);

	return ret;
}

1894
/* Connect the net device to its PHY: via the device-tree phandle when
 * one exists, otherwise by scanning the MDIO bus (falling back to a
 * fixed link when no PHY is found), then mask the PHY's advertised
 * features down to what this MAC supports.
 * Returns 0 on success or a negative errno.
 */
static int fec_enet_mii_probe(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phy_dev = NULL;
	char mdio_bus_id[MII_BUS_ID_SIZE];
	char phy_name[MII_BUS_ID_SIZE + 3];
	int phy_id;
	int dev_id = fep->dev_id;

	if (fep->phy_node) {
		phy_dev = of_phy_connect(ndev, fep->phy_node,
					 &fec_enet_adjust_link, 0,
					 fep->phy_interface);
		if (!phy_dev) {
			netdev_err(ndev, "Unable to connect to phy\n");
			return -ENODEV;
		}
	} else {
		/* check for attached phy */
		for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
			if (!mdiobus_is_registered_device(fep->mii_bus, phy_id))
				continue;
			/* skip one registered PHY per earlier device id */
			if (dev_id--)
				continue;
			strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
			break;
		}

		if (phy_id >= PHY_MAX_ADDR) {
			netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
			strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
			phy_id = 0;
		}

		snprintf(phy_name, sizeof(phy_name),
			 PHY_ID_FMT, mdio_bus_id, phy_id);
		phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
				      fep->phy_interface);
	}

	if (IS_ERR(phy_dev)) {
		netdev_err(ndev, "could not attach to PHY\n");
		return PTR_ERR(phy_dev);
	}

	/* mask with MAC supported features */
	if (fep->quirks & FEC_QUIRK_HAS_GBIT) {
		phy_dev->supported &= PHY_GBIT_FEATURES;
		/* half-duplex gigabit is not supported by the MAC */
		phy_dev->supported &= ~SUPPORTED_1000baseT_Half;
#if !defined(CONFIG_M5272)
		phy_dev->supported |= SUPPORTED_Pause;
#endif
	}
	else
		phy_dev->supported &= PHY_BASIC_FEATURES;

	phy_dev->advertising = phy_dev->supported;

	fep->link = 0;
	fep->full_duplex = 0;

	phy_attached_info(phy_dev);

	return 0;
}

1960
static int fec_enet_mii_init(struct platform_device *pdev)
1961
{
1962
	static struct mii_bus *fec0_mii_bus;
1963 1964
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);
1965
	struct device_node *node;
1966
	int err = -ENXIO;
1967
	u32 mii_speed, holdtime;
1968

1969
	/*
1970
	 * The i.MX28 dual fec interfaces are not equal.
1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984
	 * Here are the differences:
	 *
	 *  - fec0 supports MII & RMII modes while fec1 only supports RMII
	 *  - fec0 acts as the 1588 time master while fec1 is slave
	 *  - external phys can only be configured by fec0
	 *
	 * That is to say fec1 can not work independently. It only works
	 * when fec0 is working. The reason behind this design is that the
	 * second interface is added primarily for Switch mode.
	 *
	 * Because of the last point above, both phys are attached on fec0
	 * mdio interface in board design, and need to be configured by
	 * fec0 mii_bus.
	 */
1985
	if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
1986
		/* fec1 uses fec0 mii_bus */
L
Lothar Waßmann 已提交
1987 1988 1989 1990 1991 1992
		if (mii_cnt && fec0_mii_bus) {
			fep->mii_bus = fec0_mii_bus;
			mii_cnt++;
			return 0;
		}
		return -ENOENT;
1993 1994
	}

1995
	fep->mii_timeout = 0;
L
Linus Torvalds 已提交
1996

1997 1998
	/*
	 * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
S
Shawn Guo 已提交
1999 2000 2001 2002 2003
	 *
	 * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
	 * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'.  The i.MX28
	 * Reference Manual has an error on this, and gets fixed on i.MX6Q
	 * document.
2004
	 */
2005
	mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
2006
	if (fep->quirks & FEC_QUIRK_ENET_MAC)
2007 2008 2009
		mii_speed--;
	if (mii_speed > 63) {
		dev_err(&pdev->dev,
2010
			"fec clock (%lu) too fast to get right mii speed\n",
2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030 2031
			clk_get_rate(fep->clk_ipg));
		err = -EINVAL;
		goto err_out;
	}

	/*
	 * The i.MX28 and i.MX6 types have another filed in the MSCR (aka
	 * MII_SPEED) register that defines the MDIO output hold time. Earlier
	 * versions are RAZ there, so just ignore the difference and write the
	 * register always.
	 * The minimal hold time according to IEE802.3 (clause 22) is 10 ns.
	 * HOLDTIME + 1 is the number of clk cycles the fec is holding the
	 * output.
	 * The HOLDTIME bitfield takes values between 0 and 7 (inclusive).
	 * Given that ceil(clkrate / 5000000) <= 64, the calculation for
	 * holdtime cannot result in a value greater than 3.
	 */
	holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1;

	fep->phy_speed = mii_speed << 1 | holdtime << 8;

2032
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
L
Linus Torvalds 已提交
2033

2034 2035 2036 2037
	fep->mii_bus = mdiobus_alloc();
	if (fep->mii_bus == NULL) {
		err = -ENOMEM;
		goto err_out;
L
Linus Torvalds 已提交
2038 2039
	}

2040 2041 2042
	fep->mii_bus->name = "fec_enet_mii_bus";
	fep->mii_bus->read = fec_enet_mdio_read;
	fep->mii_bus->write = fec_enet_mdio_write;
2043 2044
	snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		pdev->name, fep->dev_id + 1);
2045 2046 2047
	fep->mii_bus->priv = fep;
	fep->mii_bus->parent = &pdev->dev;

2048 2049 2050 2051 2052 2053 2054 2055 2056
	node = of_get_child_by_name(pdev->dev.of_node, "mdio");
	if (node) {
		err = of_mdiobus_register(fep->mii_bus, node);
		of_node_put(node);
	} else {
		err = mdiobus_register(fep->mii_bus);
	}

	if (err)
2057
		goto err_out_free_mdiobus;
L
Linus Torvalds 已提交
2058

L
Lothar Waßmann 已提交
2059 2060
	mii_cnt++;

2061
	/* save fec0 mii_bus */
2062
	if (fep->quirks & FEC_QUIRK_SINGLE_MDIO)
2063 2064
		fec0_mii_bus = fep->mii_bus;

2065
	return 0;
L
Linus Torvalds 已提交
2066

2067 2068 2069 2070
err_out_free_mdiobus:
	mdiobus_free(fep->mii_bus);
err_out:
	return err;
L
Linus Torvalds 已提交
2071 2072
}

2073
static void fec_enet_mii_remove(struct fec_enet_private *fep)
L
Linus Torvalds 已提交
2074
{
L
Lothar Waßmann 已提交
2075 2076 2077 2078
	if (--mii_cnt == 0) {
		mdiobus_unregister(fep->mii_bus);
		mdiobus_free(fep->mii_bus);
	}
L
Linus Torvalds 已提交
2079 2080
}

2081
static void fec_enet_get_drvinfo(struct net_device *ndev,
2082
				 struct ethtool_drvinfo *info)
L
Linus Torvalds 已提交
2083
{
2084
	struct fec_enet_private *fep = netdev_priv(ndev);
2085

2086 2087 2088 2089
	strlcpy(info->driver, fep->pdev->dev.driver->name,
		sizeof(info->driver));
	strlcpy(info->version, "Revision: 1.0", sizeof(info->version));
	strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
L
Linus Torvalds 已提交
2090 2091
}

2092 2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105 2106
static int fec_enet_get_regs_len(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct resource *r;
	int s = 0;

	r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0);
	if (r)
		s = resource_size(r);

	return s;
}

/* List of registers that can be safety be read to dump them with ethtool */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
2107
	defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
2108 2109 2110 2111 2112 2113 2114 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124 2125 2126 2127 2128 2129 2130 2131 2132 2133 2134 2135 2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166
static u32 fec_enet_register_offset[] = {
	FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
	FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
	FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1,
	FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH,
	FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW,
	FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1,
	FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2,
	FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0,
	FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
	FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2,
	FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1,
	FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME,
	RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
	RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
	RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
	RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
	RMON_T_P_GTE2048, RMON_T_OCTETS,
	IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
	IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
	IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
	RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
	RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
	RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
	RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
	RMON_R_P_GTE2048, RMON_R_OCTETS,
	IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
	IEEE_R_FDXFC, IEEE_R_OCTETS_OK
};
#else
static u32 fec_enet_register_offset[] = {
	FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0,
	FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0,
	FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED,
	FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL,
	FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH,
	FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0,
	FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0,
	FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0,
	FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2
};
#endif

/* ethtool register dump: copy each known-safe register into the buffer at
 * its natural word offset; everything else is left zeroed.
 */
static void fec_enet_get_regs(struct net_device *ndev,
			      struct ethtool_regs *regs, void *regbuf)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
	u32 *buf = (u32 *)regbuf;
	u32 i, off;

	memset(buf, 0, regs->len);

	for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
		off = fec_enet_register_offset[i] / 4;
		buf[off] = readl(&theregs[off]);
	}
}

2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195
static int fec_enet_get_ts_info(struct net_device *ndev,
				struct ethtool_ts_info *info)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (fep->bufdesc_ex) {

		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
					SOF_TIMESTAMPING_RX_SOFTWARE |
					SOF_TIMESTAMPING_SOFTWARE |
					SOF_TIMESTAMPING_TX_HARDWARE |
					SOF_TIMESTAMPING_RX_HARDWARE |
					SOF_TIMESTAMPING_RAW_HARDWARE;
		if (fep->ptp_clock)
			info->phc_index = ptp_clock_index(fep->ptp_clock);
		else
			info->phc_index = -1;

		info->tx_types = (1 << HWTSTAMP_TX_OFF) |
				 (1 << HWTSTAMP_TX_ON);

		info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
				   (1 << HWTSTAMP_FILTER_ALL);
		return 0;
	} else {
		return ethtool_op_get_ts_info(ndev, info);
	}
}

G
Guenter Roeck 已提交
2196 2197
#if !defined(CONFIG_M5272)

2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212
static void fec_enet_get_pauseparam(struct net_device *ndev,
				    struct ethtool_pauseparam *pause)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0;
	pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0;
	pause->rx_pause = pause->tx_pause;
}

static int fec_enet_set_pauseparam(struct net_device *ndev,
				   struct ethtool_pauseparam *pause)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

2213
	if (!ndev->phydev)
2214 2215
		return -ENODEV;

2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227 2228
	if (pause->tx_pause != pause->rx_pause) {
		netdev_info(ndev,
			"hardware only support enable/disable both tx and rx");
		return -EINVAL;
	}

	fep->pause_flag = 0;

	/* tx pause must be same as rx pause */
	fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
	fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;

	if (pause->rx_pause || pause->autoneg) {
2229 2230
		ndev->phydev->supported |= ADVERTISED_Pause;
		ndev->phydev->advertising |= ADVERTISED_Pause;
2231
	} else {
2232 2233
		ndev->phydev->supported &= ~ADVERTISED_Pause;
		ndev->phydev->advertising &= ~ADVERTISED_Pause;
2234 2235 2236 2237 2238
	}

	if (pause->autoneg) {
		if (netif_running(ndev))
			fec_stop(ndev);
2239
		phy_start_aneg(ndev->phydev);
2240
	}
2241 2242 2243
	if (netif_running(ndev)) {
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
2244
		fec_restart(ndev);
2245
		netif_wake_queue(ndev);
2246
		netif_tx_unlock_bh(ndev);
2247 2248
		napi_enable(&fep->napi);
	}
2249 2250 2251 2252

	return 0;
}

2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318
static const struct fec_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} fec_stats[] = {
	/* RMON TX */
	{ "tx_dropped", RMON_T_DROP },
	{ "tx_packets", RMON_T_PACKETS },
	{ "tx_broadcast", RMON_T_BC_PKT },
	{ "tx_multicast", RMON_T_MC_PKT },
	{ "tx_crc_errors", RMON_T_CRC_ALIGN },
	{ "tx_undersize", RMON_T_UNDERSIZE },
	{ "tx_oversize", RMON_T_OVERSIZE },
	{ "tx_fragment", RMON_T_FRAG },
	{ "tx_jabber", RMON_T_JAB },
	{ "tx_collision", RMON_T_COL },
	{ "tx_64byte", RMON_T_P64 },
	{ "tx_65to127byte", RMON_T_P65TO127 },
	{ "tx_128to255byte", RMON_T_P128TO255 },
	{ "tx_256to511byte", RMON_T_P256TO511 },
	{ "tx_512to1023byte", RMON_T_P512TO1023 },
	{ "tx_1024to2047byte", RMON_T_P1024TO2047 },
	{ "tx_GTE2048byte", RMON_T_P_GTE2048 },
	{ "tx_octets", RMON_T_OCTETS },

	/* IEEE TX */
	{ "IEEE_tx_drop", IEEE_T_DROP },
	{ "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
	{ "IEEE_tx_1col", IEEE_T_1COL },
	{ "IEEE_tx_mcol", IEEE_T_MCOL },
	{ "IEEE_tx_def", IEEE_T_DEF },
	{ "IEEE_tx_lcol", IEEE_T_LCOL },
	{ "IEEE_tx_excol", IEEE_T_EXCOL },
	{ "IEEE_tx_macerr", IEEE_T_MACERR },
	{ "IEEE_tx_cserr", IEEE_T_CSERR },
	{ "IEEE_tx_sqe", IEEE_T_SQE },
	{ "IEEE_tx_fdxfc", IEEE_T_FDXFC },
	{ "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },

	/* RMON RX */
	{ "rx_packets", RMON_R_PACKETS },
	{ "rx_broadcast", RMON_R_BC_PKT },
	{ "rx_multicast", RMON_R_MC_PKT },
	{ "rx_crc_errors", RMON_R_CRC_ALIGN },
	{ "rx_undersize", RMON_R_UNDERSIZE },
	{ "rx_oversize", RMON_R_OVERSIZE },
	{ "rx_fragment", RMON_R_FRAG },
	{ "rx_jabber", RMON_R_JAB },
	{ "rx_64byte", RMON_R_P64 },
	{ "rx_65to127byte", RMON_R_P65TO127 },
	{ "rx_128to255byte", RMON_R_P128TO255 },
	{ "rx_256to511byte", RMON_R_P256TO511 },
	{ "rx_512to1023byte", RMON_R_P512TO1023 },
	{ "rx_1024to2047byte", RMON_R_P1024TO2047 },
	{ "rx_GTE2048byte", RMON_R_P_GTE2048 },
	{ "rx_octets", RMON_R_OCTETS },

	/* IEEE RX */
	{ "IEEE_rx_drop", IEEE_R_DROP },
	{ "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
	{ "IEEE_rx_crc", IEEE_R_CRC },
	{ "IEEE_rx_align", IEEE_R_ALIGN },
	{ "IEEE_rx_macerr", IEEE_R_MACERR },
	{ "IEEE_rx_fdxfc", IEEE_R_FDXFC },
	{ "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
};

2319 2320
#define FEC_STATS_SIZE		(ARRAY_SIZE(fec_stats) * sizeof(u64))

2321
static void fec_enet_update_ethtool_stats(struct net_device *dev)
2322 2323 2324 2325 2326
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337
		fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
}

/* ethtool -S: refresh the shadow counters from hardware (only safe while
 * the interface is up and clocks are running) and copy them out.
 */
static void fec_enet_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats, u64 *data)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	if (netif_running(dev))
		fec_enet_update_ethtool_stats(dev);

	memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
}

/* ethtool: emit the statistics name strings, one ETH_GSTRING_LEN slot each. */
static void fec_enet_get_strings(struct net_device *netdev,
	u32 stringset, u8 *data)
{
	int i;
	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
				fec_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

/* ethtool: number of entries in the requested string set. */
static int fec_enet_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(fec_stats);
	default:
		return -EOPNOTSUPP;
	}
}

2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378
static void fec_enet_clear_ethtool_stats(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int i;

	/* Disable MIB statistics counters */
	writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT);

	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
		writel(0, fep->hwp + fec_stats[i].offset);

	/* Don't disable MIB statistics counters */
	writel(0, fep->hwp + FEC_MIB_CTRLSTAT);
}

#else	/* !defined(CONFIG_M5272) */
#define FEC_STATS_SIZE	0
/* The M5272 FEC has no MIB counter block; provide empty stubs. */
static inline void fec_enet_update_ethtool_stats(struct net_device *dev)
{
}

static inline void fec_enet_clear_ethtool_stats(struct net_device *dev)
{
}
#endif /* !defined(CONFIG_M5272) */
2389

2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428
/* ITR clock source is enet system clock (clk_ahb).
 * TCTT unit is cycle_ns * 64 cycle
 * So, the ICTT value = X us / (cycle_ns * 64)
 */
static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	return us * (fep->itr_clk_rate / 64000) / 1000;
}

/* Set threshold for interrupt coalescing */
static void fec_enet_itr_coal_set(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int rx_itr, tx_itr;

	/* Must be greater than zero to avoid unpredictable behavior */
	if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
	    !fep->tx_time_itr || !fep->tx_pkts_itr)
		return;

	/* Select enet system clock as Interrupt Coalescing
	 * timer Clock Source
	 */
	rx_itr = FEC_ITR_CLK_SEL;
	tx_itr = FEC_ITR_CLK_SEL;

	/* set ICFT and ICTT */
	rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
	rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr));
	tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
	tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr));

	rx_itr |= FEC_ITR_EN;
	tx_itr |= FEC_ITR_EN;

	writel(tx_itr, fep->hwp + FEC_TXIC0);
	writel(rx_itr, fep->hwp + FEC_RXIC0);
2429 2430 2431 2432 2433 2434
	if (fep->quirks & FEC_QUIRK_HAS_AVB) {
		writel(tx_itr, fep->hwp + FEC_TXIC1);
		writel(rx_itr, fep->hwp + FEC_RXIC1);
		writel(tx_itr, fep->hwp + FEC_TXIC2);
		writel(rx_itr, fep->hwp + FEC_RXIC2);
	}
2435 2436 2437 2438 2439 2440 2441
}

static int
fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

2442
	if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459
		return -EOPNOTSUPP;

	ec->rx_coalesce_usecs = fep->rx_time_itr;
	ec->rx_max_coalesced_frames = fep->rx_pkts_itr;

	ec->tx_coalesce_usecs = fep->tx_time_itr;
	ec->tx_max_coalesced_frames = fep->tx_pkts_itr;

	return 0;
}

static int
fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int cycle;

2460
	if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
2461 2462 2463
		return -EOPNOTSUPP;

	if (ec->rx_max_coalesced_frames > 255) {
2464
		pr_err("Rx coalesced frames exceed hardware limitation\n");
2465 2466 2467 2468
		return -EINVAL;
	}

	if (ec->tx_max_coalesced_frames > 255) {
2469
		pr_err("Tx coalesced frame exceed hardware limitation\n");
2470 2471 2472 2473 2474
		return -EINVAL;
	}

	cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
	if (cycle > 0xFFFF) {
2475
		pr_err("Rx coalesced usec exceed hardware limitation\n");
2476 2477 2478 2479 2480
		return -EINVAL;
	}

	cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
	if (cycle > 0xFFFF) {
2481
		pr_err("Rx coalesced usec exceed hardware limitation\n");
2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508
		return -EINVAL;
	}

	fep->rx_time_itr = ec->rx_coalesce_usecs;
	fep->rx_pkts_itr = ec->rx_max_coalesced_frames;

	fep->tx_time_itr = ec->tx_coalesce_usecs;
	fep->tx_pkts_itr = ec->tx_max_coalesced_frames;

	fec_enet_itr_coal_set(ndev);

	return 0;
}

/* Program the default interrupt-coalescing parameters at init time. */
static void fec_enet_itr_coal_init(struct net_device *ndev)
{
	struct ethtool_coalesce ec;

	ec.rx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
	ec.rx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;

	ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
	ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;

	fec_enet_set_coalesce(ndev, &ec);
}

2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546
static int fec_enet_get_tunable(struct net_device *netdev,
				const struct ethtool_tunable *tuna,
				void *data)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)data = fep->rx_copybreak;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/* ethtool tunable write: only ETHTOOL_RX_COPYBREAK is supported. */
static int fec_enet_set_tunable(struct net_device *netdev,
				const struct ethtool_tunable *tuna,
				const void *data)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		fep->rx_copybreak = *(u32 *)data;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

N
Nimrod Andy 已提交
2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584
static void
fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) {
		wol->supported = WAKE_MAGIC;
		wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0;
	} else {
		wol->supported = wol->wolopts = 0;
	}
}

/* ethtool: enable/disable magic-packet Wake-on-LAN and arm/disarm the
 * wakeup IRQ accordingly.
 */
static int
fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET))
		return -EINVAL;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
	if (device_may_wakeup(&ndev->dev)) {
		fep->wol_flag |= FEC_WOL_FLAG_ENABLE;
		if (fep->irq[0] > 0)
			enable_irq_wake(fep->irq[0]);
	} else {
		fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE);
		if (fep->irq[0] > 0)
			disable_irq_wake(fep->irq[0]);
	}

	return 0;
}

S
stephen hemminger 已提交
2585
static const struct ethtool_ops fec_enet_ethtool_ops = {
2586
	.get_drvinfo		= fec_enet_get_drvinfo,
2587 2588
	.get_regs_len		= fec_enet_get_regs_len,
	.get_regs		= fec_enet_get_regs,
2589
	.nway_reset		= phy_ethtool_nway_reset,
2590
	.get_link		= ethtool_op_get_link,
2591 2592
	.get_coalesce		= fec_enet_get_coalesce,
	.set_coalesce		= fec_enet_set_coalesce,
2593
#ifndef CONFIG_M5272
2594 2595
	.get_pauseparam		= fec_enet_get_pauseparam,
	.set_pauseparam		= fec_enet_set_pauseparam,
2596
	.get_strings		= fec_enet_get_strings,
2597
	.get_ethtool_stats	= fec_enet_get_ethtool_stats,
2598 2599
	.get_sset_count		= fec_enet_get_sset_count,
#endif
2600
	.get_ts_info		= fec_enet_get_ts_info,
2601 2602
	.get_tunable		= fec_enet_get_tunable,
	.set_tunable		= fec_enet_set_tunable,
N
Nimrod Andy 已提交
2603 2604
	.get_wol		= fec_enet_get_wol,
	.set_wol		= fec_enet_set_wol,
2605 2606
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
2607
};
L
Linus Torvalds 已提交
2608

2609
static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
L
Linus Torvalds 已提交
2610
{
2611
	struct fec_enet_private *fep = netdev_priv(ndev);
2612
	struct phy_device *phydev = ndev->phydev;
L
Linus Torvalds 已提交
2613

2614
	if (!netif_running(ndev))
2615
		return -EINVAL;
L
Linus Torvalds 已提交
2616

2617 2618 2619
	if (!phydev)
		return -ENODEV;

2620 2621 2622 2623 2624 2625
	if (fep->bufdesc_ex) {
		if (cmd == SIOCSHWTSTAMP)
			return fec_ptp_set(ndev, rq);
		if (cmd == SIOCGHWTSTAMP)
			return fec_ptp_get(ndev, rq);
	}
2626

2627
	return phy_mii_ioctl(phydev, rq, cmd);
L
Linus Torvalds 已提交
2628 2629
}

2630
static void fec_enet_free_buffers(struct net_device *ndev)
S
Sascha Hauer 已提交
2631
{
2632
	struct fec_enet_private *fep = netdev_priv(ndev);
2633
	unsigned int i;
S
Sascha Hauer 已提交
2634 2635
	struct sk_buff *skb;
	struct bufdesc	*bdp;
2636 2637
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
2638 2639 2640 2641
	unsigned int q;

	for (q = 0; q < fep->num_rx_queues; q++) {
		rxq = fep->rx_queue[q];
T
Troy Kisky 已提交
2642 2643
		bdp = rxq->bd.base;
		for (i = 0; i < rxq->bd.ring_size; i++) {
2644 2645 2646 2647
			skb = rxq->rx_skbuff[i];
			rxq->rx_skbuff[i] = NULL;
			if (skb) {
				dma_unmap_single(&fep->pdev->dev,
2648
						 fec32_to_cpu(bdp->cbd_bufaddr),
2649
						 FEC_ENET_RX_FRSIZE - fep->rx_align,
2650 2651 2652
						 DMA_FROM_DEVICE);
				dev_kfree_skb(skb);
			}
T
Troy Kisky 已提交
2653
			bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
2654 2655
		}
	}
2656

2657 2658
	for (q = 0; q < fep->num_tx_queues; q++) {
		txq = fep->tx_queue[q];
T
Troy Kisky 已提交
2659 2660
		bdp = txq->bd.base;
		for (i = 0; i < txq->bd.ring_size; i++) {
2661 2662 2663 2664
			kfree(txq->tx_bounce[i]);
			txq->tx_bounce[i] = NULL;
			skb = txq->tx_skbuff[i];
			txq->tx_skbuff[i] = NULL;
S
Sascha Hauer 已提交
2665
			dev_kfree_skb(skb);
2666
		}
S
Sascha Hauer 已提交
2667
	}
2668
}
S
Sascha Hauer 已提交
2669

2670 2671 2672 2673 2674 2675 2676 2677 2678
static void fec_enet_free_queue(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;
	struct fec_enet_priv_tx_q *txq;

	for (i = 0; i < fep->num_tx_queues; i++)
		if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
			txq = fep->tx_queue[i];
2679
			dma_free_coherent(&fep->pdev->dev,
T
Troy Kisky 已提交
2680
					  txq->bd.ring_size * TSO_HEADER_SIZE,
2681 2682 2683 2684 2685
					  txq->tso_hdrs,
					  txq->tso_hdrs_dma);
		}

	for (i = 0; i < fep->num_rx_queues; i++)
2686
		kfree(fep->rx_queue[i]);
2687
	for (i = 0; i < fep->num_tx_queues; i++)
2688
		kfree(fep->tx_queue[i]);
2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705
}

static int fec_enet_alloc_queue(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;
	int ret = 0;
	struct fec_enet_priv_tx_q *txq;

	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = kzalloc(sizeof(*txq), GFP_KERNEL);
		if (!txq) {
			ret = -ENOMEM;
			goto alloc_failed;
		}

		fep->tx_queue[i] = txq;
T
Troy Kisky 已提交
2706 2707
		txq->bd.ring_size = TX_RING_SIZE;
		fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;
2708 2709 2710

		txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
		txq->tx_wake_threshold =
T
Troy Kisky 已提交
2711
			(txq->bd.ring_size - txq->tx_stop_threshold) / 2;
2712

2713
		txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev,
T
Troy Kisky 已提交
2714
					txq->bd.ring_size * TSO_HEADER_SIZE,
2715 2716 2717 2718 2719 2720
					&txq->tso_hdrs_dma,
					GFP_KERNEL);
		if (!txq->tso_hdrs) {
			ret = -ENOMEM;
			goto alloc_failed;
		}
2721
	}
2722 2723 2724 2725 2726 2727 2728 2729 2730

	for (i = 0; i < fep->num_rx_queues; i++) {
		fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]),
					   GFP_KERNEL);
		if (!fep->rx_queue[i]) {
			ret = -ENOMEM;
			goto alloc_failed;
		}

T
Troy Kisky 已提交
2731 2732
		fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE;
		fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size;
2733 2734 2735 2736 2737 2738
	}
	return ret;

alloc_failed:
	fec_enet_free_queue(ndev);
	return ret;
S
Sascha Hauer 已提交
2739 2740
}

2741 2742
static int
fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
S
Sascha Hauer 已提交
2743
{
2744
	struct fec_enet_private *fep = netdev_priv(ndev);
2745
	unsigned int i;
S
Sascha Hauer 已提交
2746 2747
	struct sk_buff *skb;
	struct bufdesc	*bdp;
2748
	struct fec_enet_priv_rx_q *rxq;
S
Sascha Hauer 已提交
2749

2750
	rxq = fep->rx_queue[queue];
T
Troy Kisky 已提交
2751 2752
	bdp = rxq->bd.base;
	for (i = 0; i < rxq->bd.ring_size; i++) {
2753
		skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
2754 2755
		if (!skb)
			goto err_alloc;
S
Sascha Hauer 已提交
2756

2757
		if (fec_enet_new_rxbdp(ndev, bdp, skb)) {
2758
			dev_kfree_skb(skb);
2759
			goto err_alloc;
2760
		}
2761

2762
		rxq->rx_skbuff[i] = skb;
2763
		bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
2764 2765 2766

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
2767
			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
2768 2769
		}

T
Troy Kisky 已提交
2770
		bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
S
Sascha Hauer 已提交
2771 2772 2773
	}

	/* Set the last buffer to wrap. */
T
Troy Kisky 已提交
2774
	bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
2775
	bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
2776
	return 0;
S
Sascha Hauer 已提交
2777

2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791
 err_alloc:
	fec_enet_free_buffers(ndev);
	return -ENOMEM;
}

static int
fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int i;
	struct bufdesc  *bdp;
	struct fec_enet_priv_tx_q *txq;

	txq = fep->tx_queue[queue];
T
Troy Kisky 已提交
2792 2793
	bdp = txq->bd.base;
	for (i = 0; i < txq->bd.ring_size; i++) {
2794 2795
		txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
		if (!txq->tx_bounce[i])
2796
			goto err_alloc;
S
Sascha Hauer 已提交
2797

2798 2799
		bdp->cbd_sc = cpu_to_fec16(0);
		bdp->cbd_bufaddr = cpu_to_fec32(0);
2800

2801 2802
		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
2803
			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
2804 2805
		}

T
Troy Kisky 已提交
2806
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
S
Sascha Hauer 已提交
2807 2808 2809
	}

	/* Set the last buffer to wrap. */
T
Troy Kisky 已提交
2810
	bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
2811
	bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
S
Sascha Hauer 已提交
2812 2813

	return 0;
2814 2815 2816 2817

 err_alloc:
	fec_enet_free_buffers(ndev);
	return -ENOMEM;
S
Sascha Hauer 已提交
2818 2819
}

2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834
static int fec_enet_alloc_buffers(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int i;

	for (i = 0; i < fep->num_rx_queues; i++)
		if (fec_enet_alloc_rxq_buffers(ndev, i))
			return -ENOMEM;

	for (i = 0; i < fep->num_tx_queues; i++)
		if (fec_enet_alloc_txq_buffers(ndev, i))
			return -ENOMEM;
	return 0;
}

L
Linus Torvalds 已提交
2835
static int
2836
fec_enet_open(struct net_device *ndev)
L
Linus Torvalds 已提交
2837
{
2838
	struct fec_enet_private *fep = netdev_priv(ndev);
S
Sascha Hauer 已提交
2839
	int ret;
L
Linus Torvalds 已提交
2840

2841
	ret = pm_runtime_get_sync(&fep->pdev->dev);
2842
	if (ret < 0)
2843 2844
		return ret;

N
Nimrod Andy 已提交
2845
	pinctrl_pm_select_default_state(&fep->pdev->dev);
2846 2847
	ret = fec_enet_clk_enable(ndev, true);
	if (ret)
2848
		goto clk_enable;
2849

L
Linus Torvalds 已提交
2850 2851 2852 2853
	/* I should reset the ring buffers here, but I don't yet know
	 * a simple way to do that.
	 */

2854
	ret = fec_enet_alloc_buffers(ndev);
S
Sascha Hauer 已提交
2855
	if (ret)
2856
		goto err_enet_alloc;
S
Sascha Hauer 已提交
2857

2858 2859 2860
	/* Init MAC prior to mii bus probe */
	fec_restart(ndev);

2861
	/* Probe and connect to PHY when open the interface */
2862
	ret = fec_enet_mii_probe(ndev);
2863 2864
	if (ret)
		goto err_enet_mii_probe;
2865

2866 2867 2868
	if (fep->quirks & FEC_QUIRK_ERR006687)
		imx6q_cpuidle_fec_irqs_used();

2869
	napi_enable(&fep->napi);
2870
	phy_start(ndev->phydev);
2871 2872
	netif_tx_start_all_queues(ndev);

N
Nimrod Andy 已提交
2873 2874 2875
	device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
				 FEC_WOL_FLAG_ENABLE);

S
Sascha Hauer 已提交
2876
	return 0;
2877 2878 2879 2880 2881

err_enet_mii_probe:
	fec_enet_free_buffers(ndev);
err_enet_alloc:
	fec_enet_clk_enable(ndev, false);
2882 2883 2884
clk_enable:
	pm_runtime_mark_last_busy(&fep->pdev->dev);
	pm_runtime_put_autosuspend(&fep->pdev->dev);
2885 2886
	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
	return ret;
L
Linus Torvalds 已提交
2887 2888 2889
}

static int
2890
fec_enet_close(struct net_device *ndev)
L
Linus Torvalds 已提交
2891
{
2892
	struct fec_enet_private *fep = netdev_priv(ndev);
L
Linus Torvalds 已提交
2893

2894
	phy_stop(ndev->phydev);
2895

2896 2897 2898
	if (netif_device_present(ndev)) {
		napi_disable(&fep->napi);
		netif_tx_disable(ndev);
2899
		fec_stop(ndev);
2900
	}
L
Linus Torvalds 已提交
2901

2902
	phy_disconnect(ndev->phydev);
2903

2904 2905 2906
	if (fep->quirks & FEC_QUIRK_ERR006687)
		imx6q_cpuidle_fec_irqs_unused();

2907 2908
	fec_enet_update_ethtool_stats(ndev);

2909
	fec_enet_clk_enable(ndev, false);
N
Nimrod Andy 已提交
2910
	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
2911 2912 2913
	pm_runtime_mark_last_busy(&fep->pdev->dev);
	pm_runtime_put_autosuspend(&fep->pdev->dev);

2914
	fec_enet_free_buffers(ndev);
S
Sascha Hauer 已提交
2915

L
Linus Torvalds 已提交
2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926 2927 2928
	return 0;
}

/* Set or clear the multicast filter for this adaptor.
 * Skeleton taken from sunlance driver.
 * The CPM Ethernet implementation allows Multicast as well as individual
 * MAC address filtering.  Some of the drivers check to make sure it is
 * a group multicast address, and discard those that are not.  I guess I
 * will do the same for now, but just remove the test if you want
 * individual filtering as well (do the upper net layers want or support
 * this kind of feature?).
 */

2929
#define FEC_HASH_BITS	6		/* #bits in hash */
L
Linus Torvalds 已提交
2930 2931
#define CRC32_POLY	0xEDB88320

2932
static void set_multicast_list(struct net_device *ndev)
L
Linus Torvalds 已提交
2933
{
2934
	struct fec_enet_private *fep = netdev_priv(ndev);
2935
	struct netdev_hw_addr *ha;
2936
	unsigned int i, bit, data, crc, tmp;
L
Linus Torvalds 已提交
2937
	unsigned char hash;
2938
	unsigned int hash_high = 0, hash_low = 0;
L
Linus Torvalds 已提交
2939

2940
	if (ndev->flags & IFF_PROMISC) {
S
Sascha Hauer 已提交
2941 2942 2943
		tmp = readl(fep->hwp + FEC_R_CNTRL);
		tmp |= 0x8;
		writel(tmp, fep->hwp + FEC_R_CNTRL);
2944 2945
		return;
	}
L
Linus Torvalds 已提交
2946

2947 2948 2949 2950
	tmp = readl(fep->hwp + FEC_R_CNTRL);
	tmp &= ~0x8;
	writel(tmp, fep->hwp + FEC_R_CNTRL);

2951
	if (ndev->flags & IFF_ALLMULTI) {
2952 2953 2954 2955 2956 2957 2958 2959 2960
		/* Catch all multicast addresses, so set the
		 * filter to all 1's
		 */
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);

		return;
	}

2961
	/* Add the addresses in hash register */
2962
	netdev_for_each_mc_addr(ha, ndev) {
2963 2964 2965
		/* calculate crc32 value of mac address */
		crc = 0xffffffff;

2966
		for (i = 0; i < ndev->addr_len; i++) {
2967
			data = ha->addr[i];
2968 2969 2970
			for (bit = 0; bit < 8; bit++, data >>= 1) {
				crc = (crc >> 1) ^
				(((crc ^ data) & 1) ? CRC32_POLY : 0);
L
Linus Torvalds 已提交
2971 2972
			}
		}
2973

2974
		/* only upper 6 bits (FEC_HASH_BITS) are used
2975
		 * which point to specific bit in the hash registers
2976
		 */
2977
		hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;
2978

2979 2980 2981 2982
		if (hash > 31)
			hash_high |= 1 << (hash - 32);
		else
			hash_low |= 1 << hash;
L
Linus Torvalds 已提交
2983
	}
2984 2985 2986

	writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
	writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
L
Linus Torvalds 已提交
2987 2988
}

S
Sascha Hauer 已提交
2989
/* Set a MAC change in hardware. */
S
Sascha Hauer 已提交
2990
static int
2991
fec_set_mac_address(struct net_device *ndev, void *p)
L
Linus Torvalds 已提交
2992
{
2993
	struct fec_enet_private *fep = netdev_priv(ndev);
S
Sascha Hauer 已提交
2994 2995
	struct sockaddr *addr = p;

2996 2997 2998 2999 3000
	if (addr) {
		if (!is_valid_ether_addr(addr->sa_data))
			return -EADDRNOTAVAIL;
		memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	}
L
Linus Torvalds 已提交
3001

3002 3003 3004 3005 3006 3007 3008 3009
	/* Add netif status check here to avoid system hang in below case:
	 * ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx;
	 * After ethx down, fec all clocks are gated off and then register
	 * access causes system hang.
	 */
	if (!netif_running(ndev))
		return 0;

3010 3011
	writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
		(ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
S
Sascha Hauer 已提交
3012
		fep->hwp + FEC_ADDR_LOW);
3013
	writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
3014
		fep->hwp + FEC_ADDR_HIGH);
S
Sascha Hauer 已提交
3015
	return 0;
L
Linus Torvalds 已提交
3016 3017
}

3018
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * fec_poll_controller - FEC Poll controller function
 * @dev: The FEC network adapter
 *
 * Polled functionality used by netconsole and others in non interrupt mode
 *
 */
static void fec_poll_controller(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int i;

	/* Invoke the interrupt handler by hand for every wired-up IRQ,
	 * with the line masked so the real handler cannot run concurrently.
	 */
	for (i = 0; i < FEC_IRQ_NUM; i++) {
		int irq = fep->irq[i];

		if (irq <= 0)
			continue;

		disable_irq(irq);
		fec_enet_interrupt(irq, dev);
		enable_irq(irq);
	}
}
#endif

3041
static inline void fec_enet_set_netdev_features(struct net_device *netdev,
3042 3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054
	netdev_features_t features)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	netdev->features = features;

	/* Receive checksum has been changed */
	if (changed & NETIF_F_RXCSUM) {
		if (features & NETIF_F_RXCSUM)
			fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
		else
			fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
3055
	}
3056 3057 3058 3059 3060 3061 3062
}

/* ndo_set_features callback.  Always returns 0. */
static int fec_set_features(struct net_device *netdev,
	netdev_features_t features)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	/* Toggling RX checksum offload on a running interface needs a full
	 * MAC restart: quiesce NAPI and TX, stop the controller, update the
	 * feature flags, then restart and wake the queues again.
	 */
	if (netif_running(netdev) && changed & NETIF_F_RXCSUM) {
		napi_disable(&fep->napi);
		netif_tx_lock_bh(netdev);
		fec_stop(netdev);
		fec_enet_set_netdev_features(netdev, features);
		fec_restart(netdev);
		netif_tx_wake_all_queues(netdev);
		netif_tx_unlock_bh(netdev);
		napi_enable(&fep->napi);
	} else {
		/* Otherwise the flags can be updated without touching hw */
		fec_enet_set_netdev_features(netdev, features);
	}

	return 0;
}

S
Sascha Hauer 已提交
3080 3081 3082 3083
/* Driver entry points plugged into the net_device by fec_enet_init() */
static const struct net_device_ops fec_netdev_ops = {
	.ndo_open		= fec_enet_open,
	.ndo_stop		= fec_enet_close,
	.ndo_start_xmit		= fec_enet_start_xmit,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= fec_timeout,
	.ndo_set_mac_address	= fec_set_mac_address,
	.ndo_do_ioctl		= fec_enet_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= fec_poll_controller,
#endif
	.ndo_set_features	= fec_set_features,
};

3095 3096 3097 3098 3099 3100 3101 3102
/* Register offsets of the per-queue RX/TX descriptor-active registers,
 * indexed by queue id; used by fec_enet_init() to fill bd.reg_desc_active.
 */
static const unsigned short offset_des_active_rxq[] = {
	FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2
};

static const unsigned short offset_des_active_txq[] = {
	FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2
};

L
Linus Torvalds 已提交
3103 3104
 /*
  * XXX:  We need to clean up on failure exits here.
  *
  */
/* fec_enet_init - one-time netdev setup at probe time: allocate the
 * descriptor rings in one coherent DMA block, carve it into per-queue
 * RX/TX rings, program the MAC address, and set up netdev ops, NAPI and
 * feature flags.  Returns 0 or -ENOMEM.
 */
static int fec_enet_init(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc *cbd_base;
	dma_addr_t bd_dma;
	int bd_size;
	unsigned int i;
	/* Descriptor stride: extended descriptors are larger; dsize must be
	 * a power of two so ring math can use shifts (dsize_log2).
	 */
	unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
			sizeof(struct bufdesc);
	unsigned dsize_log2 = __fls(dsize);

	WARN_ON(dsize != (1 << dsize_log2));
#if defined(CONFIG_ARM)
	fep->rx_align = 0xf;
	fep->tx_align = 0xf;
#else
	fep->rx_align = 0x3;
	fep->tx_align = 0x3;
#endif

	fec_enet_alloc_queue(ndev);

	bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;

	/* Allocate memory for buffer descriptors. */
	cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
				       GFP_KERNEL);
	if (!cbd_base) {
		return -ENOMEM;
	}

	memset(cbd_base, 0, bd_size);

	/* Get the Ethernet address */
	fec_get_mac(ndev);
	/* make sure MAC we just acquired is programmed into the hw */
	fec_set_mac_address(ndev, NULL);

	/* Set receive and transmit descriptor base. */
	/* Carve the coherent block into consecutive per-queue RX rings;
	 * bd.last ends up pointing at the final descriptor of each ring.
	 */
	for (i = 0; i < fep->num_rx_queues; i++) {
		struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
		unsigned size = dsize * rxq->bd.ring_size;

		rxq->bd.qid = i;
		rxq->bd.base = cbd_base;
		rxq->bd.cur = cbd_base;
		rxq->bd.dma = bd_dma;
		rxq->bd.dsize = dsize;
		rxq->bd.dsize_log2 = dsize_log2;
		rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i];
		bd_dma += size;
		cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
		rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
	}

	/* TX rings follow the RX rings in the same coherent block */
	for (i = 0; i < fep->num_tx_queues; i++) {
		struct fec_enet_priv_tx_q *txq = fep->tx_queue[i];
		unsigned size = dsize * txq->bd.ring_size;

		txq->bd.qid = i;
		txq->bd.base = cbd_base;
		txq->bd.cur = cbd_base;
		txq->bd.dma = bd_dma;
		txq->bd.dsize = dsize;
		txq->bd.dsize_log2 = dsize_log2;
		txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i];
		bd_dma += size;
		cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
		txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
	}


	/* The FEC Ethernet specific entries in the device structure */
	ndev->watchdog_timeo = TX_TIMEOUT;
	ndev->netdev_ops = &fec_netdev_ops;
	ndev->ethtool_ops = &fec_enet_ethtool_ops;

	writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
	netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);

	if (fep->quirks & FEC_QUIRK_HAS_VLAN)
		/* enable hw VLAN support */
		ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;

	if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
		ndev->gso_max_segs = FEC_MAX_TSO_SEGS;

		/* enable hw accelerator */
		ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
				| NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO);
		fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
	}

	/* AVB-capable controllers need different DMA alignment */
	if (fep->quirks & FEC_QUIRK_HAS_AVB) {
		fep->tx_align = 0;
		fep->rx_align = 0x3f;
	}

	ndev->hw_features = ndev->features;

	fec_restart(ndev);

	if (fep->quirks & FEC_QUIRK_MIB_CLEAR)
		fec_enet_clear_ethtool_stats(ndev);
	else
		fec_enet_update_ethtool_stats(ndev);

	return 0;
}

3217
#ifdef CONFIG_OF
/* Pulse the PHY reset GPIO described by the device tree.
 * Optional DT properties: "phy-reset-duration" (ms, clamped to 1 if >1s),
 * "phy-reset-post-delay" (ms, rejected if >1s), "phy-reset-active-high",
 * and "phy-reset-gpios".  Returns 0 when no reset GPIO is specified.
 */
static int fec_reset_phy(struct platform_device *pdev)
{
	int err, phy_reset;
	bool active_high = false;
	int msec = 1, phy_post_delay = 0;
	struct device_node *np = pdev->dev.of_node;

	if (!np)
		return 0;

	err = of_property_read_u32(np, "phy-reset-duration", &msec);
	/* A sane reset duration should not be longer than 1s */
	if (!err && msec > 1000)
		msec = 1;

	phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
	if (phy_reset == -EPROBE_DEFER)
		return phy_reset;
	else if (!gpio_is_valid(phy_reset))
		return 0;

	err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay);
	/* valid reset duration should be less than 1s */
	if (!err && phy_post_delay > 1000)
		return -EINVAL;

	active_high = of_property_read_bool(np, "phy-reset-active-high");

	/* Assert reset immediately at request time */
	err = devm_gpio_request_one(&pdev->dev, phy_reset,
			active_high ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
			"phy-reset");
	if (err) {
		dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
		return err;
	}

	/* msleep() for long delays, usleep_range() for short ones */
	if (msec > 20)
		msleep(msec);
	else
		usleep_range(msec * 1000, msec * 1000 + 1000);

	/* Deassert reset */
	gpio_set_value_cansleep(phy_reset, !active_high);

	if (!phy_post_delay)
		return 0;

	/* Give the PHY time to come out of reset before it is used */
	if (phy_post_delay > 20)
		msleep(phy_post_delay);
	else
		usleep_range(phy_post_delay * 1000,
			     phy_post_delay * 1000 + 1000);

	return 0;
}
#else /* CONFIG_OF */
static int fec_reset_phy(struct platform_device *pdev)
{
	/*
	 * In case of platform probe, the reset has been done
	 * by machine code.
	 */
	return 0;
}
#endif /* CONFIG_OF */

3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293
static void
fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
{
	struct device_node *np = pdev->dev.of_node;

	*num_tx = *num_rx = 1;

	if (!np || !of_device_is_available(np))
		return;

	/* parse the num of tx and rx queues */
3294
	of_property_read_u32(np, "fsl,num-tx-queues", num_tx);
3295

3296
	of_property_read_u32(np, "fsl,num-rx-queues", num_rx);
3297 3298

	if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) {
3299 3300
		dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n",
			 *num_tx);
3301 3302 3303 3304 3305
		*num_tx = 1;
		return;
	}

	if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) {
3306 3307
		dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n",
			 *num_rx);
3308 3309 3310 3311 3312 3313
		*num_rx = 1;
		return;
	}

}

3314
/* fec_probe - platform-bus probe.
 * Allocates the netdev, maps registers, acquires clocks and the PHY
 * regulator, sets up runtime PM, resets the PHY, initializes rings and
 * IRQs, brings up the MDIO bus and registers the netdev.  Failures
 * unwind through the goto ladder at the bottom, releasing resources in
 * reverse order of acquisition.
 */
static int
fec_probe(struct platform_device *pdev)
{
	struct fec_enet_private *fep;
	struct fec_platform_data *pdata;
	struct net_device *ndev;
	int i, irq, ret = 0;
	struct resource *r;
	const struct of_device_id *of_id;
	static int dev_id;	/* monotonically increasing instance id */
	struct device_node *np = pdev->dev.of_node, *phy_node;
	int num_tx_qs;
	int num_rx_qs;

	fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);

	/* Init network device */
	ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) +
				  FEC_STATS_SIZE, num_tx_qs, num_rx_qs);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* setup board info structure */
	fep = netdev_priv(ndev);

	/* SoC-specific quirk flags come from the matched id table entry */
	of_id = of_match_device(fec_dt_ids, &pdev->dev);
	if (of_id)
		pdev->id_entry = of_id->data;
	fep->quirks = pdev->id_entry->driver_data;

	fep->netdev = ndev;
	fep->num_rx_queues = num_rx_qs;
	fep->num_tx_queues = num_tx_qs;

#if !defined(CONFIG_M5272)
	/* default enable pause frame auto negotiation */
	if (fep->quirks & FEC_QUIRK_HAS_GBIT)
		fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
#endif

	/* Select default pin state */
	pinctrl_pm_select_default_state(&pdev->dev);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	fep->hwp = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(fep->hwp)) {
		ret = PTR_ERR(fep->hwp);
		goto failed_ioremap;
	}

	fep->pdev = pdev;
	fep->dev_id = dev_id++;

	platform_set_drvdata(pdev, ndev);

	/* ERR006687 workaround applies to i.MX6Q/DL boards that do not
	 * declare the dedicated workaround as present in their DT.
	 */
	if ((of_machine_is_compatible("fsl,imx6q") ||
	     of_machine_is_compatible("fsl,imx6dl")) &&
	    !of_property_read_bool(np, "fsl,err006687-workaround-present"))
		fep->quirks |= FEC_QUIRK_ERR006687;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;

	/* Resolve the PHY: explicit phy-handle, or a fixed-link node */
	phy_node = of_parse_phandle(np, "phy-handle", 0);
	if (!phy_node && of_phy_is_fixed_link(np)) {
		ret = of_phy_register_fixed_link(np);
		if (ret < 0) {
			dev_err(&pdev->dev,
				"broken fixed-link specification\n");
			goto failed_phy;
		}
		phy_node = of_node_get(np);
	}
	fep->phy_node = phy_node;

	/* PHY mode from DT, then platform data, then MII as last resort */
	ret = of_get_phy_mode(pdev->dev.of_node);
	if (ret < 0) {
		pdata = dev_get_platdata(&pdev->dev);
		if (pdata)
			fep->phy_interface = pdata->phy;
		else
			fep->phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		fep->phy_interface = ret;
	}

	fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(fep->clk_ipg)) {
		ret = PTR_ERR(fep->clk_ipg);
		goto failed_clk;
	}

	fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(fep->clk_ahb)) {
		ret = PTR_ERR(fep->clk_ahb);
		goto failed_clk;
	}

	fep->itr_clk_rate = clk_get_rate(fep->clk_ahb);

	/* enet_out is optional, depends on board */
	fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out");
	if (IS_ERR(fep->clk_enet_out))
		fep->clk_enet_out = NULL;

	fep->ptp_clk_on = false;
	mutex_init(&fep->ptp_clk_mutex);

	/* clk_ref is optional, depends on board */
	fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref");
	if (IS_ERR(fep->clk_ref))
		fep->clk_ref = NULL;

	/* Extended descriptors only make sense when a PTP clock exists */
	fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX;
	fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
	if (IS_ERR(fep->clk_ptp)) {
		fep->clk_ptp = NULL;
		fep->bufdesc_ex = false;
	}

	ret = fec_enet_clk_enable(ndev, true);
	if (ret)
		goto failed_clk;

	ret = clk_prepare_enable(fep->clk_ipg);
	if (ret)
		goto failed_clk_ipg;

	/* The PHY regulator is optional; NULL means "not present" */
	fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
	if (!IS_ERR(fep->reg_phy)) {
		ret = regulator_enable(fep->reg_phy);
		if (ret) {
			dev_err(&pdev->dev,
				"Failed to enable phy regulator: %d\n", ret);
			clk_disable_unprepare(fep->clk_ipg);
			goto failed_regulator;
		}
	} else {
		fep->reg_phy = NULL;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = fec_reset_phy(pdev);
	if (ret)
		goto failed_reset;

	if (fep->bufdesc_ex)
		fec_ptp_init(pdev);

	ret = fec_enet_init(ndev);
	if (ret)
		goto failed_init;

	/* Request every available IRQ; at least the first must exist */
	for (i = 0; i < FEC_IRQ_NUM; i++) {
		irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			if (i)
				break;
			ret = irq;
			goto failed_irq;
		}
		ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
				       0, pdev->name, ndev);
		if (ret)
			goto failed_irq;

		fep->irq[i] = irq;
	}

	init_completion(&fep->mdio_done);
	ret = fec_enet_mii_init(pdev);
	if (ret)
		goto failed_mii_init;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(ndev);
	/* Clocks stay off until the interface is opened */
	fec_enet_clk_enable(ndev, false);
	pinctrl_pm_select_sleep_state(&pdev->dev);

	ret = register_netdev(ndev);
	if (ret)
		goto failed_register;

	device_init_wakeup(&ndev->dev, fep->wol_flag &
			   FEC_WOL_HAS_MAGIC_PACKET);

	if (fep->bufdesc_ex && fep->ptp_clock)
		netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);

	fep->rx_copybreak = COPYBREAK_DEFAULT;
	INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

failed_register:
	fec_enet_mii_remove(fep);
failed_mii_init:
failed_irq:
failed_init:
	fec_ptp_stop(pdev);
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
failed_reset:
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
failed_regulator:
failed_clk_ipg:
	fec_enet_clk_enable(ndev, false);
failed_clk:
	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
failed_phy:
	of_node_put(phy_node);
failed_ioremap:
	free_netdev(ndev);

	return ret;
}

3543
/* fec_drv_remove - platform-bus remove: undo fec_probe().
 * Flushes pending timeout work, stops PTP, unregisters the netdev and
 * MDIO bus, powers down the PHY regulator and releases DT references
 * before freeing the netdev.  Always returns 0.
 */
static int
fec_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct device_node *np = pdev->dev.of_node;

	cancel_work_sync(&fep->tx_timeout_work);
	fec_ptp_stop(pdev);
	unregister_netdev(ndev);
	fec_enet_mii_remove(fep);
	/* regulator may be absent (NULL) — guard is required here */
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	of_node_put(fep->phy_node);
	free_netdev(ndev);

	return 0;
}

3564
/* System-sleep suspend hook.
 * Under RTNL: stop the PHY, quiesce NAPI/TX, detach the netdev, halt the
 * MAC and gate the clocks.  When Wake-on-LAN is armed
 * (FEC_WOL_FLAG_ENABLE), the pins and PHY regulator are left powered so
 * the wakeup source keeps working.
 */
static int __maybe_unused fec_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	rtnl_lock();
	if (netif_running(ndev)) {
		/* Remember that we went to sleep with WoL armed */
		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
			fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
		phy_stop(ndev->phydev);
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		netif_device_detach(ndev);
		netif_tx_unlock_bh(ndev);
		fec_stop(ndev);
		fec_enet_clk_enable(ndev, false);
		if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
			pinctrl_pm_select_sleep_state(&fep->pdev->dev);
	}
	rtnl_unlock();

	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
		regulator_disable(fep->reg_phy);

	/* SOC supply clock to phy, when clock is disabled, phy link down
	 * SOC control phy regulator, when regulator is disabled, phy link down
	 */
	if (fep->clk_enet_out || fep->reg_phy)
		fep->link = 0;

	return 0;
}

3597
/* System-sleep resume hook: mirror of fec_suspend().
 * Re-enables the PHY regulator (unless WoL kept it on), restores clocks,
 * clears the WoL sleep state or restores the default pin state, then
 * restarts the MAC, reattaches the netdev and resumes NAPI and the PHY.
 */
static int __maybe_unused fec_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
	int ret;
	int val;

	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
		ret = regulator_enable(fep->reg_phy);
		if (ret)
			return ret;
	}

	rtnl_lock();
	if (netif_running(ndev)) {
		ret = fec_enet_clk_enable(ndev, true);
		if (ret) {
			rtnl_unlock();
			goto failed_clk;
		}
		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
			/* Leave stop/sleep mode and clear the magic-packet
			 * and sleep bits in ECR
			 */
			if (pdata && pdata->sleep_mode_enable)
				pdata->sleep_mode_enable(false);
			val = readl(fep->hwp + FEC_ECNTRL);
			val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
			writel(val, fep->hwp + FEC_ECNTRL);
			fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON;
		} else {
			pinctrl_pm_select_default_state(&fep->pdev->dev);
		}
		fec_restart(ndev);
		netif_tx_lock_bh(ndev);
		netif_device_attach(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
		phy_start(ndev->phydev);
	}
	rtnl_unlock();

	return 0;

failed_clk:
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
	return ret;
}

3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657 3658 3659 3660 3661 3662 3663 3664 3665 3666
/* Runtime-PM suspend: gate the ipg clock while the device is idle.
 * Re-enabled in fec_runtime_resume().
 */
static int __maybe_unused fec_runtime_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	clk_disable_unprepare(fep->clk_ipg);

	return 0;
}

/* Runtime-PM resume: ungate the ipg clock; returns the clk API result. */
static int __maybe_unused fec_runtime_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	return clk_prepare_enable(fep->clk_ipg);
}

/* System-sleep and runtime-PM callbacks for the platform driver below */
static const struct dev_pm_ops fec_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
	SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
};
3667

3668 3669
/* Platform driver glue: probe/remove, PM ops and OF/platform id tables */
static struct platform_driver fec_driver = {
	.driver	= {
		.name	= DRIVER_NAME,
		.pm	= &fec_pm_ops,
		.of_match_table = fec_dt_ids,
	},
	.id_table = fec_devtype,
	.probe	= fec_probe,
	.remove	= fec_drv_remove,
};

3679
/* Standard module registration boilerplate for the platform driver */
module_platform_driver(fec_driver);

MODULE_ALIAS("platform:"DRIVER_NAME);
MODULE_LICENSE("GPL");