fec_main.c 92.9 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4
/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
5
 * Right now, I am very wasteful with the buffers.  I allocate memory
L
Linus Torvalds 已提交
6 7 8 9 10 11 12 13 14
 * pages and then divide them into 2K frame buffers.  This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
15 16
 * Support for FEC controller of ColdFire processors.
 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
17 18
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
19
 * Copyright (c) 2004-2006 Macq Electronique SA.
20
 *
S
Shawn Guo 已提交
21
 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
L
Linus Torvalds 已提交
22 23 24 25 26
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
27
#include <linux/pm_runtime.h>
L
Linus Torvalds 已提交
28 29 30 31 32 33 34 35 36
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
37 38 39
#include <linux/in.h>
#include <linux/ip.h>
#include <net/ip.h>
N
Nimrod Andy 已提交
40
#include <net/tso.h>
41 42 43
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
L
Linus Torvalds 已提交
44 45 46
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
47 48
#include <linux/io.h>
#include <linux/irq.h>
49
#include <linux/clk.h>
50
#include <linux/platform_device.h>
51
#include <linux/mdio.h>
52
#include <linux/phy.h>
53
#include <linux/fec.h>
54 55 56
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
57
#include <linux/of_mdio.h>
58
#include <linux/of_net.h>
59
#include <linux/regulator/consumer.h>
60
#include <linux/if_vlan.h>
F
Fabio Estevam 已提交
61
#include <linux/pinctrl/consumer.h>
62
#include <linux/prefetch.h>
63
#include <soc/imx/cpuidle.h>
L
Linus Torvalds 已提交
64

65
#include <asm/cacheflush.h>
66

L
Linus Torvalds 已提交
67 68
#include "fec.h"

69
static void set_multicast_list(struct net_device *ndev);
70
static void fec_enet_itr_coal_init(struct net_device *ndev);
71

72 73
#define DRIVER_NAME	"fec"

74 75
#define FEC_ENET_GET_QUQUE(_x) ((_x == 0) ? 1 : ((_x == 1) ? 2 : 0))

76 77 78 79 80 81 82
/* Pause frame feild and FIFO threshold */
#define FEC_ENET_FCE	(1 << 5)
#define FEC_ENET_RSEM_V	0x84
#define FEC_ENET_RSFL_V	16
#define FEC_ENET_RAEM_V	0x8
#define FEC_ENET_RAFL_V	0x8
#define FEC_ENET_OPD_V	0xFFF0
83
#define FEC_MDIO_PM_TIMEOUT  100 /* ms */
84

85 86
static struct platform_device_id fec_devtype[] = {
	{
87
		/* keep it for coldfire */
88 89
		.name = DRIVER_NAME,
		.driver_data = 0,
90 91
	}, {
		.name = "imx25-fec",
92
		.driver_data = FEC_QUIRK_USE_GASKET,
93 94
	}, {
		.name = "imx27-fec",
95
		.driver_data = 0,
96 97
	}, {
		.name = "imx28-fec",
98
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
99
				FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC,
S
Shawn Guo 已提交
100 101
	}, {
		.name = "imx6q-fec",
102
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
103
				FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
104 105
				FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
				FEC_QUIRK_HAS_RACC,
106
	}, {
107
		.name = "mvf600-fec",
108
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC,
109 110 111 112
	}, {
		.name = "imx6sx-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
				FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
113
				FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
114
				FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
115
				FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
116 117 118 119 120 121
	}, {
		.name = "imx6ul-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
				FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
				FEC_QUIRK_HAS_VLAN | FEC_QUIRK_BUG_CAPTURE |
				FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
122 123 124
	}, {
		/* sentinel */
	}
125
};
126
MODULE_DEVICE_TABLE(platform, fec_devtype);
127

128
enum imx_fec_type {
L
Lothar Waßmann 已提交
129
	IMX25_FEC = 1,	/* runs on i.mx25/50/53 */
130 131
	IMX27_FEC,	/* runs on i.mx27/35/51 */
	IMX28_FEC,
S
Shawn Guo 已提交
132
	IMX6Q_FEC,
133
	MVF600_FEC,
134
	IMX6SX_FEC,
135
	IMX6UL_FEC,
136 137 138 139 140 141
};

static const struct of_device_id fec_dt_ids[] = {
	{ .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
	{ .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
	{ .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
S
Shawn Guo 已提交
142
	{ .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
143
	{ .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
144
	{ .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
145
	{ .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], },
146 147 148 149
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);

150 151 152
static unsigned char macaddr[ETH_ALEN];
module_param_array(macaddr, byte, NULL, 0);
MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
L
Linus Torvalds 已提交
153

154
#if defined(CONFIG_M5272)
L
Linus Torvalds 已提交
155 156 157 158 159 160 161 162 163 164
/*
 * Some hardware gets it MAC address out of local flash memory.
 * if this is non-zero then assume it is the address to get MAC from.
 */
#if defined(CONFIG_NETtel)
#define	FEC_FLASHMAC	0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define	FEC_FLASHMAC	0xf0006000
#elif defined(CONFIG_CANCam)
#define	FEC_FLASHMAC	0xf0020000
165 166 167
#elif defined (CONFIG_M5272C3)
#define	FEC_FLASHMAC	(0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
L
Lothar Waßmann 已提交
168
#define FEC_FLASHMAC	0xffc0406b
L
Linus Torvalds 已提交
169 170 171
#else
#define	FEC_FLASHMAC	0
#endif
172
#endif /* CONFIG_M5272 */
173

174
/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
L
Linus Torvalds 已提交
175
 */
176
#define PKT_MAXBUF_SIZE		1522
L
Linus Torvalds 已提交
177
#define PKT_MINBUF_SIZE		64
178
#define PKT_MAXBLR_SIZE		1536
L
Linus Torvalds 已提交
179

180 181 182
/* FEC receive acceleration */
#define FEC_RACC_IPDIS		(1 << 1)
#define FEC_RACC_PRODIS		(1 << 2)
183
#define FEC_RACC_SHIFT16	BIT(7)
184 185
#define FEC_RACC_OPTIONS	(FEC_RACC_IPDIS | FEC_RACC_PRODIS)

L
Linus Torvalds 已提交
186
/*
187
 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
L
Linus Torvalds 已提交
188 189 190
 * size bits. Other FEC hardware does not, so we need to take that into
 * account when setting it.
 */
191
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
192
    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
L
Linus Torvalds 已提交
193 194 195 196 197
#define	OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
#else
#define	OPT_FRAME_SIZE	0
#endif

198 199 200 201 202 203 204 205
/* FEC MII MMFR bits definition */
#define FEC_MMFR_ST		(1 << 30)
#define FEC_MMFR_OP_READ	(2 << 28)
#define FEC_MMFR_OP_WRITE	(1 << 28)
#define FEC_MMFR_PA(v)		((v & 0x1f) << 23)
#define FEC_MMFR_RA(v)		((v & 0x1f) << 18)
#define FEC_MMFR_TA		(2 << 16)
#define FEC_MMFR_DATA(v)	(v & 0xffff)
N
Nimrod Andy 已提交
206 207 208
/* FEC ECR bits definition */
#define FEC_ECR_MAGICEN		(1 << 2)
#define FEC_ECR_SLEEP		(1 << 3)
L
Linus Torvalds 已提交
209

210
#define FEC_MII_TIMEOUT		30000 /* us */
L
Linus Torvalds 已提交
211

S
Sascha Hauer 已提交
212 213
/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)
L
Linus Torvalds 已提交
214

215 216
#define FEC_PAUSE_FLAG_AUTONEG	0x1
#define FEC_PAUSE_FLAG_ENABLE	0x2
N
Nimrod Andy 已提交
217 218 219
#define FEC_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define FEC_WOL_FLAG_ENABLE		(0x1 << 1)
#define FEC_WOL_FLAG_SLEEP_ON		(0x1 << 2)
220

221 222
#define COPYBREAK_DEFAULT	256

N
Nimrod Andy 已提交
223 224 225 226 227 228 229
#define TSO_HEADER_SIZE		128
/* Max number of allowed TCP segments for software TSO */
#define FEC_MAX_TSO_SEGS	100
#define FEC_MAX_SKB_DESCS	(FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_dma) && \
T
Troy Kisky 已提交
230
	(addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))
N
Nimrod Andy 已提交
231

L
Lothar Waßmann 已提交
232 233
static int mii_cnt;

T
Troy Kisky 已提交
234 235 236 237 238 239
static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
					     struct bufdesc_prop *bd)
{
	return (bdp >= bd->last) ? bd->base
			: (struct bufdesc *)(((unsigned)bdp) + bd->dsize);
}
240

T
Troy Kisky 已提交
241 242 243 244 245
static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
					     struct bufdesc_prop *bd)
{
	return (bdp <= bd->base) ? bd->last
			: (struct bufdesc *)(((unsigned)bdp) - bd->dsize);
246 247
}

T
Troy Kisky 已提交
248 249
static int fec_enet_get_bd_index(struct bufdesc *bdp,
				 struct bufdesc_prop *bd)
250
{
T
Troy Kisky 已提交
251
	return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2;
252 253
}

T
Troy Kisky 已提交
254
static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq)
255 256 257
{
	int entries;

T
Troy Kisky 已提交
258 259
	entries = (((const char *)txq->dirty_tx -
			(const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;
260

T
Troy Kisky 已提交
261
	return entries >= 0 ? entries : entries + txq->bd.ring_size;
262 263
}

/* Byte-swap @bufaddr in place, one 32-bit word at a time, for controllers
 * with the FEC_QUIRK_SWAP_FRAME endianness quirk.
 *
 * NOTE(review): a @len that is not a multiple of 4 still swaps the full
 * final word — callers appear to pass word-padded bounce buffers, but
 * confirm before relying on odd lengths.
 */
static void swap_buffer(void *bufaddr, int len)
{
	unsigned int *word = bufaddr;
	int off;

	for (off = 0; off < len; off += 4)
		swab32s(word++);
}

/* Copy @len bytes from @src_buf to @dst_buf, byte-swapping each 32-bit
 * word on the way (non-destructive variant of swap_buffer()).
 */
static void swap_buffer2(void *dst_buf, void *src_buf, int len)
{
	unsigned int *from = src_buf;
	unsigned int *to = dst_buf;
	int off;

	for (off = 0; off < len; off += 4)
		*to++ = swab32p(from++);
}

283 284 285
static void fec_dump(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
286 287 288
	struct bufdesc *bdp;
	struct fec_enet_priv_tx_q *txq;
	int index = 0;
289 290 291 292

	netdev_info(ndev, "TX ring dump\n");
	pr_info("Nr     SC     addr       len  SKB\n");

293
	txq = fep->tx_queue[0];
T
Troy Kisky 已提交
294
	bdp = txq->bd.base;
295

296
	do {
297
		pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
298
			index,
T
Troy Kisky 已提交
299
			bdp == txq->bd.cur ? 'S' : ' ',
300
			bdp == txq->dirty_tx ? 'H' : ' ',
301 302 303
			fec16_to_cpu(bdp->cbd_sc),
			fec32_to_cpu(bdp->cbd_bufaddr),
			fec16_to_cpu(bdp->cbd_datlen),
304
			txq->tx_skbuff[index]);
T
Troy Kisky 已提交
305
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
306
		index++;
T
Troy Kisky 已提交
307
	} while (bdp != txq->bd.base);
308 309
}

310 311 312 313 314
static inline bool is_ipv4_pkt(struct sk_buff *skb)
{
	return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
}

315 316 317 318 319 320 321 322 323 324
static int
fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
{
	/* Only run for packets requiring a checksum. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (unlikely(skb_cow_head(skb, 0)))
		return -1;

325 326
	if (is_ipv4_pkt(skb))
		ip_hdr(skb)->check = 0;
327 328 329 330 331
	*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;

	return 0;
}

332
static struct bufdesc *
333 334 335
fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
			     struct sk_buff *skb,
			     struct net_device *ndev)
L
Linus Torvalds 已提交
336
{
337
	struct fec_enet_private *fep = netdev_priv(ndev);
T
Troy Kisky 已提交
338
	struct bufdesc *bdp = txq->bd.cur;
339 340 341 342 343 344
	struct bufdesc_ex *ebdp;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int frag, frag_len;
	unsigned short status;
	unsigned int estatus = 0;
	skb_frag_t *this_frag;
345
	unsigned int index;
346
	void *bufaddr;
347
	dma_addr_t addr;
348
	int i;
L
Linus Torvalds 已提交
349

350 351
	for (frag = 0; frag < nr_frags; frag++) {
		this_frag = &skb_shinfo(skb)->frags[frag];
T
Troy Kisky 已提交
352
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
353 354
		ebdp = (struct bufdesc_ex *)bdp;

355
		status = fec16_to_cpu(bdp->cbd_sc);
356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371
		status &= ~BD_ENET_TX_STATS;
		status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
		frag_len = skb_shinfo(skb)->frags[frag].size;

		/* Handle the last BD specially */
		if (frag == nr_frags - 1) {
			status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
			if (fep->bufdesc_ex) {
				estatus |= BD_ENET_TX_INT;
				if (unlikely(skb_shinfo(skb)->tx_flags &
					SKBTX_HW_TSTAMP && fep->hwts_tx_en))
					estatus |= BD_ENET_TX_TS;
			}
		}

		if (fep->bufdesc_ex) {
372
			if (fep->quirks & FEC_QUIRK_HAS_AVB)
373
				estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
374 375 376
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
			ebdp->cbd_bdu = 0;
377
			ebdp->cbd_esc = cpu_to_fec32(estatus);
378 379 380 381
		}

		bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;

T
Troy Kisky 已提交
382
		index = fec_enet_get_bd_index(bdp, &txq->bd);
383
		if (((unsigned long) bufaddr) & fep->tx_align ||
384
			fep->quirks & FEC_QUIRK_SWAP_FRAME) {
385 386
			memcpy(txq->tx_bounce[index], bufaddr, frag_len);
			bufaddr = txq->tx_bounce[index];
387

388
			if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
389 390 391
				swap_buffer(bufaddr, frag_len);
		}

392 393 394
		addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(&fep->pdev->dev, addr)) {
395 396 397 398 399
			if (net_ratelimit())
				netdev_err(ndev, "Tx DMA memory map failed\n");
			goto dma_mapping_error;
		}

400 401
		bdp->cbd_bufaddr = cpu_to_fec32(addr);
		bdp->cbd_datlen = cpu_to_fec16(frag_len);
402 403 404 405
		/* Make sure the updates to rest of the descriptor are
		 * performed before transferring ownership.
		 */
		wmb();
406
		bdp->cbd_sc = cpu_to_fec16(status);
407 408
	}

409
	return bdp;
410
dma_mapping_error:
T
Troy Kisky 已提交
411
	bdp = txq->bd.cur;
412
	for (i = 0; i < frag; i++) {
T
Troy Kisky 已提交
413
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
414 415
		dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
				 fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
416
	}
417
	return ERR_PTR(-ENOMEM);
418
}
L
Linus Torvalds 已提交
419

420 421
static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
				   struct sk_buff *skb, struct net_device *ndev)
422 423 424 425 426
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct bufdesc *bdp, *last_bdp;
	void *bufaddr;
427
	dma_addr_t addr;
428 429 430 431
	unsigned short status;
	unsigned short buflen;
	unsigned int estatus = 0;
	unsigned int index;
N
Nimrod Andy 已提交
432
	int entries_free;
S
Sascha Hauer 已提交
433

T
Troy Kisky 已提交
434
	entries_free = fec_enet_get_free_txdesc_num(txq);
N
Nimrod Andy 已提交
435 436 437 438 439 440 441
	if (entries_free < MAX_SKB_FRAGS + 1) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "NOT enough BD for SG!\n");
		return NETDEV_TX_OK;
	}

442 443
	/* Protocol checksum off-load for TCP and UDP. */
	if (fec_enet_clear_csum(skb, ndev)) {
444
		dev_kfree_skb_any(skb);
445 446 447
		return NETDEV_TX_OK;
	}

448
	/* Fill in a Tx ring entry */
T
Troy Kisky 已提交
449
	bdp = txq->bd.cur;
450
	last_bdp = bdp;
451
	status = fec16_to_cpu(bdp->cbd_sc);
452
	status &= ~BD_ENET_TX_STATS;
L
Linus Torvalds 已提交
453

S
Sascha Hauer 已提交
454
	/* Set buffer length and buffer pointer */
455
	bufaddr = skb->data;
456
	buflen = skb_headlen(skb);
L
Linus Torvalds 已提交
457

T
Troy Kisky 已提交
458
	index = fec_enet_get_bd_index(bdp, &txq->bd);
459
	if (((unsigned long) bufaddr) & fep->tx_align ||
460
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
461 462
		memcpy(txq->tx_bounce[index], skb->data, buflen);
		bufaddr = txq->tx_bounce[index];
L
Linus Torvalds 已提交
463

464
		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
465 466
			swap_buffer(bufaddr, buflen);
	}
467

468 469 470
	/* Push the data cache so the CPM does not get stale memory data. */
	addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, addr)) {
471 472 473 474 475
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "Tx DMA memory map failed\n");
		return NETDEV_TX_OK;
	}
L
Linus Torvalds 已提交
476

477
	if (nr_frags) {
478
		last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
T
Troy Kisky 已提交
479 480 481 482
		if (IS_ERR(last_bdp)) {
			dma_unmap_single(&fep->pdev->dev, addr,
					 buflen, DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
483
			return NETDEV_TX_OK;
T
Troy Kisky 已提交
484
		}
485 486 487 488 489 490 491 492 493
	} else {
		status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
		if (fep->bufdesc_ex) {
			estatus = BD_ENET_TX_INT;
			if (unlikely(skb_shinfo(skb)->tx_flags &
				SKBTX_HW_TSTAMP && fep->hwts_tx_en))
				estatus |= BD_ENET_TX_TS;
		}
	}
T
Troy Kisky 已提交
494 495
	bdp->cbd_bufaddr = cpu_to_fec32(addr);
	bdp->cbd_datlen = cpu_to_fec16(buflen);
496

497 498 499
	if (fep->bufdesc_ex) {

		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
500

501
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
502
			fep->hwts_tx_en))
503
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
504

505
		if (fep->quirks & FEC_QUIRK_HAS_AVB)
506
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
507

508 509 510 511
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;

		ebdp->cbd_bdu = 0;
512
		ebdp->cbd_esc = cpu_to_fec32(estatus);
513
	}
514

T
Troy Kisky 已提交
515
	index = fec_enet_get_bd_index(last_bdp, &txq->bd);
516
	/* Save skb pointer */
517
	txq->tx_skbuff[index] = skb;
518

519 520 521 522
	/* Make sure the updates to rest of the descriptor are performed before
	 * transferring ownership.
	 */
	wmb();
523

524 525 526
	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
527
	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
528
	bdp->cbd_sc = cpu_to_fec16(status);
529

S
Sascha Hauer 已提交
530
	/* If this was the last BD in the ring, start at the beginning again. */
T
Troy Kisky 已提交
531
	bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);
L
Linus Torvalds 已提交
532

533 534
	skb_tx_timestamp(skb);

535
	/* Make sure the update to bdp and tx_skbuff are performed before
T
Troy Kisky 已提交
536
	 * txq->bd.cur.
537 538
	 */
	wmb();
T
Troy Kisky 已提交
539
	txq->bd.cur = bdp;
540 541

	/* Trigger transmission start */
542
	writel(0, txq->bd.reg_desc_active);
L
Linus Torvalds 已提交
543

544
	return 0;
L
Linus Torvalds 已提交
545 546
}

N
Nimrod Andy 已提交
547
static int
548 549 550 551
fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
			  struct net_device *ndev,
			  struct bufdesc *bdp, int index, char *data,
			  int size, bool last_tcp, bool is_last)
552 553
{
	struct fec_enet_private *fep = netdev_priv(ndev);
554
	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
N
Nimrod Andy 已提交
555 556
	unsigned short status;
	unsigned int estatus = 0;
557
	dma_addr_t addr;
558

559
	status = fec16_to_cpu(bdp->cbd_sc);
N
Nimrod Andy 已提交
560
	status &= ~BD_ENET_TX_STATS;
561

N
Nimrod Andy 已提交
562 563
	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);

564
	if (((unsigned long) data) & fep->tx_align ||
565
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
566 567
		memcpy(txq->tx_bounce[index], data, size);
		data = txq->tx_bounce[index];
N
Nimrod Andy 已提交
568

569
		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
N
Nimrod Andy 已提交
570 571 572
			swap_buffer(data, size);
	}

573 574
	addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, addr)) {
N
Nimrod Andy 已提交
575
		dev_kfree_skb_any(skb);
576
		if (net_ratelimit())
N
Nimrod Andy 已提交
577
			netdev_err(ndev, "Tx DMA memory map failed\n");
578 579 580
		return NETDEV_TX_BUSY;
	}

581 582
	bdp->cbd_datlen = cpu_to_fec16(size);
	bdp->cbd_bufaddr = cpu_to_fec32(addr);
583

N
Nimrod Andy 已提交
584
	if (fep->bufdesc_ex) {
585
		if (fep->quirks & FEC_QUIRK_HAS_AVB)
586
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
N
Nimrod Andy 已提交
587 588 589
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
		ebdp->cbd_bdu = 0;
590
		ebdp->cbd_esc = cpu_to_fec32(estatus);
N
Nimrod Andy 已提交
591 592 593 594 595 596 597 598
	}

	/* Handle the last BD specially */
	if (last_tcp)
		status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
	if (is_last) {
		status |= BD_ENET_TX_INTR;
		if (fep->bufdesc_ex)
599
			ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT);
N
Nimrod Andy 已提交
600 601
	}

602
	bdp->cbd_sc = cpu_to_fec16(status);
N
Nimrod Andy 已提交
603 604 605 606 607

	return 0;
}

static int
608 609 610
fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
			 struct sk_buff *skb, struct net_device *ndev,
			 struct bufdesc *bdp, int index)
N
Nimrod Andy 已提交
611 612 613
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
614
	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
N
Nimrod Andy 已提交
615 616 617 618 619
	void *bufaddr;
	unsigned long dmabuf;
	unsigned short status;
	unsigned int estatus = 0;

620
	status = fec16_to_cpu(bdp->cbd_sc);
N
Nimrod Andy 已提交
621 622 623
	status &= ~BD_ENET_TX_STATS;
	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);

624 625
	bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
	dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE;
626
	if (((unsigned long)bufaddr) & fep->tx_align ||
627
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
628 629
		memcpy(txq->tx_bounce[index], skb->data, hdr_len);
		bufaddr = txq->tx_bounce[index];
N
Nimrod Andy 已提交
630

631
		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
N
Nimrod Andy 已提交
632 633 634 635 636 637 638 639 640 641 642 643
			swap_buffer(bufaddr, hdr_len);

		dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
					hdr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
			dev_kfree_skb_any(skb);
			if (net_ratelimit())
				netdev_err(ndev, "Tx DMA memory map failed\n");
			return NETDEV_TX_BUSY;
		}
	}

644 645
	bdp->cbd_bufaddr = cpu_to_fec32(dmabuf);
	bdp->cbd_datlen = cpu_to_fec16(hdr_len);
N
Nimrod Andy 已提交
646 647

	if (fep->bufdesc_ex) {
648
		if (fep->quirks & FEC_QUIRK_HAS_AVB)
649
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
N
Nimrod Andy 已提交
650 651 652
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
		ebdp->cbd_bdu = 0;
653
		ebdp->cbd_esc = cpu_to_fec32(estatus);
N
Nimrod Andy 已提交
654 655
	}

656
	bdp->cbd_sc = cpu_to_fec16(status);
N
Nimrod Andy 已提交
657 658 659 660

	return 0;
}

661 662 663
static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
				   struct sk_buff *skb,
				   struct net_device *ndev)
N
Nimrod Andy 已提交
664 665 666 667
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int total_len, data_left;
T
Troy Kisky 已提交
668
	struct bufdesc *bdp = txq->bd.cur;
N
Nimrod Andy 已提交
669 670 671 672
	struct tso_t tso;
	unsigned int index = 0;
	int ret;

T
Troy Kisky 已提交
673
	if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) {
N
Nimrod Andy 已提交
674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "NOT enough BD for TSO!\n");
		return NETDEV_TX_OK;
	}

	/* Protocol checksum off-load for TCP and UDP. */
	if (fec_enet_clear_csum(skb, ndev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Initialize the TSO handler, and prepare the first payload */
	tso_start(skb, &tso);

	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

T
Troy Kisky 已提交
693
		index = fec_enet_get_bd_index(bdp, &txq->bd);
N
Nimrod Andy 已提交
694 695 696 697
		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;

		/* prepare packet headers: MAC + IP + TCP */
698
		hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
N
Nimrod Andy 已提交
699
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
700
		ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index);
N
Nimrod Andy 已提交
701 702 703 704 705 706 707
		if (ret)
			goto err_release;

		while (data_left > 0) {
			int size;

			size = min_t(int, tso.size, data_left);
T
Troy Kisky 已提交
708 709
			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
			index = fec_enet_get_bd_index(bdp, &txq->bd);
710 711 712 713
			ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
							bdp, index,
							tso.data, size,
							size == data_left,
N
Nimrod Andy 已提交
714 715 716 717 718 719 720 721
							total_len == 0);
			if (ret)
				goto err_release;

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}

T
Troy Kisky 已提交
722
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
N
Nimrod Andy 已提交
723 724 725
	}

	/* Save skb pointer */
726
	txq->tx_skbuff[index] = skb;
N
Nimrod Andy 已提交
727 728

	skb_tx_timestamp(skb);
T
Troy Kisky 已提交
729
	txq->bd.cur = bdp;
N
Nimrod Andy 已提交
730 731

	/* Trigger transmission start */
732
	if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
733 734 735 736 737
	    !readl(txq->bd.reg_desc_active) ||
	    !readl(txq->bd.reg_desc_active) ||
	    !readl(txq->bd.reg_desc_active) ||
	    !readl(txq->bd.reg_desc_active))
		writel(0, txq->bd.reg_desc_active);
N
Nimrod Andy 已提交
738 739 740 741 742 743 744 745 746 747 748 749 750

	return 0;

err_release:
	/* TODO: Release all used data descriptors for TSO */
	return ret;
}

static netdev_tx_t
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int entries_free;
751 752 753
	unsigned short queue;
	struct fec_enet_priv_tx_q *txq;
	struct netdev_queue *nq;
N
Nimrod Andy 已提交
754 755
	int ret;

756 757 758 759
	queue = skb_get_queue_mapping(skb);
	txq = fep->tx_queue[queue];
	nq = netdev_get_tx_queue(ndev, queue);

N
Nimrod Andy 已提交
760
	if (skb_is_gso(skb))
761
		ret = fec_enet_txq_submit_tso(txq, skb, ndev);
N
Nimrod Andy 已提交
762
	else
763
		ret = fec_enet_txq_submit_skb(txq, skb, ndev);
764 765
	if (ret)
		return ret;
766

T
Troy Kisky 已提交
767
	entries_free = fec_enet_get_free_txdesc_num(txq);
768 769
	if (entries_free <= txq->tx_stop_threshold)
		netif_tx_stop_queue(nq);
770 771 772 773

	return NETDEV_TX_OK;
}

774 775 776 777 778
/* Init RX & TX buffer descriptors
 */
static void fec_enet_bd_init(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
779 780
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
781 782
	struct bufdesc *bdp;
	unsigned int i;
783
	unsigned int q;
784

785 786 787
	for (q = 0; q < fep->num_rx_queues; q++) {
		/* Initialize the receive buffer descriptors. */
		rxq = fep->rx_queue[q];
T
Troy Kisky 已提交
788
		bdp = rxq->bd.base;
789

T
Troy Kisky 已提交
790
		for (i = 0; i < rxq->bd.ring_size; i++) {
791

792 793
			/* Initialize the BD for every fragment in the page. */
			if (bdp->cbd_bufaddr)
794
				bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
795
			else
796
				bdp->cbd_sc = cpu_to_fec16(0);
T
Troy Kisky 已提交
797
			bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
798 799 800
		}

		/* Set the last buffer to wrap */
T
Troy Kisky 已提交
801
		bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
802
		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
803

T
Troy Kisky 已提交
804
		rxq->bd.cur = rxq->bd.base;
805 806 807 808 809
	}

	for (q = 0; q < fep->num_tx_queues; q++) {
		/* ...and the same for transmit */
		txq = fep->tx_queue[q];
T
Troy Kisky 已提交
810 811
		bdp = txq->bd.base;
		txq->bd.cur = bdp;
812

T
Troy Kisky 已提交
813
		for (i = 0; i < txq->bd.ring_size; i++) {
814
			/* Initialize the BD for every fragment in the page. */
815
			bdp->cbd_sc = cpu_to_fec16(0);
816 817 818 819
			if (txq->tx_skbuff[i]) {
				dev_kfree_skb_any(txq->tx_skbuff[i]);
				txq->tx_skbuff[i] = NULL;
			}
820
			bdp->cbd_bufaddr = cpu_to_fec32(0);
T
Troy Kisky 已提交
821
			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
822 823 824
		}

		/* Set the last buffer to wrap */
T
Troy Kisky 已提交
825
		bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
826
		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
827
		txq->dirty_tx = bdp;
828
	}
829
}
830

F
Frank Li 已提交
831 832 833 834 835 836
static void fec_enet_active_rxring(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;

	for (i = 0; i < fep->num_rx_queues; i++)
837
		writel(0, fep->rx_queue[i]->bd.reg_desc_active);
F
Frank Li 已提交
838 839
}

840 841 842 843 844 845
static void fec_enet_enable_ring(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	int i;
846

847 848
	for (i = 0; i < fep->num_rx_queues; i++) {
		rxq = fep->rx_queue[i];
T
Troy Kisky 已提交
849
		writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
850
		writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
851

852 853 854 855 856
		/* enable DMA1/2 */
		if (i)
			writel(RCMR_MATCHEN | RCMR_CMP(i),
			       fep->hwp + FEC_RCMR(i));
	}
857

858 859
	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = fep->tx_queue[i];
T
Troy Kisky 已提交
860
		writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i));
861 862 863 864 865

		/* enable DMA1/2 */
		if (i)
			writel(DMA_CLASS_EN | IDLE_SLOPE(i),
			       fep->hwp + FEC_DMA_CFG(i));
866
	}
867
}
868

869 870 871 872 873 874 875 876 877
static void fec_enet_reset_skb(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_tx_q *txq;
	int i, j;

	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = fep->tx_queue[i];

T
Troy Kisky 已提交
878
		for (j = 0; j < txq->bd.ring_size; j++) {
879 880 881 882 883 884
			if (txq->tx_skbuff[j]) {
				dev_kfree_skb_any(txq->tx_skbuff[j]);
				txq->tx_skbuff[j] = NULL;
			}
		}
	}
885 886
}

887 888 889 890
/*
 * This function is called to start or restart the FEC during a link
 * change, transmit timeout, or to reconfigure the FEC.  The network
 * packet processing for this device must be stopped before this call.
891
 */
L
Linus Torvalds 已提交
892
static void
893
fec_restart(struct net_device *ndev)
L
Linus Torvalds 已提交
894
{
895
	struct fec_enet_private *fep = netdev_priv(ndev);
896
	u32 val;
897 898
	u32 temp_mac[2];
	u32 rcntl = OPT_FRAME_SIZE | 0x04;
S
Shawn Guo 已提交
899
	u32 ecntl = 0x2; /* ETHEREN */
L
Linus Torvalds 已提交
900

901 902 903 904
	/* Whack a reset.  We should wait for this.
	 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
	 * instead of reset MAC itself.
	 */
905
	if (fep->quirks & FEC_QUIRK_HAS_AVB) {
906 907 908 909 910
		writel(0, fep->hwp + FEC_ECNTRL);
	} else {
		writel(1, fep->hwp + FEC_ECNTRL);
		udelay(10);
	}
L
Linus Torvalds 已提交
911

912 913 914 915
	/*
	 * enet-mac reset will reset mac address registers too,
	 * so need to reconfigure it.
	 */
916
	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
917
		memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
918 919 920 921
		writel((__force u32)cpu_to_be32(temp_mac[0]),
		       fep->hwp + FEC_ADDR_LOW);
		writel((__force u32)cpu_to_be32(temp_mac[1]),
		       fep->hwp + FEC_ADDR_HIGH);
922
	}
L
Linus Torvalds 已提交
923

924
	/* Clear any outstanding interrupt. */
925
	writel(0xffffffff, fep->hwp + FEC_IEVENT);
L
Linus Torvalds 已提交
926

927 928
	fec_enet_bd_init(ndev);

929
	fec_enet_enable_ring(ndev);
930

931 932
	/* Reset tx SKB buffers. */
	fec_enet_reset_skb(ndev);
933

934
	/* Enable MII mode */
935
	if (fep->full_duplex == DUPLEX_FULL) {
936
		/* FD enable */
937 938
		writel(0x04, fep->hwp + FEC_X_CNTRL);
	} else {
939 940
		/* No Rcv on Xmit */
		rcntl |= 0x02;
941 942
		writel(0x0, fep->hwp + FEC_X_CNTRL);
	}
943

944 945 946
	/* Set MII speed */
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

G
Guenter Roeck 已提交
947
#if !defined(CONFIG_M5272)
948 949
	if (fep->quirks & FEC_QUIRK_HAS_RACC) {
		val = readl(fep->hwp + FEC_RACC);
950 951
		/* align IP header */
		val |= FEC_RACC_SHIFT16;
952
		if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
953
			/* set RX checksum */
954 955 956 957
			val |= FEC_RACC_OPTIONS;
		else
			val &= ~FEC_RACC_OPTIONS;
		writel(val, fep->hwp + FEC_RACC);
958
		writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
959
	}
G
Guenter Roeck 已提交
960
#endif
961

962 963 964 965
	/*
	 * The phy interface and speed need to get configured
	 * differently on enet-mac.
	 */
966
	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
967 968
		/* Enable flow control and length check */
		rcntl |= 0x40000000 | 0x00000020;
969

S
Shawn Guo 已提交
970
		/* RGMII, RMII or MII */
M
Markus Pargmann 已提交
971 972 973 974
		if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII ||
		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
S
Shawn Guo 已提交
975 976
			rcntl |= (1 << 6);
		else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
977
			rcntl |= (1 << 8);
978
		else
979
			rcntl &= ~(1 << 8);
980

S
Shawn Guo 已提交
981
		/* 1G, 100M or 10M */
982 983
		if (ndev->phydev) {
			if (ndev->phydev->speed == SPEED_1000)
S
Shawn Guo 已提交
984
				ecntl |= (1 << 5);
985
			else if (ndev->phydev->speed == SPEED_100)
S
Shawn Guo 已提交
986 987 988 989
				rcntl &= ~(1 << 9);
			else
				rcntl |= (1 << 9);
		}
990 991
	} else {
#ifdef FEC_MIIGSK_ENR
992
		if (fep->quirks & FEC_QUIRK_USE_GASKET) {
993
			u32 cfgr;
994 995 996 997 998 999 1000 1001
			/* disable the gasket and wait */
			writel(0, fep->hwp + FEC_MIIGSK_ENR);
			while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
				udelay(1);

			/*
			 * configure the gasket:
			 *   RMII, 50 MHz, no loopback, no echo
1002
			 *   MII, 25 MHz, no loopback, no echo
1003
			 */
1004 1005
			cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
				? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
1006
			if (ndev->phydev && ndev->phydev->speed == SPEED_10)
1007 1008
				cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
			writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);
1009 1010 1011

			/* re-enable the gasket */
			writel(2, fep->hwp + FEC_MIIGSK_ENR);
1012
		}
1013 1014
#endif
	}
1015

G
Guenter Roeck 已提交
1016
#if !defined(CONFIG_M5272)
1017 1018 1019
	/* enable pause frame*/
	if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
	    ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
1020
	     ndev->phydev && ndev->phydev->pause)) {
1021 1022
		rcntl |= FEC_ENET_FCE;

1023
		/* set FIFO threshold parameter to reduce overrun */
1024 1025 1026 1027 1028 1029 1030 1031 1032 1033
		writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
		writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
		writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
		writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);

		/* OPD */
		writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
	} else {
		rcntl &= ~FEC_ENET_FCE;
	}
G
Guenter Roeck 已提交
1034
#endif /* !defined(CONFIG_M5272) */
1035

1036
	writel(rcntl, fep->hwp + FEC_R_CNTRL);
1037

1038 1039 1040 1041 1042 1043 1044
	/* Setup multicast filter. */
	set_multicast_list(ndev);
#ifndef CONFIG_M5272
	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
#endif

1045
	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
S
Shawn Guo 已提交
1046 1047 1048 1049 1050 1051
		/* enable ENET endian swap */
		ecntl |= (1 << 8);
		/* enable ENET store and forward mode */
		writel(1 << 8, fep->hwp + FEC_X_WMRK);
	}

1052 1053
	if (fep->bufdesc_ex)
		ecntl |= (1 << 4);
1054

1055
#ifndef CONFIG_M5272
1056 1057
	/* Enable the MIB statistic event counters */
	writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT);
1058 1059
#endif

1060
	/* And last, enable the transmit and receive processing */
S
Shawn Guo 已提交
1061
	writel(ecntl, fep->hwp + FEC_ECNTRL);
F
Frank Li 已提交
1062
	fec_enet_active_rxring(ndev);
1063

1064 1065 1066
	if (fep->bufdesc_ex)
		fec_ptp_start_cyclecounter(ndev);

1067
	/* Enable interrupts we wish to service */
1068 1069 1070 1071
	if (fep->link)
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
	else
		writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
1072 1073 1074 1075

	/* Init the interrupt coalescing */
	fec_enet_itr_coal_init(ndev);

1076 1077 1078 1079 1080 1081
}

static void
fec_stop(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
N
Nimrod Andy 已提交
1082
	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
1083
	u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
N
Nimrod Andy 已提交
1084
	u32 val;
1085 1086 1087 1088 1089 1090

	/* We cannot expect a graceful transmit stop without link !!! */
	if (fep->link) {
		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
		udelay(10);
		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
1091
			netdev_err(ndev, "Graceful transmit stop did not complete!\n");
1092 1093
	}

1094 1095 1096 1097
	/* Whack a reset.  We should wait for this.
	 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
	 * instead of reset MAC itself.
	 */
N
Nimrod Andy 已提交
1098 1099 1100 1101 1102 1103 1104 1105
	if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
		if (fep->quirks & FEC_QUIRK_HAS_AVB) {
			writel(0, fep->hwp + FEC_ECNTRL);
		} else {
			writel(1, fep->hwp + FEC_ECNTRL);
			udelay(10);
		}
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1106
	} else {
N
Nimrod Andy 已提交
1107 1108 1109 1110 1111 1112 1113
		writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
		val = readl(fep->hwp + FEC_ECNTRL);
		val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
		writel(val, fep->hwp + FEC_ECNTRL);

		if (pdata && pdata->sleep_mode_enable)
			pdata->sleep_mode_enable(true);
1114
	}
1115
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
S
Shawn Guo 已提交
1116 1117

	/* We have to keep ENET enabled to have MII interrupt stay working */
N
Nimrod Andy 已提交
1118 1119
	if (fep->quirks & FEC_QUIRK_ENET_MAC &&
		!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
S
Shawn Guo 已提交
1120
		writel(2, fep->hwp + FEC_ECNTRL);
1121 1122
		writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
	}
L
Linus Torvalds 已提交
1123 1124 1125
}


1126 1127 1128 1129 1130
static void
fec_timeout(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

1131 1132
	fec_dump(ndev);

1133 1134
	ndev->stats.tx_errors++;

1135
	schedule_work(&fep->tx_timeout_work);
1136 1137
}

1138
static void fec_enet_timeout_work(struct work_struct *work)
1139 1140
{
	struct fec_enet_private *fep =
1141
		container_of(work, struct fec_enet_private, tx_timeout_work);
1142
	struct net_device *ndev = fep->netdev;
1143

1144 1145 1146 1147 1148 1149 1150 1151
	rtnl_lock();
	if (netif_device_present(ndev) || netif_running(ndev)) {
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		fec_restart(ndev);
		netif_wake_queue(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
1152
	}
1153
	rtnl_unlock();
1154 1155
}

1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170
/* Convert a raw hardware timestamp counter value @ts into a kernel
 * hwtstamp, using the driver's timecounter under tmreg_lock.
 */
static void
fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
	struct skb_shared_hwtstamps *hwtstamps)
{
	unsigned long flags;
	u64 ns;

	spin_lock_irqsave(&fep->tmreg_lock, flags);
	ns = timecounter_cyc2time(&fep->tc, ts);
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ns_to_ktime(ns);
}

L
Linus Torvalds 已提交
1171
static void
1172
fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
L
Linus Torvalds 已提交
1173 1174
{
	struct	fec_enet_private *fep;
1175
	struct bufdesc *bdp;
1176
	unsigned short status;
L
Linus Torvalds 已提交
1177
	struct	sk_buff	*skb;
1178 1179
	struct fec_enet_priv_tx_q *txq;
	struct netdev_queue *nq;
1180
	int	index = 0;
N
Nimrod Andy 已提交
1181
	int	entries_free;
L
Linus Torvalds 已提交
1182

1183
	fep = netdev_priv(ndev);
1184 1185 1186 1187 1188 1189 1190

	queue_id = FEC_ENET_GET_QUQUE(queue_id);

	txq = fep->tx_queue[queue_id];
	/* get next bdp of dirty_tx */
	nq = netdev_get_tx_queue(ndev, queue_id);
	bdp = txq->dirty_tx;
L
Linus Torvalds 已提交
1191

1192
	/* get next bdp of dirty_tx */
T
Troy Kisky 已提交
1193
	bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
1194

T
Troy Kisky 已提交
1195 1196
	while (bdp != READ_ONCE(txq->bd.cur)) {
		/* Order the load of bd.cur and cbd_sc */
1197
		rmb();
1198
		status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
1199
		if (status & BD_ENET_TX_READY)
S
Sascha Hauer 已提交
1200 1201
			break;

T
Troy Kisky 已提交
1202
		index = fec_enet_get_bd_index(bdp, &txq->bd);
1203

1204
		skb = txq->tx_skbuff[index];
1205
		txq->tx_skbuff[index] = NULL;
1206 1207 1208 1209 1210 1211
		if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
			dma_unmap_single(&fep->pdev->dev,
					 fec32_to_cpu(bdp->cbd_bufaddr),
					 fec16_to_cpu(bdp->cbd_datlen),
					 DMA_TO_DEVICE);
		bdp->cbd_bufaddr = cpu_to_fec32(0);
1212 1213
		if (!skb)
			goto skb_done;
1214

L
Linus Torvalds 已提交
1215
		/* Check for errors. */
1216
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
L
Linus Torvalds 已提交
1217 1218
				   BD_ENET_TX_RL | BD_ENET_TX_UN |
				   BD_ENET_TX_CSL)) {
1219
			ndev->stats.tx_errors++;
1220
			if (status & BD_ENET_TX_HB)  /* No heartbeat */
1221
				ndev->stats.tx_heartbeat_errors++;
1222
			if (status & BD_ENET_TX_LC)  /* Late collision */
1223
				ndev->stats.tx_window_errors++;
1224
			if (status & BD_ENET_TX_RL)  /* Retrans limit */
1225
				ndev->stats.tx_aborted_errors++;
1226
			if (status & BD_ENET_TX_UN)  /* Underrun */
1227
				ndev->stats.tx_fifo_errors++;
1228
			if (status & BD_ENET_TX_CSL) /* Carrier lost */
1229
				ndev->stats.tx_carrier_errors++;
L
Linus Torvalds 已提交
1230
		} else {
1231
			ndev->stats.tx_packets++;
1232
			ndev->stats.tx_bytes += skb->len;
L
Linus Torvalds 已提交
1233 1234
		}

1235 1236
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
			fep->bufdesc_ex) {
1237
			struct skb_shared_hwtstamps shhwtstamps;
1238
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
1239

1240
			fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
1241 1242
			skb_tstamp_tx(skb, &shhwtstamps);
		}
1243

L
Linus Torvalds 已提交
1244 1245 1246
		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
1247
		if (status & BD_ENET_TX_DEF)
1248
			ndev->stats.collisions++;
1249

S
Sascha Hauer 已提交
1250
		/* Free the sk buffer associated with this last transmit */
L
Linus Torvalds 已提交
1251
		dev_kfree_skb_any(skb);
1252
skb_done:
1253 1254 1255 1256
		/* Make sure the update to bdp and tx_skbuff are performed
		 * before dirty_tx
		 */
		wmb();
1257
		txq->dirty_tx = bdp;
1258

S
Sascha Hauer 已提交
1259
		/* Update pointer to next buffer descriptor to be transmitted */
T
Troy Kisky 已提交
1260
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
1261

S
Sascha Hauer 已提交
1262
		/* Since we have freed up a buffer, the ring is no longer full
L
Linus Torvalds 已提交
1263
		 */
N
Nimrod Andy 已提交
1264
		if (netif_queue_stopped(ndev)) {
T
Troy Kisky 已提交
1265
			entries_free = fec_enet_get_free_txdesc_num(txq);
1266 1267
			if (entries_free >= txq->tx_wake_threshold)
				netif_tx_wake_queue(nq);
N
Nimrod Andy 已提交
1268
		}
L
Linus Torvalds 已提交
1269
	}
1270 1271

	/* ERR006538: Keep the transmitter going */
T
Troy Kisky 已提交
1272
	if (bdp != txq->bd.cur &&
1273 1274
	    readl(txq->bd.reg_desc_active) == 0)
		writel(0, txq->bd.reg_desc_active);
1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287
}

/* Reclaim TX completions on every queue flagged in work_tx. */
static void
fec_enet_tx(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	u16 queue_id;

	/* First process class A queue, then Class B and Best Effort queue */
	for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) {
		clear_bit(queue_id, &fep->work_tx);
		fec_enet_tx_queue(ndev, queue_id);
	}
}

1290 1291 1292 1293 1294 1295 1296 1297 1298 1299
static int
fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb)
{
	struct  fec_enet_private *fep = netdev_priv(ndev);
	int off;

	off = ((unsigned long)skb->data) & fep->rx_align;
	if (off)
		skb_reserve(skb, fep->rx_align + 1 - off);

1300 1301
	bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE));
	if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) {
1302 1303 1304 1305 1306 1307 1308 1309 1310
		if (net_ratelimit())
			netdev_err(ndev, "Rx DMA memory map failed\n");
		return -ENOMEM;
	}

	return 0;
}

/* Copybreak optimisation: for small frames (<= rx_copybreak) copy the
 * payload into a freshly allocated skb so the original DMA buffer can
 * be left in the ring.  On success *skb is replaced with the copy and
 * true is returned; false means the caller must recycle the buffer.
 */
static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
			       struct bufdesc *bdp, u32 length, bool swap)
{
	struct  fec_enet_private *fep = netdev_priv(ndev);
	struct sk_buff *new_skb;

	if (length > fep->rx_copybreak)
		return false;

	new_skb = netdev_alloc_skb(ndev, length);
	if (!new_skb)
		return false;

	dma_sync_single_for_cpu(&fep->pdev->dev,
				fec32_to_cpu(bdp->cbd_bufaddr),
				FEC_ENET_RX_FRSIZE - fep->rx_align,
				DMA_FROM_DEVICE);
	if (!swap)
		memcpy(new_skb->data, (*skb)->data, length);
	else
		swap_buffer2(new_skb->data, (*skb)->data, length);
	*skb = new_skb;

	return true;
}

T
Troy Kisky 已提交
1336
/* During a receive, the bd_rx.cur points to the current incoming buffer.
L
Linus Torvalds 已提交
1337 1338 1339 1340
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
1341
static int
1342
fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
L
Linus Torvalds 已提交
1343
{
1344
	struct fec_enet_private *fep = netdev_priv(ndev);
1345
	struct fec_enet_priv_rx_q *rxq;
S
Sascha Hauer 已提交
1346
	struct bufdesc *bdp;
1347
	unsigned short status;
1348 1349
	struct  sk_buff *skb_new = NULL;
	struct  sk_buff *skb;
L
Linus Torvalds 已提交
1350 1351
	ushort	pkt_len;
	__u8 *data;
1352
	int	pkt_received = 0;
1353 1354 1355
	struct	bufdesc_ex *ebdp = NULL;
	bool	vlan_packet_rcvd = false;
	u16	vlan_tag;
1356
	int	index = 0;
1357
	bool	is_copybreak;
1358
	bool	need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
1359

1360 1361
#ifdef CONFIG_M532x
	flush_cache_all();
1362
#endif
1363 1364
	queue_id = FEC_ENET_GET_QUQUE(queue_id);
	rxq = fep->rx_queue[queue_id];
L
Linus Torvalds 已提交
1365 1366 1367 1368

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
T
Troy Kisky 已提交
1369
	bdp = rxq->bd.cur;
L
Linus Torvalds 已提交
1370

1371
	while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
L
Linus Torvalds 已提交
1372

1373 1374 1375 1376
		if (pkt_received >= budget)
			break;
		pkt_received++;

1377
		writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);
1378

S
Sascha Hauer 已提交
1379
		/* Check for errors. */
T
Troy Kisky 已提交
1380
		status ^= BD_ENET_RX_LAST;
S
Sascha Hauer 已提交
1381
		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
T
Troy Kisky 已提交
1382 1383
			   BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST |
			   BD_ENET_RX_CL)) {
1384
			ndev->stats.rx_errors++;
T
Troy Kisky 已提交
1385 1386 1387 1388 1389 1390 1391
			if (status & BD_ENET_RX_OV) {
				/* FIFO overrun */
				ndev->stats.rx_fifo_errors++;
				goto rx_processing_done;
			}
			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH
						| BD_ENET_RX_LAST)) {
S
Sascha Hauer 已提交
1392
				/* Frame too long or too short. */
1393
				ndev->stats.rx_length_errors++;
T
Troy Kisky 已提交
1394 1395
				if (status & BD_ENET_RX_LAST)
					netdev_err(ndev, "rcv is not +last\n");
S
Sascha Hauer 已提交
1396 1397
			}
			if (status & BD_ENET_RX_CR)	/* CRC Error */
1398
				ndev->stats.rx_crc_errors++;
T
Troy Kisky 已提交
1399 1400 1401
			/* Report late collisions as a frame error. */
			if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				ndev->stats.rx_frame_errors++;
S
Sascha Hauer 已提交
1402 1403
			goto rx_processing_done;
		}
L
Linus Torvalds 已提交
1404

S
Sascha Hauer 已提交
1405
		/* Process the incoming frame. */
1406
		ndev->stats.rx_packets++;
1407
		pkt_len = fec16_to_cpu(bdp->cbd_datlen);
1408
		ndev->stats.rx_bytes += pkt_len;
L
Linus Torvalds 已提交
1409

T
Troy Kisky 已提交
1410
		index = fec_enet_get_bd_index(bdp, &rxq->bd);
1411
		skb = rxq->rx_skbuff[index];
1412

1413 1414 1415 1416
		/* The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
1417 1418
		is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4,
						  need_swap);
1419 1420 1421 1422 1423 1424
		if (!is_copybreak) {
			skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
			if (unlikely(!skb_new)) {
				ndev->stats.rx_dropped++;
				goto rx_processing_done;
			}
1425 1426
			dma_unmap_single(&fep->pdev->dev,
					 fec32_to_cpu(bdp->cbd_bufaddr),
1427 1428 1429 1430 1431 1432 1433
					 FEC_ENET_RX_FRSIZE - fep->rx_align,
					 DMA_FROM_DEVICE);
		}

		prefetch(skb->data - NET_IP_ALIGN);
		skb_put(skb, pkt_len - 4);
		data = skb->data;
1434 1435 1436 1437 1438 1439

#if !defined(CONFIG_M5272)
		if (fep->quirks & FEC_QUIRK_HAS_RACC)
			data = skb_pull_inline(skb, 2);
#endif

1440
		if (!is_copybreak && need_swap)
1441 1442
			swap_buffer(data, pkt_len);

1443 1444 1445 1446 1447 1448 1449 1450
		/* Extract the enhanced buffer descriptor */
		ebdp = NULL;
		if (fep->bufdesc_ex)
			ebdp = (struct bufdesc_ex *)bdp;

		/* If this is a VLAN packet remove the VLAN Tag */
		vlan_packet_rcvd = false;
		if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1451 1452
		    fep->bufdesc_ex &&
		    (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
1453 1454 1455 1456 1457 1458
			/* Push and remove the vlan tag */
			struct vlan_hdr *vlan_header =
					(struct vlan_hdr *) (data + ETH_HLEN);
			vlan_tag = ntohs(vlan_header->h_vlan_TCI);

			vlan_packet_rcvd = true;
1459

1460
			memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
1461
			skb_pull(skb, VLAN_HLEN);
1462 1463
		}

1464
		skb->protocol = eth_type_trans(skb, ndev);
L
Linus Torvalds 已提交
1465

1466 1467
		/* Get receive timestamp from the skb */
		if (fep->hwts_rx_en && fep->bufdesc_ex)
1468
			fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
1469 1470 1471 1472
					  skb_hwtstamps(skb));

		if (fep->bufdesc_ex &&
		    (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
1473
			if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) {
1474 1475 1476 1477
				/* don't check it */
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				skb_checksum_none_assert(skb);
1478
			}
1479
		}
1480

1481 1482 1483 1484 1485
		/* Handle received VLAN packets */
		if (vlan_packet_rcvd)
			__vlan_hwaccel_put_tag(skb,
					       htons(ETH_P_8021Q),
					       vlan_tag);
1486

1487 1488 1489
		napi_gro_receive(&fep->napi, skb);

		if (is_copybreak) {
1490 1491
			dma_sync_single_for_device(&fep->pdev->dev,
						   fec32_to_cpu(bdp->cbd_bufaddr),
1492 1493 1494 1495 1496
						   FEC_ENET_RX_FRSIZE - fep->rx_align,
						   DMA_FROM_DEVICE);
		} else {
			rxq->rx_skbuff[index] = skb_new;
			fec_enet_new_rxbdp(ndev, bdp, skb_new);
S
Sascha Hauer 已提交
1497
		}
S
Sascha Hauer 已提交
1498

S
Sascha Hauer 已提交
1499 1500 1501
rx_processing_done:
		/* Clear the status flags for this buffer */
		status &= ~BD_ENET_RX_STATS;
L
Linus Torvalds 已提交
1502

S
Sascha Hauer 已提交
1503 1504
		/* Mark the buffer empty */
		status |= BD_ENET_RX_EMPTY;
1505

1506 1507 1508
		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

1509
			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
1510 1511 1512
			ebdp->cbd_prot = 0;
			ebdp->cbd_bdu = 0;
		}
1513 1514 1515 1516 1517
		/* Make sure the updates to rest of the descriptor are
		 * performed before transferring ownership.
		 */
		wmb();
		bdp->cbd_sc = cpu_to_fec16(status);
1518

S
Sascha Hauer 已提交
1519
		/* Update BD pointer to next entry */
T
Troy Kisky 已提交
1520
		bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
1521

S
Sascha Hauer 已提交
1522 1523 1524 1525
		/* Doing this here will keep the FEC running while we process
		 * incoming frames.  On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
1526
		writel(0, rxq->bd.reg_desc_active);
S
Sascha Hauer 已提交
1527
	}
T
Troy Kisky 已提交
1528
	rxq->bd.cur = bdp;
1529 1530
	return pkt_received;
}
L
Linus Torvalds 已提交
1531

1532 1533 1534 1535 1536 1537 1538 1539
/* Poll every RX queue flagged in work_rx, splitting @budget between
 * them; a queue whose return falls short of its share is fully drained
 * and its work bit is cleared.  Returns total packets received.
 */
static int
fec_enet_rx(struct net_device *ndev, int budget)
{
	int     pkt_received = 0;
	u16	queue_id;
	struct fec_enet_private *fep = netdev_priv(ndev);

	for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
		int ret;

		ret = fec_enet_rx_queue(ndev,
					budget - pkt_received, queue_id);

		if (ret < budget - pkt_received)
			clear_bit(queue_id, &fep->work_rx);

		pkt_received += ret;
	}
	return pkt_received;
}

1553 1554 1555 1556 1557 1558 1559 1560
/* Translate interrupt event bits into per-queue work_rx/work_tx bits.
 * Bit 2 is the legacy/default queue; bits 0 and 1 are the AVB queues.
 * Returns false when there were no events at all.
 */
static bool
fec_enet_collect_events(struct fec_enet_private *fep, uint int_events)
{
	if (int_events == 0)
		return false;

	if (int_events & FEC_ENET_RXF)
		fep->work_rx |= (1 << 2);
	if (int_events & FEC_ENET_RXF_1)
		fep->work_rx |= (1 << 0);
	if (int_events & FEC_ENET_RXF_2)
		fep->work_rx |= (1 << 1);

	if (int_events & FEC_ENET_TXF)
		fep->work_tx |= (1 << 2);
	if (int_events & FEC_ENET_TXF_1)
		fep->work_tx |= (1 << 0);
	if (int_events & FEC_ENET_TXF_2)
		fep->work_tx |= (1 << 1);

	return true;
}

1576 1577 1578 1579 1580 1581 1582 1583
static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct fec_enet_private *fep = netdev_priv(ndev);
	uint int_events;
	irqreturn_t ret = IRQ_NONE;

1584
	int_events = readl(fep->hwp + FEC_IEVENT);
N
Nimrod Andy 已提交
1585
	writel(int_events, fep->hwp + FEC_IEVENT);
1586
	fec_enet_collect_events(fep, int_events);
1587

1588
	if ((fep->work_tx || fep->work_rx) && fep->link) {
1589
		ret = IRQ_HANDLED;
1590

N
Nimrod Andy 已提交
1591 1592
		if (napi_schedule_prep(&fep->napi)) {
			/* Disable the NAPI interrupts */
1593
			writel(FEC_NAPI_IMASK, fep->hwp + FEC_IMASK);
N
Nimrod Andy 已提交
1594 1595
			__napi_schedule(&fep->napi);
		}
1596
	}
1597

1598 1599 1600 1601
	if (int_events & FEC_ENET_MII) {
		ret = IRQ_HANDLED;
		complete(&fep->mdio_done);
	}
1602

1603 1604
	if (fep->ptp_clock)
		fec_ptp_check_pps_event(fep);
1605

1606 1607 1608
	return ret;
}

1609 1610 1611 1612
static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct fec_enet_private *fep = netdev_priv(ndev);
1613 1614 1615
	int pkts;

	pkts = fec_enet_rx(ndev, budget);
1616

1617 1618
	fec_enet_tx(ndev);

1619 1620 1621 1622 1623 1624
	if (pkts < budget) {
		napi_complete(napi);
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
	}
	return pkts;
}
1625

1626
/* ------------------------------------------------------------------------- */
1627
static void fec_get_mac(struct net_device *ndev)
L
Linus Torvalds 已提交
1628
{
1629
	struct fec_enet_private *fep = netdev_priv(ndev);
J
Jingoo Han 已提交
1630
	struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
1631
	unsigned char *iap, tmpaddr[ETH_ALEN];
L
Linus Torvalds 已提交
1632

1633 1634 1635 1636 1637 1638 1639 1640
	/*
	 * try to get mac address in following order:
	 *
	 * 1) module parameter via kernel command line in form
	 *    fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
	 */
	iap = macaddr;

1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652
	/*
	 * 2) from device tree data
	 */
	if (!is_valid_ether_addr(iap)) {
		struct device_node *np = fep->pdev->dev.of_node;
		if (np) {
			const char *mac = of_get_mac_address(np);
			if (mac)
				iap = (unsigned char *) mac;
		}
	}

1653
	/*
1654
	 * 3) from flash or fuse (via platform data)
1655 1656 1657 1658 1659 1660 1661
	 */
	if (!is_valid_ether_addr(iap)) {
#ifdef CONFIG_M5272
		if (FEC_FLASHMAC)
			iap = (unsigned char *)FEC_FLASHMAC;
#else
		if (pdata)
1662
			iap = (unsigned char *)&pdata->mac;
1663 1664 1665 1666
#endif
	}

	/*
1667
	 * 4) FEC mac registers set by bootloader
1668 1669
	 */
	if (!is_valid_ether_addr(iap)) {
1670 1671 1672 1673
		*((__be32 *) &tmpaddr[0]) =
			cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
		*((__be16 *) &tmpaddr[4]) =
			cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
1674
		iap = &tmpaddr[0];
L
Linus Torvalds 已提交
1675 1676
	}

1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688
	/*
	 * 5) random mac address
	 */
	if (!is_valid_ether_addr(iap)) {
		/* Report it and use a random ethernet address instead */
		netdev_err(ndev, "Invalid MAC address: %pM\n", iap);
		eth_hw_addr_random(ndev);
		netdev_info(ndev, "Using random MAC address: %pM\n",
			    ndev->dev_addr);
		return;
	}

1689
	memcpy(ndev->dev_addr, iap, ETH_ALEN);
L
Linus Torvalds 已提交
1690

1691 1692
	/* Adjust MAC if using macaddr */
	if (iap == macaddr)
S
Shawn Guo 已提交
1693
		 ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
L
Linus Torvalds 已提交
1694 1695
}

1696
/* ------------------------------------------------------------------------- */
L
Linus Torvalds 已提交
1697

1698 1699 1700
/*
 * Phy section
 */
1701
static void fec_enet_adjust_link(struct net_device *ndev)
L
Linus Torvalds 已提交
1702
{
1703
	struct fec_enet_private *fep = netdev_priv(ndev);
1704
	struct phy_device *phy_dev = ndev->phydev;
1705
	int status_change = 0;
L
Linus Torvalds 已提交
1706

1707 1708 1709
	/* Prevent a state halted on mii error */
	if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
		phy_dev->state = PHY_RESUMING;
1710
		return;
1711
	}
L
Linus Torvalds 已提交
1712

1713 1714 1715 1716 1717 1718 1719 1720
	/*
	 * If the netdev is down, or is going down, we're not interested
	 * in link state events, so just mark our idea of the link as down
	 * and ignore the event.
	 */
	if (!netif_running(ndev) || !netif_device_present(ndev)) {
		fep->link = 0;
	} else if (phy_dev->link) {
1721
		if (!fep->link) {
1722
			fep->link = phy_dev->link;
1723 1724
			status_change = 1;
		}
L
Linus Torvalds 已提交
1725

1726 1727
		if (fep->full_duplex != phy_dev->duplex) {
			fep->full_duplex = phy_dev->duplex;
1728
			status_change = 1;
1729
		}
1730 1731 1732 1733 1734 1735 1736

		if (phy_dev->speed != fep->speed) {
			fep->speed = phy_dev->speed;
			status_change = 1;
		}

		/* if any of the above changed restart the FEC */
1737 1738 1739
		if (status_change) {
			napi_disable(&fep->napi);
			netif_tx_lock_bh(ndev);
1740
			fec_restart(ndev);
1741
			netif_wake_queue(ndev);
1742
			netif_tx_unlock_bh(ndev);
1743 1744
			napi_enable(&fep->napi);
		}
1745 1746
	} else {
		if (fep->link) {
1747 1748
			napi_disable(&fep->napi);
			netif_tx_lock_bh(ndev);
1749
			fec_stop(ndev);
1750 1751
			netif_tx_unlock_bh(ndev);
			napi_enable(&fep->napi);
1752
			fep->link = phy_dev->link;
1753 1754
			status_change = 1;
		}
L
Linus Torvalds 已提交
1755
	}
1756

1757 1758 1759
	if (status_change)
		phy_print_status(phy_dev);
}
L
Linus Torvalds 已提交
1760

1761
static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
L
Linus Torvalds 已提交
1762
{
1763
	struct fec_enet_private *fep = bus->priv;
1764
	struct device *dev = &fep->pdev->dev;
1765
	unsigned long time_left;
1766 1767 1768
	int ret = 0;

	ret = pm_runtime_get_sync(dev);
1769
	if (ret < 0)
1770
		return ret;
L
Linus Torvalds 已提交
1771

1772
	fep->mii_timeout = 0;
1773
	reinit_completion(&fep->mdio_done);
1774 1775 1776 1777 1778 1779 1780

	/* start a read op */
	writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
		FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
1781 1782 1783 1784
	time_left = wait_for_completion_timeout(&fep->mdio_done,
			usecs_to_jiffies(FEC_MII_TIMEOUT));
	if (time_left == 0) {
		fep->mii_timeout = 1;
1785
		netdev_err(fep->netdev, "MDIO read timeout\n");
1786 1787
		ret = -ETIMEDOUT;
		goto out;
L
Linus Torvalds 已提交
1788 1789
	}

1790 1791 1792 1793 1794 1795 1796
	ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));

out:
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return ret;
1797
}
1798

1799 1800
/* MDIO bus write: issue a clause-22 write on the FEC MII management
 * interface and wait for completion.  Returns 0 on success or a
 * negative errno on PM or timeout failure.
 */
static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct fec_enet_private *fep = bus->priv;
	struct device *dev = &fep->pdev->dev;
	unsigned long time_left;
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		return ret;
	else
		ret = 0;

	fep->mii_timeout = 0;
	reinit_completion(&fep->mdio_done);

	/* start a write op */
	writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
		FEC_MMFR_TA | FEC_MMFR_DATA(value),
		fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	time_left = wait_for_completion_timeout(&fep->mdio_done,
			usecs_to_jiffies(FEC_MII_TIMEOUT));
	if (time_left == 0) {
		fep->mii_timeout = 1;
		netdev_err(fep->netdev, "MDIO write timeout\n");
		ret  = -ETIMEDOUT;
	}

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return ret;
}
L
Linus Torvalds 已提交
1836

1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851
/* Enable (@enable=true) or disable the FEC clocks: AHB, optional
 * enet_out, optional PTP (guarded by ptp_clk_mutex) and optional ref
 * clocks.  On an enable failure, already-enabled clocks are unwound.
 * Returns 0 on success or a negative errno.
 */
static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;

	if (enable) {
		ret = clk_prepare_enable(fep->clk_ahb);
		if (ret)
			return ret;
		if (fep->clk_enet_out) {
			ret = clk_prepare_enable(fep->clk_enet_out);
			if (ret)
				goto failed_clk_enet_out;
		}
		if (fep->clk_ptp) {
			mutex_lock(&fep->ptp_clk_mutex);
			ret = clk_prepare_enable(fep->clk_ptp);
			if (ret) {
				mutex_unlock(&fep->ptp_clk_mutex);
				goto failed_clk_ptp;
			} else {
				fep->ptp_clk_on = true;
			}
			mutex_unlock(&fep->ptp_clk_mutex);
		}
		if (fep->clk_ref) {
			ret = clk_prepare_enable(fep->clk_ref);
			if (ret)
				goto failed_clk_ref;
		}
	} else {
		clk_disable_unprepare(fep->clk_ahb);
		if (fep->clk_enet_out)
			clk_disable_unprepare(fep->clk_enet_out);
		if (fep->clk_ptp) {
			mutex_lock(&fep->ptp_clk_mutex);
			clk_disable_unprepare(fep->clk_ptp);
			fep->ptp_clk_on = false;
			mutex_unlock(&fep->ptp_clk_mutex);
		}
		if (fep->clk_ref)
			clk_disable_unprepare(fep->clk_ref);
	}

	return 0;

failed_clk_ref:
	if (fep->clk_ref)
		clk_disable_unprepare(fep->clk_ref);
failed_clk_ptp:
	if (fep->clk_enet_out)
		clk_disable_unprepare(fep->clk_enet_out);
failed_clk_enet_out:
	clk_disable_unprepare(fep->clk_ahb);

	return ret;
}

1895
/* Locate and connect the PHY for this interface.
 *
 * Two discovery paths:
 *  - Device-tree: fep->phy_node was parsed at probe time; connect to it
 *    directly via of_phy_connect().
 *  - Legacy scan: walk the MDIO bus and take the dev_id'th registered
 *    PHY; if none is found, fall back to the "fixed-0" (fixed-link) bus,
 *    assuming a direct MAC-to-switch connection.
 *
 * On success the PHY's supported/advertised features are masked down to
 * what the MAC can do, and link state is reset.  Returns 0 or a negative
 * errno.  Caller (fec_enet_open) disconnects the PHY on teardown.
 */
static int fec_enet_mii_probe(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phy_dev = NULL;
	char mdio_bus_id[MII_BUS_ID_SIZE];
	char phy_name[MII_BUS_ID_SIZE + 3];
	int phy_id;
	int dev_id = fep->dev_id;

	if (fep->phy_node) {
		phy_dev = of_phy_connect(ndev, fep->phy_node,
					 &fec_enet_adjust_link, 0,
					 fep->phy_interface);
		if (!phy_dev)
			return -ENODEV;
	} else {
		/* check for attached phy */
		for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
			if (!mdiobus_is_registered_device(fep->mii_bus, phy_id))
				continue;
			/* skip PHYs claimed by lower-numbered FEC instances */
			if (dev_id--)
				continue;
			strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
			break;
		}

		if (phy_id >= PHY_MAX_ADDR) {
			netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
			strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
			phy_id = 0;
		}

		snprintf(phy_name, sizeof(phy_name),
			 PHY_ID_FMT, mdio_bus_id, phy_id);
		phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
				      fep->phy_interface);
	}

	if (IS_ERR(phy_dev)) {
		netdev_err(ndev, "could not attach to PHY\n");
		return PTR_ERR(phy_dev);
	}

	/* mask with MAC supported features */
	if (fep->quirks & FEC_QUIRK_HAS_GBIT) {
		phy_dev->supported &= PHY_GBIT_FEATURES;
		/* half-duplex gigabit is not supported by the MAC */
		phy_dev->supported &= ~SUPPORTED_1000baseT_Half;
#if !defined(CONFIG_M5272)
		phy_dev->supported |= SUPPORTED_Pause;
#endif
	}
	else
		phy_dev->supported &= PHY_BASIC_FEATURES;

	phy_dev->advertising = phy_dev->supported;

	fep->link = 0;
	fep->full_duplex = 0;

	phy_attached_info(phy_dev);

	return 0;
}

1959
/* Create and register the MDIO bus for this FEC instance.
 *
 * Computes the MII_SPEED divider (target MDC <= 2.5 MHz) and the MDIO
 * output hold time, allocates an mii_bus, and registers it either from
 * a DT "mdio" child node or plainly.  On SoCs with a single shared MDIO
 * controller (FEC_QUIRK_SINGLE_MDIO, e.g. i.MX28), fec1 reuses fec0's
 * bus instead of creating its own.  Returns 0 or a negative errno.
 */
static int fec_enet_mii_init(struct platform_device *pdev)
{
	/* holds fec0's bus across probes so fec1 can share it */
	static struct mii_bus *fec0_mii_bus;
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct device_node *node;
	int err = -ENXIO;
	u32 mii_speed, holdtime;

	/*
	 * The i.MX28 dual fec interfaces are not equal.
	 * Here are the differences:
	 *
	 *  - fec0 supports MII & RMII modes while fec1 only supports RMII
	 *  - fec0 acts as the 1588 time master while fec1 is slave
	 *  - external phys can only be configured by fec0
	 *
	 * That is to say fec1 can not work independently. It only works
	 * when fec0 is working. The reason behind this design is that the
	 * second interface is added primarily for Switch mode.
	 *
	 * Because of the last point above, both phys are attached on fec0
	 * mdio interface in board design, and need to be configured by
	 * fec0 mii_bus.
	 */
	if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
		/* fec1 uses fec0 mii_bus */
		if (mii_cnt && fec0_mii_bus) {
			fep->mii_bus = fec0_mii_bus;
			mii_cnt++;
			return 0;
		}
		return -ENOENT;
	}

	fep->mii_timeout = 0;

	/*
	 * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
	 *
	 * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
	 * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'.  The i.MX28
	 * Reference Manual has an error on this, and gets fixed on i.MX6Q
	 * document.
	 */
	mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
	if (fep->quirks & FEC_QUIRK_ENET_MAC)
		mii_speed--;
	if (mii_speed > 63) {
		dev_err(&pdev->dev,
			"fec clock (%lu) to fast to get right mii speed\n",
			clk_get_rate(fep->clk_ipg));
		err = -EINVAL;
		goto err_out;
	}

	/*
	 * The i.MX28 and i.MX6 types have another filed in the MSCR (aka
	 * MII_SPEED) register that defines the MDIO output hold time. Earlier
	 * versions are RAZ there, so just ignore the difference and write the
	 * register always.
	 * The minimal hold time according to IEE802.3 (clause 22) is 10 ns.
	 * HOLDTIME + 1 is the number of clk cycles the fec is holding the
	 * output.
	 * The HOLDTIME bitfield takes values between 0 and 7 (inclusive).
	 * Given that ceil(clkrate / 5000000) <= 64, the calculation for
	 * holdtime cannot result in a value greater than 3.
	 */
	holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1;

	fep->phy_speed = mii_speed << 1 | holdtime << 8;

	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

	fep->mii_bus = mdiobus_alloc();
	if (fep->mii_bus == NULL) {
		err = -ENOMEM;
		goto err_out;
	}

	fep->mii_bus->name = "fec_enet_mii_bus";
	fep->mii_bus->read = fec_enet_mdio_read;
	fep->mii_bus->write = fec_enet_mdio_write;
	snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		pdev->name, fep->dev_id + 1);
	fep->mii_bus->priv = fep;
	fep->mii_bus->parent = &pdev->dev;

	/* prefer a DT-described "mdio" subnode if one exists */
	node = of_get_child_by_name(pdev->dev.of_node, "mdio");
	if (node) {
		err = of_mdiobus_register(fep->mii_bus, node);
		of_node_put(node);
	} else {
		err = mdiobus_register(fep->mii_bus);
	}

	if (err)
		goto err_out_free_mdiobus;

	mii_cnt++;

	/* save fec0 mii_bus */
	if (fep->quirks & FEC_QUIRK_SINGLE_MDIO)
		fec0_mii_bus = fep->mii_bus;

	return 0;

err_out_free_mdiobus:
	mdiobus_free(fep->mii_bus);
err_out:
	return err;
}

2072
/* Drop one reference on the (possibly shared) MDIO bus and tear it
 * down when the last FEC instance using it goes away.  mii_cnt is the
 * file-scope share counter incremented in fec_enet_mii_init().
 */
static void fec_enet_mii_remove(struct fec_enet_private *fep)
{
	if (--mii_cnt == 0) {
		mdiobus_unregister(fep->mii_bus);
		mdiobus_free(fep->mii_bus);
	}
}

2080
/* ethtool .get_drvinfo: report driver name, a static version string,
 * and the bus identifier of the underlying platform device.
 */
static void fec_enet_get_drvinfo(struct net_device *ndev,
				 struct ethtool_drvinfo *info)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	strlcpy(info->driver, fep->pdev->dev.driver->name,
		sizeof(info->driver));
	strlcpy(info->version, "Revision: 1.0", sizeof(info->version));
	strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
}

2091 2092 2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105
/* ethtool .get_regs_len: size of the register dump is the size of the
 * controller's first MMIO resource, or 0 if it cannot be looked up.
 */
static int fec_enet_get_regs_len(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct resource *res;

	res = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0);

	return res ? resource_size(res) : 0;
}

/* List of registers that can safely be read to dump them with ethtool.
 *
 * Two variants: ENET-MAC style controllers (ColdFire MCF52xx/MCF532x and
 * ARM i.MX, first table, includes the RMON/IEEE MIB counter block) and
 * the older FEC register layout (second table).  Offsets are byte
 * offsets into the MMIO window; fec_enet_get_regs() divides by 4.
 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
	defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
static u32 fec_enet_register_offset[] = {
	FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
	FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
	FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1,
	FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH,
	FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW,
	FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1,
	FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2,
	FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0,
	FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
	FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2,
	FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1,
	FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME,
	RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
	RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
	RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
	RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
	RMON_T_P_GTE2048, RMON_T_OCTETS,
	IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
	IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
	IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
	RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
	RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
	RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
	RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
	RMON_R_P_GTE2048, RMON_R_OCTETS,
	IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
	IEEE_R_FDXFC, IEEE_R_OCTETS_OK
};
#else
static u32 fec_enet_register_offset[] = {
	FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0,
	FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0,
	FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED,
	FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL,
	FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH,
	FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0,
	FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0,
	FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0,
	FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2
};
#endif

/* ethtool .get_regs: dump the whitelisted registers into regbuf.
 *
 * The buffer is zeroed first, then each safe register is read and stored
 * at the word offset matching its hardware offset, so unread (unsafe or
 * reserved) locations show up as 0 in the dump.
 */
static void fec_enet_get_regs(struct net_device *ndev,
			      struct ethtool_regs *regs, void *regbuf)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
	u32 *buf = (u32 *)regbuf;
	u32 i, off;

	memset(buf, 0, regs->len);

	for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
		/* byte offset -> u32 index, same index in dump and MMIO */
		off = fec_enet_register_offset[i] / 4;
		buf[off] = readl(&theregs[off]);
	}
}

2166 2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194
/* ethtool .get_ts_info: report timestamping capabilities.
 *
 * Hardware timestamping is only available when the controller uses
 * extended buffer descriptors; otherwise defer to the generic
 * software-only implementation.
 */
static int fec_enet_get_ts_info(struct net_device *ndev,
				struct ethtool_ts_info *info)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (!fep->bufdesc_ex)
		return ethtool_op_get_ts_info(ndev, info);

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	/* -1 means "no PHC registered (yet)" */
	info->phc_index = fep->ptp_clock ? ptp_clock_index(fep->ptp_clock) : -1;

	info->tx_types = (1 << HWTSTAMP_TX_OFF) |
			 (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_ALL);

	return 0;
}

G
Guenter Roeck 已提交
2195 2196
#if !defined(CONFIG_M5272)

2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211
static void fec_enet_get_pauseparam(struct net_device *ndev,
				    struct ethtool_pauseparam *pause)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0;
	pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0;
	pause->rx_pause = pause->tx_pause;
}

/* ethtool .set_pauseparam: configure flow control.
 *
 * The MAC enables/disables tx and rx pause together, so asymmetric
 * requests are rejected.  The new setting is pushed to the PHY's
 * advertisement; when autoneg is requested the link is renegotiated,
 * and a running interface is restarted so the MAC picks up the new
 * pause configuration.  Returns 0 or a negative errno.
 */
static int fec_enet_set_pauseparam(struct net_device *ndev,
				   struct ethtool_pauseparam *pause)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (!ndev->phydev)
		return -ENODEV;

	if (pause->tx_pause != pause->rx_pause) {
		netdev_info(ndev,
			"hardware only support enable/disable both tx and rx");
		return -EINVAL;
	}

	fep->pause_flag = 0;

	/* tx pause must be same as rx pause */
	fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
	fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;

	if (pause->rx_pause || pause->autoneg) {
		ndev->phydev->supported |= ADVERTISED_Pause;
		ndev->phydev->advertising |= ADVERTISED_Pause;
	} else {
		ndev->phydev->supported &= ~ADVERTISED_Pause;
		ndev->phydev->advertising &= ~ADVERTISED_Pause;
	}

	if (pause->autoneg) {
		/* stop the MAC before renegotiating so traffic doesn't
		 * flow with stale pause settings during aneg
		 */
		if (netif_running(ndev))
			fec_stop(ndev);
		phy_start_aneg(ndev->phydev);
	}
	if (netif_running(ndev)) {
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		fec_restart(ndev);
		netif_wake_queue(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
	}

	return 0;
}

2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349
/* Mapping of ethtool statistic names to hardware MIB counter offsets.
 * The counters are read directly from the RMON/IEEE MIB block in
 * fec_enet_get_ethtool_stats(); order here defines both string order
 * and data order reported to userspace.
 */
static const struct fec_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} fec_stats[] = {
	/* RMON TX */
	{ "tx_dropped", RMON_T_DROP },
	{ "tx_packets", RMON_T_PACKETS },
	{ "tx_broadcast", RMON_T_BC_PKT },
	{ "tx_multicast", RMON_T_MC_PKT },
	{ "tx_crc_errors", RMON_T_CRC_ALIGN },
	{ "tx_undersize", RMON_T_UNDERSIZE },
	{ "tx_oversize", RMON_T_OVERSIZE },
	{ "tx_fragment", RMON_T_FRAG },
	{ "tx_jabber", RMON_T_JAB },
	{ "tx_collision", RMON_T_COL },
	{ "tx_64byte", RMON_T_P64 },
	{ "tx_65to127byte", RMON_T_P65TO127 },
	{ "tx_128to255byte", RMON_T_P128TO255 },
	{ "tx_256to511byte", RMON_T_P256TO511 },
	{ "tx_512to1023byte", RMON_T_P512TO1023 },
	{ "tx_1024to2047byte", RMON_T_P1024TO2047 },
	{ "tx_GTE2048byte", RMON_T_P_GTE2048 },
	{ "tx_octets", RMON_T_OCTETS },

	/* IEEE TX */
	{ "IEEE_tx_drop", IEEE_T_DROP },
	{ "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
	{ "IEEE_tx_1col", IEEE_T_1COL },
	{ "IEEE_tx_mcol", IEEE_T_MCOL },
	{ "IEEE_tx_def", IEEE_T_DEF },
	{ "IEEE_tx_lcol", IEEE_T_LCOL },
	{ "IEEE_tx_excol", IEEE_T_EXCOL },
	{ "IEEE_tx_macerr", IEEE_T_MACERR },
	{ "IEEE_tx_cserr", IEEE_T_CSERR },
	{ "IEEE_tx_sqe", IEEE_T_SQE },
	{ "IEEE_tx_fdxfc", IEEE_T_FDXFC },
	{ "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },

	/* RMON RX */
	{ "rx_packets", RMON_R_PACKETS },
	{ "rx_broadcast", RMON_R_BC_PKT },
	{ "rx_multicast", RMON_R_MC_PKT },
	{ "rx_crc_errors", RMON_R_CRC_ALIGN },
	{ "rx_undersize", RMON_R_UNDERSIZE },
	{ "rx_oversize", RMON_R_OVERSIZE },
	{ "rx_fragment", RMON_R_FRAG },
	{ "rx_jabber", RMON_R_JAB },
	{ "rx_64byte", RMON_R_P64 },
	{ "rx_65to127byte", RMON_R_P65TO127 },
	{ "rx_128to255byte", RMON_R_P128TO255 },
	{ "rx_256to511byte", RMON_R_P256TO511 },
	{ "rx_512to1023byte", RMON_R_P512TO1023 },
	{ "rx_1024to2047byte", RMON_R_P1024TO2047 },
	{ "rx_GTE2048byte", RMON_R_P_GTE2048 },
	{ "rx_octets", RMON_R_OCTETS },

	/* IEEE RX */
	{ "IEEE_rx_drop", IEEE_R_DROP },
	{ "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
	{ "IEEE_rx_crc", IEEE_R_CRC },
	{ "IEEE_rx_align", IEEE_R_ALIGN },
	{ "IEEE_rx_macerr", IEEE_R_MACERR },
	{ "IEEE_rx_fdxfc", IEEE_R_FDXFC },
	{ "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
};

/* ethtool .get_ethtool_stats: read each MIB counter listed in fec_stats[]
 * straight from the hardware into the output array, in table order.
 */
static void fec_enet_get_ethtool_stats(struct net_device *dev,
	struct ethtool_stats *stats, u64 *data)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
		data[i] = readl(fep->hwp + fec_stats[i].offset);
}

/* ethtool .get_strings: copy the statistic names for ETH_SS_STATS,
 * in the same order fec_enet_get_ethtool_stats() reports values.
 */
static void fec_enet_get_strings(struct net_device *netdev,
	u32 stringset, u8 *data)
{
	u8 *dst = data;
	int idx;

	if (stringset != ETH_SS_STATS)
		return;

	for (idx = 0; idx < ARRAY_SIZE(fec_stats); idx++, dst += ETH_GSTRING_LEN)
		memcpy(dst, fec_stats[idx].name, ETH_GSTRING_LEN);
}

/* ethtool .get_sset_count: only the statistics string set is supported. */
static int fec_enet_get_sset_count(struct net_device *dev, int sset)
{
	if (sset != ETH_SS_STATS)
		return -EOPNOTSUPP;

	return ARRAY_SIZE(fec_stats);
}
G
Guenter Roeck 已提交
2350
#endif /* !defined(CONFIG_M5272) */
2351

2352 2353
static int fec_enet_nway_reset(struct net_device *dev)
{
2354
	struct phy_device *phydev = dev->phydev;
2355 2356 2357 2358 2359 2360 2361

	if (!phydev)
		return -ENODEV;

	return genphy_restart_aneg(phydev);
}

2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400
/* ITR clock source is enet system clock (clk_ahb).
 * TCTT unit is cycle_ns * 64 cycle
 * So, the ICTT value = X us / (cycle_ns * 64)
 *
 * Converts a microsecond interval into interrupt-coalescing timer ticks
 * for the ICTT bitfield; integer arithmetic truncates toward zero.
 */
static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	return us * (fep->itr_clk_rate / 64000) / 1000;
}

/* Set threshold for interrupt coalescing
 *
 * Programs the per-queue RX/TX interrupt-coalescing registers from the
 * values cached in fep->{rx,tx}_{time,pkts}_itr.  Silently does nothing
 * if any threshold is zero, since the hardware behaves unpredictably
 * with zero frame-count or timer values.
 */
static void fec_enet_itr_coal_set(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int rx_itr, tx_itr;

	/* Must be greater than zero to avoid unpredictable behavior */
	if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
	    !fep->tx_time_itr || !fep->tx_pkts_itr)
		return;

	/* Select enet system clock as Interrupt Coalescing
	 * timer Clock Source
	 */
	rx_itr = FEC_ITR_CLK_SEL;
	tx_itr = FEC_ITR_CLK_SEL;

	/* set ICFT and ICTT */
	rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
	rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr));
	tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
	tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr));

	rx_itr |= FEC_ITR_EN;
	tx_itr |= FEC_ITR_EN;

	writel(tx_itr, fep->hwp + FEC_TXIC0);
	writel(rx_itr, fep->hwp + FEC_RXIC0);
	/* AVB-capable controllers have two extra queue pairs */
	if (fep->quirks & FEC_QUIRK_HAS_AVB) {
		writel(tx_itr, fep->hwp + FEC_TXIC1);
		writel(rx_itr, fep->hwp + FEC_RXIC1);
		writel(tx_itr, fep->hwp + FEC_TXIC2);
		writel(rx_itr, fep->hwp + FEC_RXIC2);
	}
}

/* ethtool .get_coalesce: report the cached coalescing parameters.
 * Only supported on controllers with the coalescing quirk.
 */
static int
fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
		return -EOPNOTSUPP;

	ec->rx_coalesce_usecs = fep->rx_time_itr;
	ec->rx_max_coalesced_frames = fep->rx_pkts_itr;

	ec->tx_coalesce_usecs = fep->tx_time_itr;
	ec->tx_max_coalesced_frames = fep->tx_pkts_itr;

	return 0;
}

static int
fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int cycle;

2432
	if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
2433 2434 2435
		return -EOPNOTSUPP;

	if (ec->rx_max_coalesced_frames > 255) {
2436
		pr_err("Rx coalesced frames exceed hardware limitation\n");
2437 2438 2439 2440
		return -EINVAL;
	}

	if (ec->tx_max_coalesced_frames > 255) {
2441
		pr_err("Tx coalesced frame exceed hardware limitation\n");
2442 2443 2444 2445 2446
		return -EINVAL;
	}

	cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
	if (cycle > 0xFFFF) {
2447
		pr_err("Rx coalesced usec exceed hardware limitation\n");
2448 2449 2450 2451 2452
		return -EINVAL;
	}

	cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
	if (cycle > 0xFFFF) {
2453
		pr_err("Rx coalesced usec exceed hardware limitation\n");
2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480
		return -EINVAL;
	}

	fep->rx_time_itr = ec->rx_coalesce_usecs;
	fep->rx_pkts_itr = ec->rx_max_coalesced_frames;

	fep->tx_time_itr = ec->tx_coalesce_usecs;
	fep->tx_pkts_itr = ec->tx_max_coalesced_frames;

	fec_enet_itr_coal_set(ndev);

	return 0;
}

/* Apply the driver's default coalescing settings at init time by going
 * through the regular ethtool setter (which also validates and programs
 * the hardware).  Only the four fields read by fec_enet_set_coalesce()
 * are initialized here.
 */
static void fec_enet_itr_coal_init(struct net_device *ndev)
{
	struct ethtool_coalesce ec;

	ec.rx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
	ec.rx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;

	ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
	ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;

	fec_enet_set_coalesce(ndev, &ec);
}

2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518
/* ethtool .get_tunable: only ETHTOOL_RX_COPYBREAK is supported; report
 * the current rx copybreak threshold.
 */
static int fec_enet_get_tunable(struct net_device *netdev,
				const struct ethtool_tunable *tuna,
				void *data)
{
	struct fec_enet_private *fep = netdev_priv(netdev);

	if (tuna->id != ETHTOOL_RX_COPYBREAK)
		return -EINVAL;

	*(u32 *)data = fep->rx_copybreak;

	return 0;
}

/* ethtool .set_tunable: only ETHTOOL_RX_COPYBREAK is supported; store
 * the new rx copybreak threshold.
 */
static int fec_enet_set_tunable(struct net_device *netdev,
				const struct ethtool_tunable *tuna,
				const void *data)
{
	struct fec_enet_private *fep = netdev_priv(netdev);

	if (tuna->id != ETHTOOL_RX_COPYBREAK)
		return -EINVAL;

	fep->rx_copybreak = *(u32 *)data;

	return 0;
}

N
Nimrod Andy 已提交
2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556
/* ethtool .get_wol: report Wake-on-LAN capability and state.  Only
 * magic-packet wakeup exists on this hardware, and only when the
 * platform flagged support for it.
 */
static void
fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	wol->supported = 0;
	wol->wolopts = 0;

	if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) {
		wol->supported = WAKE_MAGIC;
		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
			wol->wolopts = WAKE_MAGIC;
	}
}

/* ethtool .set_wol: enable or disable magic-packet Wake-on-LAN.
 *
 * Rejects any wake option other than WAKE_MAGIC and rejects WoL
 * entirely when the platform did not advertise it.  Keeps the device
 * wakeup flag, driver flag and IRQ wake state in sync.
 */
static int
fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET))
		return -EINVAL;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
	if (device_may_wakeup(&ndev->dev)) {
		fep->wol_flag |= FEC_WOL_FLAG_ENABLE;
		/* irq[0] is the wakeup-capable interrupt line */
		if (fep->irq[0] > 0)
			enable_irq_wake(fep->irq[0]);
	} else {
		fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE);
		if (fep->irq[0] > 0)
			disable_irq_wake(fep->irq[0]);
	}

	return 0;
}

S
stephen hemminger 已提交
2557
/* ethtool operations table.  The pause and statistics callbacks are
 * compiled out on M5272, which lacks the corresponding hardware blocks.
 */
static const struct ethtool_ops fec_enet_ethtool_ops = {
	.get_drvinfo		= fec_enet_get_drvinfo,
	.get_regs_len		= fec_enet_get_regs_len,
	.get_regs		= fec_enet_get_regs,
	.nway_reset		= fec_enet_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= fec_enet_get_coalesce,
	.set_coalesce		= fec_enet_set_coalesce,
#ifndef CONFIG_M5272
	.get_pauseparam		= fec_enet_get_pauseparam,
	.set_pauseparam		= fec_enet_set_pauseparam,
	.get_strings		= fec_enet_get_strings,
	.get_ethtool_stats	= fec_enet_get_ethtool_stats,
	.get_sset_count		= fec_enet_get_sset_count,
#endif
	.get_ts_info		= fec_enet_get_ts_info,
	.get_tunable		= fec_enet_get_tunable,
	.set_tunable		= fec_enet_set_tunable,
	.get_wol		= fec_enet_get_wol,
	.set_wol		= fec_enet_set_wol,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
L
Linus Torvalds 已提交
2580

2581
/* ndo_do_ioctl: handle hardware-timestamping ioctls when extended buffer
 * descriptors are available, otherwise pass everything to the PHY's MII
 * ioctl handler.  Requires a running interface and an attached PHY.
 */
static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	if (fep->bufdesc_ex) {
		if (cmd == SIOCSHWTSTAMP)
			return fec_ptp_set(ndev, rq);
		if (cmd == SIOCGHWTSTAMP)
			return fec_ptp_get(ndev, rq);
	}

	return phy_mii_ioctl(phydev, rq, cmd);
}

2602
/* Release all RX and TX ring buffers for every queue.
 *
 * RX: each skb is DMA-unmapped and freed; the ring slot is NULLed first
 * so a partial teardown cannot double-free.  TX: bounce buffers and any
 * lingering skbs are freed (dev_kfree_skb tolerates NULL).  Descriptor
 * rings themselves are owned by fec_enet_free_queue(), not freed here.
 */
static void fec_enet_free_buffers(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int i;
	struct sk_buff *skb;
	struct bufdesc	*bdp;
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	unsigned int q;

	for (q = 0; q < fep->num_rx_queues; q++) {
		rxq = fep->rx_queue[q];
		bdp = rxq->bd.base;
		for (i = 0; i < rxq->bd.ring_size; i++) {
			skb = rxq->rx_skbuff[i];
			rxq->rx_skbuff[i] = NULL;
			if (skb) {
				dma_unmap_single(&fep->pdev->dev,
						 fec32_to_cpu(bdp->cbd_bufaddr),
						 FEC_ENET_RX_FRSIZE - fep->rx_align,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(skb);
			}
			bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
		}
	}

	for (q = 0; q < fep->num_tx_queues; q++) {
		txq = fep->tx_queue[q];
		bdp = txq->bd.base;
		for (i = 0; i < txq->bd.ring_size; i++) {
			kfree(txq->tx_bounce[i]);
			txq->tx_bounce[i] = NULL;
			skb = txq->tx_skbuff[i];
			txq->tx_skbuff[i] = NULL;
			dev_kfree_skb(skb);
		}
	}
}
S
Sascha Hauer 已提交
2641

2642 2643 2644 2645 2646 2647 2648 2649 2650 2651
static void fec_enet_free_queue(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;
	struct fec_enet_priv_tx_q *txq;

	for (i = 0; i < fep->num_tx_queues; i++)
		if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
			txq = fep->tx_queue[i];
			dma_free_coherent(NULL,
T
Troy Kisky 已提交
2652
					  txq->bd.ring_size * TSO_HEADER_SIZE,
2653 2654 2655 2656 2657
					  txq->tso_hdrs,
					  txq->tso_hdrs_dma);
		}

	for (i = 0; i < fep->num_rx_queues; i++)
2658
		kfree(fep->rx_queue[i]);
2659
	for (i = 0; i < fep->num_tx_queues; i++)
2660
		kfree(fep->tx_queue[i]);
2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677
}

static int fec_enet_alloc_queue(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;
	int ret = 0;
	struct fec_enet_priv_tx_q *txq;

	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = kzalloc(sizeof(*txq), GFP_KERNEL);
		if (!txq) {
			ret = -ENOMEM;
			goto alloc_failed;
		}

		fep->tx_queue[i] = txq;
T
Troy Kisky 已提交
2678 2679
		txq->bd.ring_size = TX_RING_SIZE;
		fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;
2680 2681 2682

		txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
		txq->tx_wake_threshold =
T
Troy Kisky 已提交
2683
			(txq->bd.ring_size - txq->tx_stop_threshold) / 2;
2684 2685

		txq->tso_hdrs = dma_alloc_coherent(NULL,
T
Troy Kisky 已提交
2686
					txq->bd.ring_size * TSO_HEADER_SIZE,
2687 2688 2689 2690 2691 2692
					&txq->tso_hdrs_dma,
					GFP_KERNEL);
		if (!txq->tso_hdrs) {
			ret = -ENOMEM;
			goto alloc_failed;
		}
2693
	}
2694 2695 2696 2697 2698 2699 2700 2701 2702

	for (i = 0; i < fep->num_rx_queues; i++) {
		fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]),
					   GFP_KERNEL);
		if (!fep->rx_queue[i]) {
			ret = -ENOMEM;
			goto alloc_failed;
		}

T
Troy Kisky 已提交
2703 2704
		fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE;
		fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size;
2705 2706 2707 2708 2709 2710
	}
	return ret;

alloc_failed:
	fec_enet_free_queue(ndev);
	return ret;
S
Sascha Hauer 已提交
2711 2712
}

2713 2714
/* Populate one RX ring with freshly allocated skbs.
 *
 * Each descriptor gets an skb mapped for DMA (via fec_enet_new_rxbdp)
 * and is marked EMPTY so the hardware may fill it; extended descriptors
 * additionally enable the RX interrupt.  The last descriptor gets the
 * WRAP bit.  On failure, all buffers of all queues are released and
 * -ENOMEM is returned.
 */
static int
fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int i;
	struct sk_buff *skb;
	struct bufdesc	*bdp;
	struct fec_enet_priv_rx_q *rxq;

	rxq = fep->rx_queue[queue];
	bdp = rxq->bd.base;
	for (i = 0; i < rxq->bd.ring_size; i++) {
		skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
		if (!skb)
			goto err_alloc;

		if (fec_enet_new_rxbdp(ndev, bdp, skb)) {
			dev_kfree_skb(skb);
			goto err_alloc;
		}

		rxq->rx_skbuff[i] = skb;
		bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
		}

		bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
	}

	/* Set the last buffer to wrap. */
	bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
	bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
	return 0;

 err_alloc:
	fec_enet_free_buffers(ndev);
	return -ENOMEM;
}

/* Prepare one TX ring: allocate a bounce buffer per descriptor (used
 * when an skb's alignment doesn't satisfy the controller) and clear
 * each descriptor; extended descriptors enable the TX interrupt.  The
 * last descriptor gets the WRAP bit.  On failure, all buffers of all
 * queues are released and -ENOMEM is returned.
 */
static int
fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int i;
	struct bufdesc  *bdp;
	struct fec_enet_priv_tx_q *txq;

	txq = fep->tx_queue[queue];
	bdp = txq->bd.base;
	for (i = 0; i < txq->bd.ring_size; i++) {
		txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
		if (!txq->tx_bounce[i])
			goto err_alloc;

		bdp->cbd_sc = cpu_to_fec16(0);
		bdp->cbd_bufaddr = cpu_to_fec32(0);

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
		}

		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
	}

	/* Set the last buffer to wrap. */
	bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
	bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);

	return 0;

 err_alloc:
	fec_enet_free_buffers(ndev);
	return -ENOMEM;
}

2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806
/* Fill every RX and TX ring with buffers.  A failing helper has already
 * torn down all previously allocated buffers via fec_enet_free_buffers(),
 * so a plain -ENOMEM return is sufficient here.
 */
static int fec_enet_alloc_buffers(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned int q;

	for (q = 0; q < fep->num_rx_queues; q++)
		if (fec_enet_alloc_rxq_buffers(ndev, q))
			return -ENOMEM;

	for (q = 0; q < fep->num_tx_queues; q++)
		if (fec_enet_alloc_txq_buffers(ndev, q))
			return -ENOMEM;

	return 0;
}

L
Linus Torvalds 已提交
2807
static int
2808
fec_enet_open(struct net_device *ndev)
L
Linus Torvalds 已提交
2809
{
2810
	struct fec_enet_private *fep = netdev_priv(ndev);
S
Sascha Hauer 已提交
2811
	int ret;
L
Linus Torvalds 已提交
2812

2813
	ret = pm_runtime_get_sync(&fep->pdev->dev);
2814
	if (ret < 0)
2815 2816
		return ret;

N
Nimrod Andy 已提交
2817
	pinctrl_pm_select_default_state(&fep->pdev->dev);
2818 2819
	ret = fec_enet_clk_enable(ndev, true);
	if (ret)
2820
		goto clk_enable;
2821

L
Linus Torvalds 已提交
2822 2823 2824 2825
	/* I should reset the ring buffers here, but I don't yet know
	 * a simple way to do that.
	 */

2826
	ret = fec_enet_alloc_buffers(ndev);
S
Sascha Hauer 已提交
2827
	if (ret)
2828
		goto err_enet_alloc;
S
Sascha Hauer 已提交
2829

2830 2831 2832
	/* Init MAC prior to mii bus probe */
	fec_restart(ndev);

2833
	/* Probe and connect to PHY when open the interface */
2834
	ret = fec_enet_mii_probe(ndev);
2835 2836
	if (ret)
		goto err_enet_mii_probe;
2837

2838 2839 2840
	if (fep->quirks & FEC_QUIRK_ERR006687)
		imx6q_cpuidle_fec_irqs_used();

2841
	napi_enable(&fep->napi);
2842
	phy_start(ndev->phydev);
2843 2844
	netif_tx_start_all_queues(ndev);

N
Nimrod Andy 已提交
2845 2846 2847
	device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
				 FEC_WOL_FLAG_ENABLE);

S
Sascha Hauer 已提交
2848
	return 0;
2849 2850 2851 2852 2853

err_enet_mii_probe:
	fec_enet_free_buffers(ndev);
err_enet_alloc:
	fec_enet_clk_enable(ndev, false);
2854 2855 2856
clk_enable:
	pm_runtime_mark_last_busy(&fep->pdev->dev);
	pm_runtime_put_autosuspend(&fep->pdev->dev);
2857 2858
	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
	return ret;
L
Linus Torvalds 已提交
2859 2860 2861
}

/* ndo_close: bring the interface down.
 *
 * Stops the PHY first so no link changes arrive mid-teardown; only
 * quiesces NAPI/TX/MAC if the device is still present (it may have been
 * detached, e.g. during suspend).  Then disconnects the PHY, disables
 * clocks, drops the PM runtime reference taken in fec_enet_open() and
 * frees all ring buffers.
 */
static int
fec_enet_close(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	phy_stop(ndev->phydev);

	if (netif_device_present(ndev)) {
		napi_disable(&fep->napi);
		netif_tx_disable(ndev);
		fec_stop(ndev);
	}

	phy_disconnect(ndev->phydev);

	if (fep->quirks & FEC_QUIRK_ERR006687)
		imx6q_cpuidle_fec_irqs_unused();

	fec_enet_clk_enable(ndev, false);
	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
	pm_runtime_mark_last_busy(&fep->pdev->dev);
	pm_runtime_put_autosuspend(&fep->pdev->dev);

	fec_enet_free_buffers(ndev);

	return 0;
}

/* Set or clear the multicast filter for this adaptor.
 * Skeleton taken from sunlance driver.
 * The CPM Ethernet implementation allows Multicast as well as individual
 * MAC address filtering.  Some of the drivers check to make sure it is
 * a group multicast address, and discard those that are not.  I guess I
 * will do the same for now, but just remove the test if you want
 * individual filtering as well (do the upper net layers want or support
 * this kind of feature?).
 */

2899
#define FEC_HASH_BITS	6		/* #bits in hash */
L
Linus Torvalds 已提交
2900 2901
#define CRC32_POLY	0xEDB88320

2902
static void set_multicast_list(struct net_device *ndev)
L
Linus Torvalds 已提交
2903
{
2904
	struct fec_enet_private *fep = netdev_priv(ndev);
2905
	struct netdev_hw_addr *ha;
2906
	unsigned int i, bit, data, crc, tmp;
L
Linus Torvalds 已提交
2907 2908
	unsigned char hash;

2909
	if (ndev->flags & IFF_PROMISC) {
S
Sascha Hauer 已提交
2910 2911 2912
		tmp = readl(fep->hwp + FEC_R_CNTRL);
		tmp |= 0x8;
		writel(tmp, fep->hwp + FEC_R_CNTRL);
2913 2914
		return;
	}
L
Linus Torvalds 已提交
2915

2916 2917 2918 2919
	tmp = readl(fep->hwp + FEC_R_CNTRL);
	tmp &= ~0x8;
	writel(tmp, fep->hwp + FEC_R_CNTRL);

2920
	if (ndev->flags & IFF_ALLMULTI) {
2921 2922 2923 2924 2925 2926 2927 2928 2929 2930 2931 2932 2933 2934
		/* Catch all multicast addresses, so set the
		 * filter to all 1's
		 */
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);

		return;
	}

	/* Clear filter and add the addresses in hash register
	 */
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);

2935
	netdev_for_each_mc_addr(ha, ndev) {
2936 2937 2938
		/* calculate crc32 value of mac address */
		crc = 0xffffffff;

2939
		for (i = 0; i < ndev->addr_len; i++) {
2940
			data = ha->addr[i];
2941 2942 2943
			for (bit = 0; bit < 8; bit++, data >>= 1) {
				crc = (crc >> 1) ^
				(((crc ^ data) & 1) ? CRC32_POLY : 0);
L
Linus Torvalds 已提交
2944 2945
			}
		}
2946

2947
		/* only upper 6 bits (FEC_HASH_BITS) are used
2948 2949
		 * which point to specific bit in he hash registers
		 */
2950
		hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;
2951 2952 2953 2954 2955 2956 2957 2958 2959 2960

		if (hash > 31) {
			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
			tmp |= 1 << (hash - 32);
			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		} else {
			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
			tmp |= 1 << hash;
			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
		}
L
Linus Torvalds 已提交
2961 2962 2963
	}
}

S
Sascha Hauer 已提交
2964
/* Set a MAC change in hardware. */
S
Sascha Hauer 已提交
2965
static int
2966
fec_set_mac_address(struct net_device *ndev, void *p)
L
Linus Torvalds 已提交
2967
{
2968
	struct fec_enet_private *fep = netdev_priv(ndev);
S
Sascha Hauer 已提交
2969 2970
	struct sockaddr *addr = p;

2971 2972 2973 2974 2975
	if (addr) {
		if (!is_valid_ether_addr(addr->sa_data))
			return -EADDRNOTAVAIL;
		memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	}
L
Linus Torvalds 已提交
2976

2977 2978 2979 2980 2981 2982 2983 2984
	/* Add netif status check here to avoid system hang in below case:
	 * ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx;
	 * After ethx down, fec all clocks are gated off and then register
	 * access causes system hang.
	 */
	if (!netif_running(ndev))
		return 0;

2985 2986
	writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
		(ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
S
Sascha Hauer 已提交
2987
		fep->hwp + FEC_ADDR_LOW);
2988
	writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
2989
		fep->hwp + FEC_ADDR_HIGH);
S
Sascha Hauer 已提交
2990
	return 0;
L
Linus Torvalds 已提交
2991 2992
}

2993
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * fec_poll_controller - FEC Poll controller function
 * @dev: The FEC network adapter
 *
 * Polled functionality used by netconsole and others in non interrupt mode
 *
 */
static void fec_poll_controller(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int i;

	for (i = 0; i < FEC_IRQ_NUM; i++) {
		int irq = fep->irq[i];

		if (irq <= 0)
			continue;

		/* Invoke the ISR by hand with the line masked */
		disable_irq(irq);
		fec_enet_interrupt(irq, dev);
		enable_irq(irq);
	}
}
#endif

3016
static inline void fec_enet_set_netdev_features(struct net_device *netdev,
3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029
	netdev_features_t features)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	netdev->features = features;

	/* Receive checksum has been changed */
	if (changed & NETIF_F_RXCSUM) {
		if (features & NETIF_F_RXCSUM)
			fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
		else
			fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
3030
	}
3031 3032 3033 3034 3035 3036 3037
}

/* ndo_set_features callback: apply @features, restarting the MAC only
 * when RX checksum offload changes while the interface is running.
 */
static int fec_set_features(struct net_device *netdev,
	netdev_features_t features)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	netdev_features_t diff = features ^ netdev->features;

	/* Fast path: no restart needed, just record the new features */
	if (!(netif_running(netdev) && (diff & NETIF_F_RXCSUM))) {
		fec_enet_set_netdev_features(netdev, features);
		return 0;
	}

	napi_disable(&fep->napi);
	netif_tx_lock_bh(netdev);
	fec_stop(netdev);
	fec_enet_set_netdev_features(netdev, features);
	fec_restart(netdev);
	netif_tx_wake_all_queues(netdev);
	netif_tx_unlock_bh(netdev);
	napi_enable(&fep->napi);

	return 0;
}

S
Sascha Hauer 已提交
3055 3056 3057 3058
/* net_device callbacks for the FEC; installed in fec_enet_init() */
static const struct net_device_ops fec_netdev_ops = {
	.ndo_open		= fec_enet_open,
	.ndo_stop		= fec_enet_close,
	.ndo_start_xmit		= fec_enet_start_xmit,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= fec_timeout,
	.ndo_set_mac_address	= fec_set_mac_address,
	.ndo_do_ioctl		= fec_enet_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= fec_poll_controller,
#endif
	.ndo_set_features	= fec_set_features,
};

3071 3072 3073 3074 3075 3076 3077 3078
/* Register offsets of the per-queue "descriptor active" registers,
 * indexed by queue id (used by fec_enet_init() when wiring up rings).
 */
static const unsigned short offset_des_active_rxq[] = {
	FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2
};

static const unsigned short offset_des_active_txq[] = {
	FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2
};

L
Linus Torvalds 已提交
3079 3080
 /*
  * XXX:  We need to clean up on failure exits here.
3081
  *
L
Linus Torvalds 已提交
3082
  */
3083
static int fec_enet_init(struct net_device *ndev)
L
Linus Torvalds 已提交
3084
{
3085
	struct fec_enet_private *fep = netdev_priv(ndev);
S
Sascha Hauer 已提交
3086
	struct bufdesc *cbd_base;
3087
	dma_addr_t bd_dma;
3088
	int bd_size;
3089
	unsigned int i;
T
Troy Kisky 已提交
3090 3091 3092
	unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
			sizeof(struct bufdesc);
	unsigned dsize_log2 = __fls(dsize);
3093

T
Troy Kisky 已提交
3094
	WARN_ON(dsize != (1 << dsize_log2));
3095 3096 3097 3098 3099 3100 3101 3102
#if defined(CONFIG_ARM)
	fep->rx_align = 0xf;
	fep->tx_align = 0xf;
#else
	fep->rx_align = 0x3;
	fep->tx_align = 0x3;
#endif

3103
	fec_enet_alloc_queue(ndev);
N
Nimrod Andy 已提交
3104

T
Troy Kisky 已提交
3105
	bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
L
Linus Torvalds 已提交
3106

S
Sascha Hauer 已提交
3107
	/* Allocate memory for buffer descriptors. */
3108 3109
	cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
				       GFP_KERNEL);
3110
	if (!cbd_base) {
N
Nimrod Andy 已提交
3111 3112 3113
		return -ENOMEM;
	}

3114
	memset(cbd_base, 0, bd_size);
L
Linus Torvalds 已提交
3115

3116
	/* Get the Ethernet address */
3117
	fec_get_mac(ndev);
3118 3119
	/* make sure MAC we just acquired is programmed into the hw */
	fec_set_mac_address(ndev, NULL);
L
Linus Torvalds 已提交
3120

S
Sascha Hauer 已提交
3121
	/* Set receive and transmit descriptor base. */
3122
	for (i = 0; i < fep->num_rx_queues; i++) {
T
Troy Kisky 已提交
3123 3124 3125 3126 3127 3128 3129 3130 3131
		struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
		unsigned size = dsize * rxq->bd.ring_size;

		rxq->bd.qid = i;
		rxq->bd.base = cbd_base;
		rxq->bd.cur = cbd_base;
		rxq->bd.dma = bd_dma;
		rxq->bd.dsize = dsize;
		rxq->bd.dsize_log2 = dsize_log2;
3132
		rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i];
T
Troy Kisky 已提交
3133 3134 3135
		bd_dma += size;
		cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
		rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
3136 3137 3138
	}

	for (i = 0; i < fep->num_tx_queues; i++) {
T
Troy Kisky 已提交
3139 3140 3141 3142 3143 3144 3145 3146 3147
		struct fec_enet_priv_tx_q *txq = fep->tx_queue[i];
		unsigned size = dsize * txq->bd.ring_size;

		txq->bd.qid = i;
		txq->bd.base = cbd_base;
		txq->bd.cur = cbd_base;
		txq->bd.dma = bd_dma;
		txq->bd.dsize = dsize;
		txq->bd.dsize_log2 = dsize_log2;
3148
		txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i];
T
Troy Kisky 已提交
3149 3150 3151
		bd_dma += size;
		cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
		txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
3152
	}
3153

L
Linus Torvalds 已提交
3154

S
Sascha Hauer 已提交
3155
	/* The FEC Ethernet specific entries in the device structure */
3156 3157 3158
	ndev->watchdog_timeo = TX_TIMEOUT;
	ndev->netdev_ops = &fec_netdev_ops;
	ndev->ethtool_ops = &fec_enet_ethtool_ops;
3159

3160
	writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
F
Fabio Estevam 已提交
3161
	netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);
3162

3163
	if (fep->quirks & FEC_QUIRK_HAS_VLAN)
3164 3165 3166
		/* enable hw VLAN support */
		ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;

3167
	if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
N
Nimrod Andy 已提交
3168 3169
		ndev->gso_max_segs = FEC_MAX_TSO_SEGS;

3170 3171
		/* enable hw accelerator */
		ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
N
Nimrod Andy 已提交
3172
				| NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO);
3173 3174
		fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
	}
3175

3176
	if (fep->quirks & FEC_QUIRK_HAS_AVB) {
3177 3178 3179 3180
		fep->tx_align = 0;
		fep->rx_align = 0x3f;
	}

3181 3182
	ndev->hw_features = ndev->features;

3183
	fec_restart(ndev);
L
Linus Torvalds 已提交
3184 3185 3186 3187

	return 0;
}

3188
#ifdef CONFIG_OF
/* Pulse the optional "phy-reset-gpios" line described in the device
 * tree, honouring "phy-reset-duration" (ms) and
 * "phy-reset-active-high". Best-effort: errors are logged, not
 * propagated.
 */
static void fec_reset_phy(struct platform_device *pdev)
{
	int err, phy_reset;
	bool active_high = false;
	int msec = 1;
	struct device_node *np = pdev->dev.of_node;

	if (!np)
		return;

	of_property_read_u32(np, "phy-reset-duration", &msec);
	/* A sane reset duration should not be longer than 1s */
	/* NOTE(review): an out-of-range duration falls back to 1 ms,
	 * not 1000 ms — confirm this clamp value is intentional.
	 */
	if (msec > 1000)
		msec = 1;

	phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
	if (!gpio_is_valid(phy_reset))
		return;

	active_high = of_property_read_bool(np, "phy-reset-active-high");

	/* Request the GPIO already driven to its asserted level */
	err = devm_gpio_request_one(&pdev->dev, phy_reset,
			active_high ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
			"phy-reset");
	if (err) {
		dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
		return;
	}

	/* msleep() for long delays, usleep_range() for short ones */
	if (msec > 20)
		msleep(msec);
	else
		usleep_range(msec * 1000, msec * 1000 + 1000);

	/* Deassert reset */
	gpio_set_value_cansleep(phy_reset, !active_high);
}
#else /* CONFIG_OF */
static void fec_reset_phy(struct platform_device *pdev)
{
	/*
	 * In case of platform probe, the reset has been done
	 * by machine code.
	 */
}
#endif /* CONFIG_OF */

3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245
static void
fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
{
	struct device_node *np = pdev->dev.of_node;

	*num_tx = *num_rx = 1;

	if (!np || !of_device_is_available(np))
		return;

	/* parse the num of tx and rx queues */
3246
	of_property_read_u32(np, "fsl,num-tx-queues", num_tx);
3247

3248
	of_property_read_u32(np, "fsl,num-rx-queues", num_rx);
3249 3250

	if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) {
3251 3252
		dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n",
			 *num_tx);
3253 3254 3255 3256 3257
		*num_tx = 1;
		return;
	}

	if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) {
3258 3259
		dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n",
			 *num_rx);
3260 3261 3262 3263 3264 3265
		*num_rx = 1;
		return;
	}

}

3266
/* Platform probe: allocate the netdev, map registers, acquire clocks,
 * regulator and IRQs, set up runtime PM, PTP and the MDIO bus, then
 * register the network device. Error paths unwind via the goto chain
 * at the bottom in reverse acquisition order.
 */
static int
fec_probe(struct platform_device *pdev)
{
	struct fec_enet_private *fep;
	struct fec_platform_data *pdata;
	struct net_device *ndev;
	int i, irq, ret = 0;
	struct resource *r;
	const struct of_device_id *of_id;
	static int dev_id;	/* monotonically increasing instance id */
	struct device_node *np = pdev->dev.of_node, *phy_node;
	int num_tx_qs;
	int num_rx_qs;

	fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);

	/* Init network device */
	ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private),
				  num_tx_qs, num_rx_qs);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* setup board info structure */
	fep = netdev_priv(ndev);

	/* Pick up SoC-specific quirk flags from the OF match data */
	of_id = of_match_device(fec_dt_ids, &pdev->dev);
	if (of_id)
		pdev->id_entry = of_id->data;
	fep->quirks = pdev->id_entry->driver_data;

	fep->netdev = ndev;
	fep->num_rx_queues = num_rx_qs;
	fep->num_tx_queues = num_tx_qs;

#if !defined(CONFIG_M5272)
	/* default enable pause frame auto negotiation */
	if (fep->quirks & FEC_QUIRK_HAS_GBIT)
		fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
#endif

	/* Select default pin state */
	pinctrl_pm_select_default_state(&pdev->dev);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	fep->hwp = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(fep->hwp)) {
		ret = PTR_ERR(fep->hwp);
		goto failed_ioremap;
	}

	fep->pdev = pdev;
	fep->dev_id = dev_id++;

	platform_set_drvdata(pdev, ndev);

	/* ERR006687 workaround applies on i.MX6Q/DL unless the DT says
	 * the board-level workaround is present
	 */
	if ((of_machine_is_compatible("fsl,imx6q") ||
	     of_machine_is_compatible("fsl,imx6dl")) &&
	    !of_property_read_bool(np, "fsl,err006687-workaround-present"))
		fep->quirks |= FEC_QUIRK_ERR006687;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;

	/* Resolve the PHY: explicit phy-handle, or a fixed link */
	phy_node = of_parse_phandle(np, "phy-handle", 0);
	if (!phy_node && of_phy_is_fixed_link(np)) {
		ret = of_phy_register_fixed_link(np);
		if (ret < 0) {
			dev_err(&pdev->dev,
				"broken fixed-link specification\n");
			goto failed_phy;
		}
		phy_node = of_node_get(np);
	}
	fep->phy_node = phy_node;

	/* PHY interface mode: DT first, then platform data, then MII */
	ret = of_get_phy_mode(pdev->dev.of_node);
	if (ret < 0) {
		pdata = dev_get_platdata(&pdev->dev);
		if (pdata)
			fep->phy_interface = pdata->phy;
		else
			fep->phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		fep->phy_interface = ret;
	}

	/* ipg and ahb clocks are mandatory */
	fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(fep->clk_ipg)) {
		ret = PTR_ERR(fep->clk_ipg);
		goto failed_clk;
	}

	fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(fep->clk_ahb)) {
		ret = PTR_ERR(fep->clk_ahb);
		goto failed_clk;
	}

	fep->itr_clk_rate = clk_get_rate(fep->clk_ahb);

	/* enet_out is optional, depends on board */
	fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out");
	if (IS_ERR(fep->clk_enet_out))
		fep->clk_enet_out = NULL;

	fep->ptp_clk_on = false;
	mutex_init(&fep->ptp_clk_mutex);

	/* clk_ref is optional, depends on board */
	fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref");
	if (IS_ERR(fep->clk_ref))
		fep->clk_ref = NULL;

	/* Extended descriptors need the (optional) ptp clock */
	fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX;
	fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
	if (IS_ERR(fep->clk_ptp)) {
		fep->clk_ptp = NULL;
		fep->bufdesc_ex = false;
	}

	ret = fec_enet_clk_enable(ndev, true);
	if (ret)
		goto failed_clk;

	ret = clk_prepare_enable(fep->clk_ipg);
	if (ret)
		goto failed_clk_ipg;

	/* phy regulator is optional */
	fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
	if (!IS_ERR(fep->reg_phy)) {
		ret = regulator_enable(fep->reg_phy);
		if (ret) {
			dev_err(&pdev->dev,
				"Failed to enable phy regulator: %d\n", ret);
			goto failed_regulator;
		}
	} else {
		fep->reg_phy = NULL;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	fec_reset_phy(pdev);

	if (fep->bufdesc_ex)
		fec_ptp_init(pdev);

	ret = fec_enet_init(ndev);
	if (ret)
		goto failed_init;

	/* Request every IRQ the platform provides; only the absence of
	 * the first one is fatal
	 */
	for (i = 0; i < FEC_IRQ_NUM; i++) {
		irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			if (i)
				break;
			ret = irq;
			goto failed_irq;
		}
		ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
				       0, pdev->name, ndev);
		if (ret)
			goto failed_irq;

		fep->irq[i] = irq;
	}

	init_completion(&fep->mdio_done);
	ret = fec_enet_mii_init(pdev);
	if (ret)
		goto failed_mii_init;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(ndev);
	fec_enet_clk_enable(ndev, false);
	pinctrl_pm_select_sleep_state(&pdev->dev);

	ret = register_netdev(ndev);
	if (ret)
		goto failed_register;

	device_init_wakeup(&ndev->dev, fep->wol_flag &
			   FEC_WOL_HAS_MAGIC_PACKET);

	if (fep->bufdesc_ex && fep->ptp_clock)
		netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);

	fep->rx_copybreak = COPYBREAK_DEFAULT;
	INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

failed_register:
	fec_enet_mii_remove(fep);
failed_mii_init:
failed_irq:
failed_init:
	fec_ptp_stop(pdev);
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
failed_regulator:
	clk_disable_unprepare(fep->clk_ipg);
failed_clk_ipg:
	fec_enet_clk_enable(ndev, false);
failed_clk:
failed_phy:
	of_node_put(phy_node);
failed_ioremap:
	free_netdev(ndev);

	return ret;
}

3488
/* Platform remove: tear down in reverse order of fec_probe().
 * devm-managed resources (regmap, clocks, IRQs) are released by the
 * driver core after this returns.
 */
static int
fec_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	/* Make sure no tx-timeout work is still queued or running */
	cancel_work_sync(&fep->tx_timeout_work);
	fec_ptp_stop(pdev);
	unregister_netdev(ndev);
	fec_enet_mii_remove(fep);
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
	/* Release the reference taken on the PHY node in fec_probe() */
	of_node_put(fep->phy_node);
	free_netdev(ndev);

	return 0;
}

3506
/* System-sleep suspend: quiesce the interface under rtnl, gate the
 * clocks, and (unless Wake-on-LAN is armed) drop pins to sleep state
 * and power off the PHY regulator.
 */
static int __maybe_unused fec_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	rtnl_lock();
	if (netif_running(ndev)) {
		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
			fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
		phy_stop(ndev->phydev);
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		netif_device_detach(ndev);
		netif_tx_unlock_bh(ndev);
		fec_stop(ndev);
		fec_enet_clk_enable(ndev, false);
		/* Keep pins in default state if WoL must stay armed */
		if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
			pinctrl_pm_select_sleep_state(&fep->pdev->dev);
	}
	rtnl_unlock();

	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
		regulator_disable(fep->reg_phy);

	/* SOC supply clock to phy, when clock is disabled, phy link down
	 * SOC control phy regulator, when regulator is disabled, phy link down
	 */
	if (fep->clk_enet_out || fep->reg_phy)
		fep->link = 0;

	return 0;
}

3539
/* System-sleep resume: re-enable the PHY regulator if it was turned
 * off at suspend, then (under rtnl) restore clocks, undo the WoL sleep
 * mode or restore the pin state, and restart the interface.
 */
static int __maybe_unused fec_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
	int ret;
	int val;

	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
		ret = regulator_enable(fep->reg_phy);
		if (ret)
			return ret;
	}

	rtnl_lock();
	if (netif_running(ndev)) {
		ret = fec_enet_clk_enable(ndev, true);
		if (ret) {
			rtnl_unlock();
			goto failed_clk;
		}
		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
			/* Leave WoL sleep mode: board hook first, then
			 * clear the magic-packet/sleep bits in ECNTRL
			 */
			if (pdata && pdata->sleep_mode_enable)
				pdata->sleep_mode_enable(false);
			val = readl(fep->hwp + FEC_ECNTRL);
			val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
			writel(val, fep->hwp + FEC_ECNTRL);
			fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON;
		} else {
			pinctrl_pm_select_default_state(&fep->pdev->dev);
		}
		fec_restart(ndev);
		netif_tx_lock_bh(ndev);
		netif_device_attach(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
		phy_start(ndev->phydev);
	}
	rtnl_unlock();

	return 0;

failed_clk:
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
	return ret;
}

3587 3588 3589 3590 3591 3592 3593 3594 3595 3596 3597 3598 3599 3600 3601 3602 3603 3604 3605 3606 3607 3608
/* Runtime-PM suspend hook: gate the IPG clock while the device idles. */
static int __maybe_unused fec_runtime_suspend(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct fec_enet_private *priv = netdev_priv(netdev);

	clk_disable_unprepare(priv->clk_ipg);
	return 0;
}

/* Runtime-PM resume hook: ungate the IPG clock again. */
static int __maybe_unused fec_runtime_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct fec_enet_private *priv = netdev_priv(netdev);

	return clk_prepare_enable(priv->clk_ipg);
}

/* System-sleep and runtime PM callbacks, referenced by fec_driver */
static const struct dev_pm_ops fec_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
	SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
};
3609

3610 3611
/* Platform driver glue: matches via fec_dt_ids (OF) or fec_devtype */
static struct platform_driver fec_driver = {
	.driver	= {
		.name	= DRIVER_NAME,
		.pm	= &fec_pm_ops,
		.of_match_table = fec_dt_ids,
	},
	.id_table = fec_devtype,
	.probe	= fec_probe,
	.remove	= fec_drv_remove,
};

3621
/* Standard module registration boilerplate */
module_platform_driver(fec_driver);

MODULE_ALIAS("platform:"DRIVER_NAME);
MODULE_LICENSE("GPL");