/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include <linux/phylink.h>
#include <net/pkt_cls.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"
#include "dwxgmac2.h"
#include "hwif.h"

#define	STMMAC_ALIGN(x)		__ALIGN_KERNEL(x, SMP_CACHE_BYTES)
#define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, 0644);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
#define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)

static int flow_ctrl = FLOW_AUTO;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define	STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))

/* By default the driver uses the ring mode to manage tx and rx descriptors,
 * but the user can force the use of the chain mode instead.
 */
static unsigned int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_DEBUG_FS
static int stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets default values in
 * case of errors.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DEFAULT_BUFSIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < rx_queues_cnt)
			napi_disable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_disable(&ch->tx_napi);
	}
}

/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 */
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < rx_queues_cnt)
			napi_enable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_enable(&ch->tx_napi);
	}
}

/**
 * stmmac_stop_all_queues - Stop all queues
 * @priv: driver private structure
 */
static void stmmac_stop_all_queues(struct stmmac_priv *priv)
{
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < tx_queues_cnt; queue++)
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}

/**
 * stmmac_start_all_queues - Start all queues
 * @priv: driver private structure
 */
static void stmmac_start_all_queues(struct stmmac_priv *priv)
{
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < tx_queues_cnt; queue++)
		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
}

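/* Schedule the deferred service task (used e.g. after a fatal HW error),
 * unless the interface is going down or the task is already queued.
 */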
static void stmmac_service_event_schedule(struct stmmac_priv *priv)
{
	if (!test_bit(STMMAC_DOWN, &priv->state) &&
	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
		queue_work(priv->wq, &priv->service_task);
}

static void stmmac_global_err(struct stmmac_priv *priv)
{
	netif_carrier_off(priv->dev);
	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
	stmmac_service_event_schedule(priv);
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Otherwise the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* Platform provided default clk_csr would be assumed valid
	 * for all other cases except for the below mentioned ones.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we cannot estimate the proper divider, as the frequency of
	 * clk_csr_i is not known. So we do not change the default
	 * divider.
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}

	if (priv->plat->has_sun8i) {
		if (clk_rate > 160000000)
			priv->clk_csr = 0x03;
		else if (clk_rate > 80000000)
			priv->clk_csr = 0x02;
		else if (clk_rate > 40000000)
			priv->clk_csr = 0x01;
		else
			priv->clk_csr = 0;
	}

	if (priv->plat->has_xgmac) {
		if (clk_rate > 400000000)
			priv->clk_csr = 0x5;
		else if (clk_rate > 350000000)
			priv->clk_csr = 0x4;
		else if (clk_rate > 300000000)
			priv->clk_csr = 0x3;
		else if (clk_rate > 250000000)
			priv->clk_csr = 0x2;
		else if (clk_rate > 150000000)
			priv->clk_csr = 0x1;
		else
			priv->clk_csr = 0x0;
	}
}

static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

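/**
 * stmmac_tx_avail - get the number of available TX descriptors
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: the ring is circular, so the free space is the gap between
 * dirty_tx and cur_tx, minus one slot kept unused to distinguish a full ring
 * from an empty one.
 */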
static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	u32 avail;

	if (tx_q->dirty_tx > tx_q->cur_tx)
		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
	else
		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;

	return avail;
}

/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	u32 dirty;

	if (rx_q->dirty_rx <= rx_q->cur_rx)
		dirty = rx_q->cur_rx - rx_q->dirty_rx;
	else
		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;

	return dirty;
}

/**
 * stmmac_hw_fix_mac_speed - callback for speed selection
 * @priv: driver private structure
 * Description: on some platforms (e.g. ST), some HW system configuration
 * registers have to be set according to the link speed negotiated.
 */
static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
{
	struct net_device *ndev = priv->dev;
	struct phy_device *phydev = ndev->phydev;

	if (likely(priv->plat->fix_mac_speed))
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
}

/**
 * stmmac_enable_eee_mode - check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function verifies that all TX queues are idle and, if
 * so, enters the LPI mode in case of EEE.
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* check if all TX queues have the work finished */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		if (tx_q->dirty_tx != tx_q->cur_tx)
			return; /* still unfinished work */
	}

	/* Check and enter in LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		stmmac_set_eee_mode(priv, priv->hw,
				priv->plat->en_tx_lpi_clockgating);
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function disables EEE and exits the LPI state if it is
 * active. It is called by the xmit path.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	stmmac_reset_eee_mode(priv, priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t: timer_list hook
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);

	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts the
 *  related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	struct net_device *ndev = priv->dev;
	int interface = priv->plat->interface;
	bool ret = false;

	if ((interface != PHY_INTERFACE_MODE_MII) &&
	    (interface != PHY_INTERFACE_MODE_GMII) &&
	    !phy_interface_mode_is_rgmii(interface))
		goto out;

	/* Using PCS we cannot deal with the phy registers at this stage
	 * so we do not support extra features like EEE.
	 */
	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
	    (priv->hw->pcs == STMMAC_PCS_RTBI))
		goto out;

	/* MAC core supports the EEE feature. */
	if (priv->dma_cap.eee) {
		int tx_lpi_timer = priv->tx_lpi_timer;

		/* Check if the PHY supports EEE */
		if (phy_init_eee(ndev->phydev, 1)) {
			/* To manage at run-time if the EEE cannot be supported
			 * anymore (for example because the lp caps have been
			 * changed).
			 * In that case the driver disables its own timers.
			 */
			mutex_lock(&priv->lock);
			if (priv->eee_active) {
				netdev_dbg(priv->dev, "disable EEE\n");
				del_timer_sync(&priv->eee_ctrl_timer);
				stmmac_set_eee_timer(priv, priv->hw, 0,
						tx_lpi_timer);
			}
			priv->eee_active = 0;
			mutex_unlock(&priv->lock);
			goto out;
		}
		/* Activate the EEE and start timers */
		mutex_lock(&priv->lock);
		if (!priv->eee_active) {
			priv->eee_active = 1;
			timer_setup(&priv->eee_ctrl_timer,
				    stmmac_eee_ctrl_timer, 0);
			mod_timer(&priv->eee_ctrl_timer,
				  STMMAC_LPI_T(eee_timer));

			stmmac_set_eee_timer(priv, priv->hw,
					STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
		}
		/* Set HW EEE according to the speed */
		stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);

		ret = true;
		mutex_unlock(&priv->lock);

		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	}
out:
	return ret;
}

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the descriptor, performs some
 * sanity checks and then passes it to the stack.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	u64 ns = 0;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status */
	if (stmmac_get_tx_timestamp_status(priv, p)) {
		/* get the valid tstamp */
		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);

		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}

	return;
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	struct dma_desc *desc = p;
	u64 ns = 0;

	if (!priv->hwts_rx_en)
		return;
	/* For GMAC4, the valid timestamp is from CTX next desc. */
	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
		desc = np;

	/* Check if timestamp is available */
	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else  {
		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
	}
}

/**
 *  stmmac_hwtstamp_set - control hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing(TX)
 *  and incoming(RX) packets time stamping based on user input.
 *  Return Value:
 *  0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	struct timespec64 now;
	u64 temp = 0;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;
	u32 sec_inc = 0;
	u32 value = 0;
	bool xmac;

	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* 'xmac' hardware can support Sync, Pdelay_Req and
			 * Pdelay_resp by setting bit14 and bits17/16 to 01
			 * This leaves Delay_Req timestamps out.
			 * Enable all events *and* general purpose message
			 * timestamping
			 */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
	else {
		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
			 tstamp_all | ptp_v2 | ptp_over_ethernet |
			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
			 ts_master_en | snap_type_sel);
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);

		/* program Sub Second Increment reg */
		stmmac_config_sub_second_increment(priv,
				priv->ptpaddr, priv->plat->clk_ptp_rate,
				xmac, &sec_inc);
		temp = div_u64(1000000000ULL, sec_inc);

		/* Store sub second increment and flags for later use */
		priv->sub_second_inc = sec_inc;
		priv->systime_flags = value;

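		/* For example, assuming a 50 MHz clk_ptp_rate, the fine update
		 * method typically programs sec_inc to twice the clock period
		 * (40 ns), so freq_div_ratio = 1e9 / 40 = 25 MHz and the
		 * addend computed below is 2^32 * 25MHz / 50MHz = 2^31.
		 */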
		/* calculate default added value:
		 * formula is :
		 * addend = (2^32)/freq_div_ratio;
		 * where, freq_div_ratio = 1e9ns/sec_inc
		 */
		temp = (u64)(temp << 32);
		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);

		/* initialize system time */
		ktime_get_real_ts64(&now);

		/* lower 32 bits of tv_sec are safe until y2106 */
		stmmac_init_systime(priv, priv->ptpaddr,
				(u32)now.tv_sec, now.tv_nsec);
	}

	memcpy(&priv->tstamp_config, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}

/**
 *  stmmac_hwtstamp_get - read hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function obtains the current hardware timestamping settings
 *  as requested.
 */
static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config *config = &priv->tstamp_config;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	return copy_to_user(ifr->ifr_data, config,
			    sizeof(*config)) ? -EFAULT : 0;
}

/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	priv->adv_ts = 0;
	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
	if (xmac && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	stmmac_ptp_register(priv);

	return 0;
}

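/**
 * stmmac_release_ptp - release PTP resources
 * @priv: driver private structure
 * Description: gate the optional PTP reference clock and unregister the PTP
 * clock driver.
 */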
static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	if (priv->plat->clk_ptp_ref)
		clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}

/**
 *  stmmac_mac_flow_ctrl - Configure flow control in all queues
 *  @priv: driver private structure
 *  @duplex: duplex mode passed to the flow control configuration
 *  Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;

	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
			priv->pause, tx_cnt);
}

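/* phylink validate callback: clear from the supported and advertising masks
 * the link modes this configuration cannot handle (1000Mbps when max_speed is
 * lower, and half duplex when more than one TX queue is in use).
 */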
static void stmmac_validate(struct phylink_config *config,
			    unsigned long *supported,
			    struct phylink_link_state *state)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	int tx_cnt = priv->plat->tx_queues_to_use;
	int max_speed = priv->plat->max_speed;

	/* Cut down 1G if asked to */
	if ((max_speed > 0) && (max_speed < 1000)) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
	}

	/* Half-Duplex can only work with single queue */
	if (tx_cnt > 1) {
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 1000baseT_Half);
	}

	bitmap_andnot(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_andnot(state->advertising, state->advertising, mask,
		      __ETHTOOL_LINK_MODE_MASK_NBITS);
}

static int stmmac_mac_link_state(struct phylink_config *config,
				 struct phylink_link_state *state)
{
	return -EOPNOTSUPP;
}

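/* Program the MAC_CTRL register for the speed and duplex negotiated by the
 * PHY and, if the link partner advertised pause frames, (re)configure flow
 * control accordingly.
 */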
static void stmmac_mac_config(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	u32 ctrl;

	ctrl = readl(priv->ioaddr + MAC_CTRL_REG);

	if (phydev->speed != priv->speed) {
		ctrl &= ~priv->hw->link.speed_mask;

		switch (phydev->speed) {
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		case SPEED_100:
			ctrl |= priv->hw->link.speed100;
			break;
		case SPEED_10:
			ctrl |= priv->hw->link.speed10;
			break;
		default:
			netif_warn(priv, link, priv->dev,
				   "broken speed: %d\n", phydev->speed);
			phydev->speed = SPEED_UNKNOWN;
			break;
		}

		if (phydev->speed != SPEED_UNKNOWN)
			stmmac_hw_fix_mac_speed(priv);

		priv->speed = phydev->speed;
	}

	/* Now we make sure that we can be in full duplex mode.
	 * If not, we operate in half-duplex mode.
	 */
	if (phydev->duplex != priv->oldduplex) {
		if (!phydev->duplex)
			ctrl &= ~priv->hw->link.duplex;
		else
			ctrl |= priv->hw->link.duplex;

		priv->oldduplex = phydev->duplex;
	}

	/* Flow Control operation */
	if (phydev->pause)
		stmmac_mac_flow_ctrl(priv, phydev->duplex);

	writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
}

static void stmmac_mac_an_restart(struct phylink_config *config)
{
	/* Not Supported */
}

static void stmmac_mac_link_down(struct net_device *dev, bool autoneg)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	stmmac_mac_set(priv, priv->ioaddr, false);
}

static void stmmac_mac_link_up(struct net_device *dev, bool autoneg)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	stmmac_mac_set(priv, priv->ioaddr, true);
}

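/* Transitional phylink MAC operations table: some callbacks are still NULL
 * placeholders (see the "TO BE FILLED" markers below), hence the
 * __maybe_unused annotation until the table is wired up.
 */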
static const struct phylink_mac_ops __maybe_unused stmmac_phylink_mac_ops = {
	.validate = stmmac_validate,
	.mac_link_state = stmmac_mac_link_state,
	.mac_config = NULL, /* TO BE FILLED */
	.mac_an_restart = stmmac_mac_an_restart,
	.mac_link_down = NULL, /* TO BE FILLED */
	.mac_link_up = NULL, /* TO BE FILLED */
};

/**
 * stmmac_adjust_link - adjusts the link parameters
 * @dev: net device structure
 * Description: this is the helper called by the physical abstraction layer
 * drivers to communicate the phy link status. According to the speed and
 * duplex, this driver can invoke the registered glue-logic as well.
 * It also invokes the eee initialization because switching between different
 * networks (that are eee capable) can happen at any time.
 */
static void stmmac_adjust_link(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	bool new_state = false;

	if (!phydev)
		return;

	mutex_lock(&priv->lock);

	if (phydev->link) {
		stmmac_mac_config(dev);

		if (!priv->oldlink) {
			new_state = true;
			priv->oldlink = true;
		}
	} else if (priv->oldlink) {
		new_state = true;
		priv->oldlink = false;
		priv->speed = SPEED_UNKNOWN;
		priv->oldduplex = DUPLEX_UNKNOWN;
	}

	if (phydev->link)
		stmmac_mac_link_up(dev, false);
	else
		stmmac_mac_link_down(dev, false);

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	mutex_unlock(&priv->lock);

	if (phydev->is_pseudo_fixed_link)
		/* Stop the PHY layer from calling the hook to adjust the
		 * link in case a switch is attached to the stmmac driver.
		 */
		phydev->irq = PHY_IGNORE_INTERRUPT;
	else
		/* At this stage, init the EEE if supported.
		 * Never called in case of fixed_link.
		 */
		priv->eee_enabled = stmmac_eee_init(priv);
}

/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS.
 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
	int interface = priv->plat->interface;

	if (priv->dma_cap.pcs) {
		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_RGMII;
		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_SGMII;
		}
	}
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 *  Return value:
 *  0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	struct phy_device *phydev;
	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
	char bus_id[MII_BUS_ID_SIZE];
	int interface = priv->plat->interface;
	int max_speed = priv->plat->max_speed;
	priv->oldlink = false;
	priv->speed = SPEED_UNKNOWN;
	priv->oldduplex = DUPLEX_UNKNOWN;

	if (priv->plat->phy_node) {
		phydev = of_phy_connect(dev, priv->plat->phy_node,
					&stmmac_adjust_link, 0, interface);
	} else {
		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
			 priv->plat->bus_id);

		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
			 priv->plat->phy_addr);
		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
			   phy_id_fmt);

		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
				     interface);
	}

	if (IS_ERR_OR_NULL(phydev)) {
		netdev_err(priv->dev, "Could not attach to PHY\n");
		if (!phydev)
			return -ENODEV;

		return PTR_ERR(phydev);
	}

	/* Stop Advertising 1000BASE Capability if interface is not GMII */
	if ((interface == PHY_INTERFACE_MODE_MII) ||
	    (interface == PHY_INTERFACE_MODE_RMII) ||
		(max_speed < 1000 && max_speed > 0))
		phy_set_max_speed(phydev, SPEED_100);

	/*
	 * Half-duplex mode is not supported with multiqueue:
	 * half-duplex can only work with a single queue.
	 */
	if (tx_cnt > 1) {
		phy_remove_link_mode(phydev,
				     ETHTOOL_LINK_MODE_10baseT_Half_BIT);
		phy_remove_link_mode(phydev,
				     ETHTOOL_LINK_MODE_100baseT_Half_BIT);
		phy_remove_link_mode(phydev,
				     ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
	}

	/*
	 * Broken HW is sometimes missing the pull-up resistor on the
	 * MDIO line, which results in reads to non-existent devices returning
	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
	 * device as well.
	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
	 */
	if (!priv->plat->phy_node && phydev->phy_id == 0) {
		phy_disconnect(phydev);
		return -ENODEV;
	}

	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
	 * subsequent PHY polling, make sure we force a link transition if
	 * we have a UP/DOWN/UP transition
	 */
	if (phydev->is_pseudo_fixed_link)
		phydev->irq = PHY_POLL;

	phy_attached_info(phydev);
	return 0;
}

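/* Dump the RX descriptor ring of every queue; used as a debug aid when
 * netif_msg_hw() is enabled.
 */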
static void stmmac_display_rx_rings(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	void *head_rx;
	u32 queue;

	/* Display RX rings */
	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		pr_info("\tRX Queue %u rings\n", queue);

		if (priv->extend_desc)
			head_rx = (void *)rx_q->dma_erx;
		else
			head_rx = (void *)rx_q->dma_rx;

		/* Display RX ring */
		stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
	}
}

static void stmmac_display_tx_rings(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	void *head_tx;
	u32 queue;

	/* Display TX rings */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		pr_info("\tTX Queue %d rings\n", queue);

		if (priv->extend_desc)
			head_tx = (void *)tx_q->dma_etx;
		else
			head_tx = (void *)tx_q->dma_tx;

		stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
	}
}

static void stmmac_display_rings(struct stmmac_priv *priv)
{
	/* Display RX ring */
	stmmac_display_rx_rings(priv);

	/* Display TX ring */
	stmmac_display_tx_rings(priv);
}

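/**
 * stmmac_set_bfsize - pick the DMA buffer size for a given MTU
 * @mtu: interface MTU
 * @bufsize: current buffer size
 * Description: round the buffer size up to the next supported bucket so a
 * whole frame fits in one buffer; e.g. an MTU of 3000 selects a 4KiB buffer.
 */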
static int stmmac_set_bfsize(int mtu, int bufsize)
{
	int ret = bufsize;

	if (mtu >= BUF_SIZE_4KiB)
		ret = BUF_SIZE_8KiB;
	else if (mtu >= BUF_SIZE_2KiB)
		ret = BUF_SIZE_4KiB;
	else if (mtu > DEFAULT_BUFSIZE)
		ret = BUF_SIZE_2KiB;
	else
		ret = DEFAULT_BUFSIZE;

	return ret;
}

/**
 * stmmac_clear_rx_descriptors - clear RX descriptors
 * @priv: driver private structure
 * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors,
 * whether basic or extended descriptors are used.
 */
static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int i;

	/* Clear the RX descriptors */
	for (i = 0; i < DMA_RX_SIZE; i++)
		if (priv->extend_desc)
			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
					priv->use_riwt, priv->mode,
					(i == DMA_RX_SIZE - 1),
					priv->dma_buf_sz);
		else
			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
					priv->use_riwt, priv->mode,
					(i == DMA_RX_SIZE - 1),
					priv->dma_buf_sz);
}

/**
 * stmmac_clear_tx_descriptors - clear tx descriptors
 * @priv: driver private structure
 * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors,
 * whether basic or extended descriptors are used.
 */
static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	int i;

	/* Clear the TX descriptors */
	for (i = 0; i < DMA_TX_SIZE; i++)
		if (priv->extend_desc)
			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
					priv->mode, (i == DMA_TX_SIZE - 1));
		else
			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
					priv->mode, (i == DMA_TX_SIZE - 1));
}

/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the TX and RX descriptors,
 * whether basic or extended descriptors are used.
 */
static void stmmac_clear_descriptors(struct stmmac_priv *priv)
{
	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* Clear the RX descriptors */
	for (queue = 0; queue < rx_queue_cnt; queue++)
		stmmac_clear_rx_descriptors(priv, queue);

	/* Clear the TX descriptors */
	for (queue = 0; queue < tx_queue_cnt; queue++)
		stmmac_clear_tx_descriptors(priv, queue);
}

/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag
 * @queue: RX queue index
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
				  int i, gfp_t flags, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct sk_buff *skb;

	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
	if (!skb) {
		netdev_err(priv->dev,
			   "%s: Rx init fails; skb is NULL\n", __func__);
		return -ENOMEM;
	}
	rx_q->rx_skbuff[i] = skb;
	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
						priv->dma_buf_sz,
						DMA_FROM_DEVICE);
	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);

	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
		stmmac_init_desc3(priv, p);

	return 0;
}

/**
 * stmmac_free_rx_buffer - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 * @i: buffer index.
 */
static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

	if (rx_q->rx_skbuff[i]) {
		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
				 priv->dma_buf_sz, DMA_FROM_DEVICE);
		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
	}
	rx_q->rx_skbuff[i] = NULL;
}

/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 * @i: buffer index.
 */
static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

	if (tx_q->tx_skbuff_dma[i].buf) {
		if (tx_q->tx_skbuff_dma[i].map_as_page)
			dma_unmap_page(priv->device,
				       tx_q->tx_skbuff_dma[i].buf,
				       tx_q->tx_skbuff_dma[i].len,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device,
					 tx_q->tx_skbuff_dma[i].buf,
					 tx_q->tx_skbuff_dma[i].len,
					 DMA_TO_DEVICE);
	}

	if (tx_q->tx_skbuff[i]) {
		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
		tx_q->tx_skbuff[i] = NULL;
		tx_q->tx_skbuff_dma[i].buf = 0;
		tx_q->tx_skbuff_dma[i].map_as_page = false;
	}
}

/**
 * init_dma_rx_desc_rings - init the RX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	int ret = -ENOMEM;
	int bfsize = 0;
	int queue;
	int i;

	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
	if (bfsize < 0)
		bfsize = 0;

	if (bfsize < BUF_SIZE_16KiB)
		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);

	priv->dma_buf_sz = bfsize;

	/* RX INITIALIZATION */
	netif_dbg(priv, probe, priv->dev,
		  "SKB addresses:\nskb\t\tskb data\tdma data\n");

	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		netif_dbg(priv, probe, priv->dev,
			  "(%s) dma_rx_phy=0x%08x\n", __func__,
			  (u32)rx_q->dma_rx_phy);

		for (i = 0; i < DMA_RX_SIZE; i++) {
			struct dma_desc *p;

			if (priv->extend_desc)
				p = &((rx_q->dma_erx + i)->basic);
			else
				p = rx_q->dma_rx + i;

			ret = stmmac_init_rx_buffers(priv, p, i, flags,
						     queue);
			if (ret)
				goto err_init_rx_buffers;

			netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
				  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
				  (unsigned int)rx_q->rx_skbuff_dma[i]);
		}

		rx_q->cur_rx = 0;
		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);

		stmmac_clear_rx_descriptors(priv, queue);

		/* Setup the chained descriptor addresses */
		if (priv->mode == STMMAC_CHAIN_MODE) {
			if (priv->extend_desc)
				stmmac_mode_init(priv, rx_q->dma_erx,
						rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
			else
				stmmac_mode_init(priv, rx_q->dma_rx,
						rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
		}
	}

	buf_sz = bfsize;

	return 0;

err_init_rx_buffers:
	while (queue >= 0) {
		while (--i >= 0)
			stmmac_free_rx_buffer(priv, queue, i);

		if (queue == 0)
			break;

		i = DMA_RX_SIZE;
		queue--;
	}

	return ret;
}

/**
 * init_dma_tx_desc_rings - init the TX descriptor rings
 * @dev: net device structure.
 * Description: this function initializes the DMA TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_tx_desc_rings(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;
	int i;

	for (queue = 0; queue < tx_queue_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		netif_dbg(priv, probe, priv->dev,
			  "(%s) dma_tx_phy=0x%08x\n", __func__,
			 (u32)tx_q->dma_tx_phy);

		/* Setup the chained descriptor addresses */
		if (priv->mode == STMMAC_CHAIN_MODE) {
			if (priv->extend_desc)
				stmmac_mode_init(priv, tx_q->dma_etx,
						tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
			else
				stmmac_mode_init(priv, tx_q->dma_tx,
						tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
		}

		for (i = 0; i < DMA_TX_SIZE; i++) {
			struct dma_desc *p;

			if (priv->extend_desc)
				p = &((tx_q->dma_etx + i)->basic);
			else
				p = tx_q->dma_tx + i;

			stmmac_clear_desc(priv, p);

			tx_q->tx_skbuff_dma[i].buf = 0;
			tx_q->tx_skbuff_dma[i].map_as_page = false;
			tx_q->tx_skbuff_dma[i].len = 0;
			tx_q->tx_skbuff_dma[i].last_segment = false;
			tx_q->tx_skbuff[i] = NULL;
		}

		tx_q->dirty_tx = 0;
		tx_q->cur_tx = 0;
		tx_q->mss = 0;

		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	return 0;
}

/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	ret = init_dma_rx_desc_rings(dev, flags);
	if (ret)
		return ret;

	ret = init_dma_tx_desc_rings(dev);

	stmmac_clear_descriptors(priv);

	if (netif_msg_hw(priv))
		stmmac_display_rings(priv);

	return ret;
}

/**
 * dma_free_rx_skbufs - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 */
static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
{
	int i;

	for (i = 0; i < DMA_RX_SIZE; i++)
		stmmac_free_rx_buffer(priv, queue, i);
}

/**
 * dma_free_tx_skbufs - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 */
static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
{
	int i;

	for (i = 0; i < DMA_TX_SIZE; i++)
		stmmac_free_tx_buffer(priv, queue, i);
}

/**
 * free_dma_rx_desc_resources - free RX dma desc resources
 * @priv: private structure
 */
static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
{
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 queue;

	/* Free RX queue resources */
	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		/* Release the DMA RX socket buffers */
		dma_free_rx_skbufs(priv, queue);

		/* Free DMA regions of consistent memory previously allocated */
		if (!priv->extend_desc)
			dma_free_coherent(priv->device,
					  DMA_RX_SIZE * sizeof(struct dma_desc),
					  rx_q->dma_rx, rx_q->dma_rx_phy);
		else
			dma_free_coherent(priv->device, DMA_RX_SIZE *
					  sizeof(struct dma_extended_desc),
					  rx_q->dma_erx, rx_q->dma_rx_phy);

		kfree(rx_q->rx_skbuff_dma);
		kfree(rx_q->rx_skbuff);
	}
}

/**
 * free_dma_tx_desc_resources - free TX dma desc resources
 * @priv: private structure
 */
static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
{
	u32 tx_count = priv->plat->tx_queues_to_use;
	u32 queue;

	/* Free TX queue resources */
	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		/* Release the DMA TX socket buffers */
		dma_free_tx_skbufs(priv, queue);

		/* Free DMA regions of consistent memory previously allocated */
		if (!priv->extend_desc)
			dma_free_coherent(priv->device,
					  DMA_TX_SIZE * sizeof(struct dma_desc),
					  tx_q->dma_tx, tx_q->dma_tx_phy);
		else
			dma_free_coherent(priv->device, DMA_TX_SIZE *
					  sizeof(struct dma_extended_desc),
					  tx_q->dma_etx, tx_q->dma_tx_phy);

		kfree(tx_q->tx_skbuff_dma);
		kfree(tx_q->tx_skbuff);
	}
}

/**
 * alloc_dma_rx_desc_resources - alloc RX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for the RX path. It pre-allocates
 * the RX socket buffers in order to allow the zero-copy mechanism.
 */
static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
{
	u32 rx_count = priv->plat->rx_queues_to_use;
	int ret = -ENOMEM;
	u32 queue;

	/* RX queues buffers and DMA */
	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		rx_q->queue_index = queue;
		rx_q->priv_data = priv;

		rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
						    sizeof(dma_addr_t),
						    GFP_KERNEL);
		if (!rx_q->rx_skbuff_dma)
			goto err_dma;

		rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
						sizeof(struct sk_buff *),
						GFP_KERNEL);
		if (!rx_q->rx_skbuff)
			goto err_dma;

		if (priv->extend_desc) {
			rx_q->dma_erx = dma_alloc_coherent(priv->device,
							   DMA_RX_SIZE * sizeof(struct dma_extended_desc),
							   &rx_q->dma_rx_phy,
							   GFP_KERNEL);
			if (!rx_q->dma_erx)
				goto err_dma;

		} else {
			rx_q->dma_rx = dma_alloc_coherent(priv->device,
							  DMA_RX_SIZE * sizeof(struct dma_desc),
							  &rx_q->dma_rx_phy,
							  GFP_KERNEL);
			if (!rx_q->dma_rx)
				goto err_dma;
		}
	}

	return 0;

err_dma:
	free_dma_rx_desc_resources(priv);

	return ret;
}

/**
 * alloc_dma_tx_desc_resources - alloc TX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for the TX path.
 */
static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
{
	u32 tx_count = priv->plat->tx_queues_to_use;
	int ret = -ENOMEM;
	u32 queue;

	/* TX queues buffers and DMA */
	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		tx_q->queue_index = queue;
		tx_q->priv_data = priv;

		tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
						    sizeof(*tx_q->tx_skbuff_dma),
						    GFP_KERNEL);
		if (!tx_q->tx_skbuff_dma)
			goto err_dma;

		tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
						sizeof(struct sk_buff *),
						GFP_KERNEL);
		if (!tx_q->tx_skbuff)
			goto err_dma;

		if (priv->extend_desc) {
			tx_q->dma_etx = dma_alloc_coherent(priv->device,
							   DMA_TX_SIZE * sizeof(struct dma_extended_desc),
							   &tx_q->dma_tx_phy,
							   GFP_KERNEL);
			if (!tx_q->dma_etx)
				goto err_dma;
		} else {
			tx_q->dma_tx = dma_alloc_coherent(priv->device,
							  DMA_TX_SIZE * sizeof(struct dma_desc),
							  &tx_q->dma_tx_phy,
							  GFP_KERNEL);
			if (!tx_q->dma_tx)
				goto err_dma;
		}
	}

	return 0;

err_dma:
	free_dma_tx_desc_resources(priv);

	return ret;
}

1738 1739 1740 1741 1742 1743 1744 1745 1746 1747
/**
 * alloc_dma_desc_resources - alloc TX/RX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extended or basic)
 * this function allocates the resources for the TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffers in order to
 * allow the zero-copy mechanism.
 */
static int alloc_dma_desc_resources(struct stmmac_priv *priv)
{
	/* RX Allocation */
	int ret = alloc_dma_rx_desc_resources(priv);

	if (ret)
		return ret;

	ret = alloc_dma_tx_desc_resources(priv);

	return ret;
}

/**
 * free_dma_desc_resources - free dma desc resources
 * @priv: private structure
 */
static void free_dma_desc_resources(struct stmmac_priv *priv)
{
	/* Release the DMA RX socket buffers */
	free_dma_rx_desc_resources(priv);

	/* Release the DMA TX socket buffers */
	free_dma_tx_desc_resources(priv);
}

/**
 *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
 *  @priv: driver private structure
 *  Description: It is used for enabling the rx queues in the MAC
 */
static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	int queue;
	u8 mode;

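	/* Each RX queue is enabled as DCB or AVB according to the
	 * mode_to_use value provided by the platform configuration.
	 */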
	for (queue = 0; queue < rx_queues_count; queue++) {
		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
	}
}

/**
 * stmmac_start_rx_dma - start RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This starts a RX DMA channel
 */
static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
	stmmac_start_rx(priv, priv->ioaddr, chan);
}

/**
 * stmmac_start_tx_dma - start TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This starts a TX DMA channel
 */
static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
	stmmac_start_tx(priv, priv->ioaddr, chan);
}

/**
 * stmmac_stop_rx_dma - stop RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This stops a RX DMA channel
 */
static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
	stmmac_stop_rx(priv, priv->ioaddr, chan);
}

/**
 * stmmac_stop_tx_dma - stop TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This stops a TX DMA channel
 */
static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
	stmmac_stop_tx(priv, priv->ioaddr, chan);
}

/**
 * stmmac_start_all_dma - start all RX and TX DMA channels
 * @priv: driver private structure
 * Description:
 * This starts all the RX and TX DMA channels
 */
static void stmmac_start_all_dma(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan = 0;

	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_start_rx_dma(priv, chan);

	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_start_tx_dma(priv, chan);
}

/**
 * stmmac_stop_all_dma - stop all RX and TX DMA channels
 * @priv: driver private structure
 * Description:
 * This stops the RX and TX DMA channels
 */
static void stmmac_stop_all_dma(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan = 0;

	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_stop_rx_dma(priv, chan);

	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_stop_tx_dma(priv, chan);
}

/**
 *  stmmac_dma_operation_mode - HW DMA operation mode
 *  @priv: driver private structure
 *  Description: it is used for configuring the DMA operation mode register in
 *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
 */
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	int rxfifosz = priv->plat->rx_fifo_size;
	int txfifosz = priv->plat->tx_fifo_size;
	u32 txmode = 0;
	u32 rxmode = 0;
	u32 chan = 0;
	u8 qmode = 0;

	if (rxfifosz == 0)
		rxfifosz = priv->dma_cap.rx_fifo_size;
	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	/* Adjust for real per queue fifo size */
	rxfifosz /= rx_channels_count;
	txfifosz /= tx_channels_count;

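	/* Select the DMA operating mode: a forced threshold mode takes
	 * precedence; Store-and-Forward is used when forced or when the TX
	 * checksum offload is in use; otherwise TX runs in threshold mode
	 * while RX stays in Store-and-Forward.
	 */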
	if (priv->plat->force_thresh_dma_mode) {
		txmode = tc;
		rxmode = tc;
	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
		/*
		 * In case of GMAC, SF mode can be enabled
		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE if actually supported
		 * 2) There is no bugged Jumbo frame support
		 *    that needs to not insert csum in the TDES.
		 */
		txmode = SF_DMA_MODE;
		rxmode = SF_DMA_MODE;
		priv->xstats.threshold = SF_DMA_MODE;
	} else {
		txmode = tc;
		rxmode = SF_DMA_MODE;
	}

	/* configure all channels */
	for (chan = 0; chan < rx_channels_count; chan++) {
		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;

		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
				rxfifosz, qmode);
		stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
				chan);
	}

	for (chan = 0; chan < tx_channels_count; chan++) {
		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;

		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
				txfifosz, qmode);
	}
}

/**
 * stmmac_tx_clean - to manage the transmission completion
 * @priv: driver private structure
 * @budget: NAPI budget limiting the amount of cleaning done in one call
 * @queue: TX queue index
 * Description: it reclaims the transmit resources after transmission completes.
 */
static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	unsigned int bytes_compl = 0, pkts_compl = 0;
	unsigned int entry, count = 0;

	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));

	priv->xstats.tx_clean++;

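	/* Walk the ring from the last cleaned entry (dirty_tx) towards the
	 * producer index (cur_tx), releasing at most "budget" descriptors.
	 */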
	entry = tx_q->dirty_tx;
	while ((entry != tx_q->cur_tx) && (count < budget)) {
		struct sk_buff *skb = tx_q->tx_skbuff[entry];
		struct dma_desc *p;
		int status;

		if (priv->extend_desc)
			p = (struct dma_desc *)(tx_q->dma_etx + entry);
		else
			p = tx_q->dma_tx + entry;

		status = stmmac_tx_status(priv, &priv->dev->stats,
				&priv->xstats, p, priv->ioaddr);
		/* Check if the descriptor is owned by the DMA */
		if (unlikely(status & tx_dma_own))
			break;

		count++;

		/* Make sure descriptor fields are read after reading
		 * the own bit.
		 */
		dma_rmb();

		/* Just consider the last segment and ...*/
		if (likely(!(status & tx_not_ls))) {
			/* ... verify the status error condition */
			if (unlikely(status & tx_err)) {
				priv->dev->stats.tx_errors++;
			} else {
				priv->dev->stats.tx_packets++;
				priv->xstats.tx_pkt_n++;
			}
			stmmac_get_tx_hwtstamp(priv, p, skb);
		}

		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
			if (tx_q->tx_skbuff_dma[entry].map_as_page)
				dma_unmap_page(priv->device,
					       tx_q->tx_skbuff_dma[entry].buf,
					       tx_q->tx_skbuff_dma[entry].len,
					       DMA_TO_DEVICE);
			else
				dma_unmap_single(priv->device,
						 tx_q->tx_skbuff_dma[entry].buf,
						 tx_q->tx_skbuff_dma[entry].len,
						 DMA_TO_DEVICE);
			tx_q->tx_skbuff_dma[entry].buf = 0;
			tx_q->tx_skbuff_dma[entry].len = 0;
			tx_q->tx_skbuff_dma[entry].map_as_page = false;
		}

		stmmac_clean_desc3(priv, tx_q, p);

		tx_q->tx_skbuff_dma[entry].last_segment = false;
		tx_q->tx_skbuff_dma[entry].is_jumbo = false;

		if (likely(skb != NULL)) {
			pkts_compl++;
			bytes_compl += skb->len;
			dev_consume_skb_any(skb);
			tx_q->tx_skbuff[entry] = NULL;
		}

		stmmac_release_tx_desc(priv, p, priv->mode);

		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
	}
	tx_q->dirty_tx = entry;

	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
				  pkts_compl, bytes_compl);

	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
								queue))) &&
	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {

		netif_dbg(priv, tx_done, priv->dev,
			  "%s: restart transmit\n", __func__);
		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
		stmmac_enable_eee_mode(priv);
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
	}

	/* We still have pending packets, let's call for a new scheduling */
	if (tx_q->dirty_tx != tx_q->cur_tx)
		mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));

	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));

	return count;
}

/**
 * stmmac_tx_err - to manage the tx error
 * @priv: driver private structure
 * @chan: channel index
 * Description: it cleans the descriptors and restarts the transmission
 * in case of transmission errors.
 */
static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
	int i;

	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));

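	/* Recovery sequence: stop the channel DMA, drop every pending skb,
	 * re-initialize the whole descriptor ring and restart the DMA engine
	 * before waking the queue up again.
	 */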
	stmmac_stop_tx_dma(priv, chan);
	dma_free_tx_skbufs(priv, chan);
	for (i = 0; i < DMA_TX_SIZE; i++)
		if (priv->extend_desc)
			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
					priv->mode, (i == DMA_TX_SIZE - 1));
		else
			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
					priv->mode, (i == DMA_TX_SIZE - 1));
	tx_q->dirty_tx = 0;
	tx_q->cur_tx = 0;
	tx_q->mss = 0;
	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
	stmmac_start_tx_dma(priv, chan);

	priv->dev->stats.tx_errors++;
	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
}

/**
 *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
 *  @priv: driver private structure
 *  @txmode: TX operating mode
 *  @rxmode: RX operating mode
 *  @chan: channel index
 *  Description: it is used for configuring of the DMA operation mode in
 *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
 *  mode.
 */
static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
					  u32 rxmode, u32 chan)
{
	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	int rxfifosz = priv->plat->rx_fifo_size;
	int txfifosz = priv->plat->tx_fifo_size;

	if (rxfifosz == 0)
		rxfifosz = priv->dma_cap.rx_fifo_size;
	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	/* Adjust for real per queue fifo size */
	rxfifosz /= rx_channels_count;
	txfifosz /= tx_channels_count;

	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
}

static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
{
	int ret;

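	/* Query the safety IRQ status; any reported fault (other than
	 * -EINVAL, i.e. the feature is not supported) is escalated as a
	 * global error.
	 */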
	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
	if (ret && (ret != -EINVAL)) {
		stmmac_global_err(priv);
		return true;
	}

	return false;
}

static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
{
	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
						 &priv->xstats, chan);
	struct stmmac_channel *ch = &priv->channel[chan];

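	/* Any DMA event on this channel may imply both RX and TX work, so
	 * schedule the RX and TX NAPI instances together and let each poll
	 * routine figure out what is actually pending.
	 */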
	if (status)
		status |= handle_rx | handle_tx;

	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
		stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
		napi_schedule_irqoff(&ch->rx_napi);
	}

	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
		stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
		napi_schedule_irqoff(&ch->tx_napi);
	}

	return status;
}

/**
 * stmmac_dma_interrupt - DMA ISR
 * @priv: driver private structure
 * Description: this is the DMA ISR. It is called by the main ISR.
 * It invokes the dwmac DMA interrupt-status routine and schedules the NAPI
 * poll method when there is work to be done.
 */
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
	u32 tx_channel_count = priv->plat->tx_queues_to_use;
	u32 rx_channel_count = priv->plat->rx_queues_to_use;
	u32 channels_to_check = tx_channel_count > rx_channel_count ?
				tx_channel_count : rx_channel_count;
	u32 chan;
	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];

	/* Make sure we never check beyond our status buffer. */
	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
		channels_to_check = ARRAY_SIZE(status);

	for (chan = 0; chan < channels_to_check; chan++)
		status[chan] = stmmac_napi_check(priv, chan);

	for (chan = 0; chan < tx_channel_count; chan++) {
		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
			/* Try to bump up the dma threshold on this failure */
			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
			    (tc <= 256)) {
				tc += 64;
				if (priv->plat->force_thresh_dma_mode)
					stmmac_set_dma_operation_mode(priv,
								      tc,
								      tc,
								      chan);
				else
					stmmac_set_dma_operation_mode(priv,
								    tc,
								    SF_DMA_MODE,
								    chan);
				priv->xstats.threshold = tc;
			}
		} else if (unlikely(status[chan] == tx_hard_error)) {
			stmmac_tx_err(priv, chan);
		}
	}
}

/**
 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
 * @priv: driver private structure
 * Description: this masks the MMC irq, in fact, the counters are managed in SW.
 */
static void stmmac_mmc_setup(struct stmmac_priv *priv)
{
	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;

	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);

	if (priv->dma_cap.rmon) {
		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
	} else
		netdev_info(priv->dev, "No MAC Management Counters available\n");
}

/**
 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
 * @priv: driver private structure
 * Description:
 *  new GMAC chip generations have a new register to indicate the
 *  presence of the optional feature/functions.
 *  This can be also used to override the value passed through the
 *  platform and necessary for old MAC10/100 and GMAC chips.
 */
static int stmmac_get_hw_features(struct stmmac_priv *priv)
{
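	/* Returns true when the HW capability register could be read; the
	 * parsed capabilities are left in priv->dma_cap.
	 */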
	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
}

/**
 * stmmac_check_ether_addr - check if the MAC addr is valid
 * @priv: driver private structure
 * Description:
 * it verifies that the MAC address is valid; in case of failure it
 * generates a random MAC address
 */
static void stmmac_check_ether_addr(struct stmmac_priv *priv)
{
	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
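		/* Try the address already programmed into the MAC (e.g. by
		 * the bootloader) before falling back to a random one.
		 */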
		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
		if (!is_valid_ether_addr(priv->dev->dev_addr))
			eth_hw_addr_random(priv->dev);
		dev_info(priv->device, "device MAC address %pM\n",
			 priv->dev->dev_addr);
	}
}

/**
 * stmmac_init_dma_engine - DMA init.
 * @priv: driver private structure
 * Description:
 * It inits the DMA invoking the specific MAC/GMAC callback.
 * Some DMA parameters can be passed from the platform;
 * if they are not passed, a default is kept for the MAC or GMAC.
 */
static int stmmac_init_dma_engine(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
	struct stmmac_rx_queue *rx_q;
	struct stmmac_tx_queue *tx_q;
	u32 chan = 0;
	int atds = 0;
	int ret = 0;

	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
		dev_err(priv->device, "Invalid DMA configuration\n");
		return -EINVAL;
	}

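	/* The ATDS (alternate descriptor size) bit is only needed when
	 * extended descriptors are used in ring mode.
	 */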
	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
		atds = 1;

	ret = stmmac_reset(priv, priv->ioaddr);
	if (ret) {
		dev_err(priv->device, "Failed to reset the dma\n");
		return ret;
	}

	/* DMA Configuration */
	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);

	if (priv->plat->axi)
		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);

	/* DMA CSR Channel configuration */
	for (chan = 0; chan < dma_csr_ch; chan++)
		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);

	/* DMA RX Channel Configuration */
	for (chan = 0; chan < rx_channels_count; chan++) {
		rx_q = &priv->rx_queue[chan];

		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    rx_q->dma_rx_phy, chan);

		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
			    (DMA_RX_SIZE * sizeof(struct dma_desc));
		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
				       rx_q->rx_tail_addr, chan);
	}

	/* DMA TX Channel Configuration */
	for (chan = 0; chan < tx_channels_count; chan++) {
		tx_q = &priv->tx_queue[chan];

		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    tx_q->dma_tx_phy, chan);

		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
				       tx_q->tx_tail_addr, chan);
	}

	return ret;
}

static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

	mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
}

/**
 * stmmac_tx_timer - mitigation sw timer for tx.
 * @t: timer_list pointer
 * Description:
 * This is the timer handler that schedules the TX NAPI poll, which in turn
 * runs stmmac_tx_clean.
 */
static void stmmac_tx_timer(struct timer_list *t)
{
	struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
	struct stmmac_priv *priv = tx_q->priv_data;
	struct stmmac_channel *ch;

	ch = &priv->channel[tx_q->queue_index];

	/*
	 * If NAPI is already running we can miss some events. Let's rearm
	 * the timer and try again.
	 */
	if (likely(napi_schedule_prep(&ch->tx_napi)))
		__napi_schedule(&ch->tx_napi);
	else
		mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
}

/**
 * stmmac_init_tx_coalesce - init tx mitigation options.
 * @priv: driver private structure
 * Description:
 * This inits the transmit coalesce parameters: i.e. timer rate,
 * timer handler and default threshold used for enabling the
 * interrupt on completion bit.
 */
static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
{
	u32 tx_channel_count = priv->plat->tx_queues_to_use;
	u32 chan;

	priv->tx_coal_frames = STMMAC_TX_FRAMES;
	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;

	for (chan = 0; chan < tx_channel_count; chan++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];

		timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
	}
}

static void stmmac_set_rings_length(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan;

	/* set TX ring length */
	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_set_tx_ring_len(priv, priv->ioaddr,
				(DMA_TX_SIZE - 1), chan);

	/* set RX ring length */
	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_set_rx_ring_len(priv, priv->ioaddr,
				(DMA_RX_SIZE - 1), chan);
}

/**
 *  stmmac_set_tx_queue_weight - Set TX queue weight
 *  @priv: driver private structure
 *  Description: It is used for setting TX queues weight
 */
static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 weight;
	u32 queue;

	for (queue = 0; queue < tx_queues_count; queue++) {
		weight = priv->plat->tx_queues_cfg[queue].weight;
		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
	}
}

/**
 *  stmmac_configure_cbs - Configure CBS in TX queue
 *  @priv: driver private structure
 *  Description: It is used for configuring CBS in AVB TX queues
 */
static void stmmac_configure_cbs(struct stmmac_priv *priv)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 mode_to_use;
	u32 queue;

	/* queue 0 is reserved for legacy traffic */
	for (queue = 1; queue < tx_queues_count; queue++) {
		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
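		/* CBS parameters only apply to AVB queues: plain DCB queues
		 * are skipped.
		 */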
		if (mode_to_use == MTL_QUEUE_DCB)
			continue;

		stmmac_config_cbs(priv, priv->hw,
				priv->plat->tx_queues_cfg[queue].send_slope,
				priv->plat->tx_queues_cfg[queue].idle_slope,
				priv->plat->tx_queues_cfg[queue].high_credit,
				priv->plat->tx_queues_cfg[queue].low_credit,
				queue);
	}
}

/**
 *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
 *  @priv: driver private structure
 *  Description: It is used for mapping RX queues to RX dma channels
 */
static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 queue;
	u32 chan;

	for (queue = 0; queue < rx_queues_count; queue++) {
		chan = priv->plat->rx_queues_cfg[queue].chan;
		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
	}
}

/**
 *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
 *  @priv: driver private structure
 *  Description: It is used for configuring the RX Queue Priority
 */
static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 queue;
	u32 prio;

	for (queue = 0; queue < rx_queues_count; queue++) {
		if (!priv->plat->rx_queues_cfg[queue].use_prio)
			continue;

		prio = priv->plat->rx_queues_cfg[queue].prio;
		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
	}
}

/**
 *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
 *  @priv: driver private structure
 *  Description: It is used for configuring the TX Queue Priority
 */
static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 queue;
	u32 prio;

	for (queue = 0; queue < tx_queues_count; queue++) {
		if (!priv->plat->tx_queues_cfg[queue].use_prio)
			continue;

		prio = priv->plat->tx_queues_cfg[queue].prio;
		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
	}
}

/**
 *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
 *  @priv: driver private structure
 *  Description: It is used for configuring the RX queue routing
 */
static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 queue;
	u8 packet;

	for (queue = 0; queue < rx_queues_count; queue++) {
		/* no specific packet type routing specified for the queue */
		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
			continue;

		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
	}
}

/**
 *  stmmac_mtl_configuration - Configure MTL
 *  @priv: driver private structure
 *  Description: It is used for configuring MTL
 */
static void stmmac_mtl_configuration(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 tx_queues_count = priv->plat->tx_queues_to_use;

	if (tx_queues_count > 1)
		stmmac_set_tx_queue_weight(priv);

	/* Configure MTL RX algorithms */
	if (rx_queues_count > 1)
		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
				priv->plat->rx_sched_algorithm);

	/* Configure MTL TX algorithms */
	if (tx_queues_count > 1)
		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
				priv->plat->tx_sched_algorithm);

	/* Configure CBS in AVB TX queues */
	if (tx_queues_count > 1)
		stmmac_configure_cbs(priv);

	/* Map RX MTL to DMA channels */
	stmmac_rx_queue_dma_chan_map(priv);

	/* Enable MAC RX Queues */
	stmmac_mac_enable_rx_queues(priv);

	/* Set RX priorities */
	if (rx_queues_count > 1)
		stmmac_mac_config_rx_queues_prio(priv);

	/* Set TX priorities */
	if (tx_queues_count > 1)
		stmmac_mac_config_tx_queues_prio(priv);

	/* Set RX routing */
	if (rx_queues_count > 1)
		stmmac_mac_config_rx_queues_routing(priv);
}

static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
{
	if (priv->dma_cap.asp) {
		netdev_info(priv->dev, "Enabling Safety Features\n");
		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
	} else {
		netdev_info(priv->dev, "No Safety Features support found\n");
	}
}

/**
 * stmmac_hw_setup - setup mac in a usable state.
 *  @dev : pointer to the device structure.
 *  Description:
 *  this is the main function to set up the HW in a usable state: the DMA
 *  engine is reset and the core registers are configured (e.g. AXI,
 *  checksum features, timers), so that the DMA is ready to start receiving
 *  and transmitting.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 chan;
	int ret;

	/* DMA initialization and SW reset */
	ret = stmmac_init_dma_engine(priv);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
			   __func__);
		return ret;
	}

	/* Copy the MAC addr into the HW  */
	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);

	/* PS and related bits will be programmed according to the speed */
	if (priv->hw->pcs) {
		int speed = priv->plat->mac_port_sel_speed;

		if ((speed == SPEED_10) || (speed == SPEED_100) ||
		    (speed == SPEED_1000)) {
			priv->hw->ps = speed;
		} else {
			dev_warn(priv->device, "invalid port speed\n");
			priv->hw->ps = 0;
		}
	}

	/* Initialize the MAC Core */
	stmmac_core_init(priv, priv->hw, dev);

	/* Initialize MTL*/
	stmmac_mtl_configuration(priv);

	/* Initialize Safety Features */
	stmmac_safety_feat_configuration(priv);

	ret = stmmac_rx_ipc(priv, priv->hw);
	if (!ret) {
		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
		priv->hw->rx_csum = 0;
	}

	/* Enable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, true);

	/* Set the HW DMA mode and the COE */
	stmmac_dma_operation_mode(priv);

	stmmac_mmc_setup(priv);

	if (init_ptp) {
		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
		if (ret < 0)
			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);

		ret = stmmac_init_ptp(priv);
		if (ret == -EOPNOTSUPP)
			netdev_warn(priv->dev, "PTP not supported by HW\n");
		else if (ret)
			netdev_warn(priv->dev, "PTP init failed\n");
	}

	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;

	if (priv->use_riwt) {
		ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
		if (!ret)
			priv->rx_riwt = MAX_DMA_RIWT;
	}

	if (priv->hw->pcs)
		stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);

	/* set TX and RX rings length */
	stmmac_set_rings_length(priv);

	/* Enable TSO */
	if (priv->tso) {
		for (chan = 0; chan < tx_cnt; chan++)
			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
	}

	/* Start the ball rolling... */
	stmmac_start_all_dma(priv);

	return 0;
}

static void stmmac_hw_teardown(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	clk_disable_unprepare(priv->plat->clk_ptp_ref);
}

/**
 *  stmmac_open - open entry point of the driver
 *  @dev : pointer to the device structure.
 *  Description:
 *  This function is the open entry point of the driver.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int stmmac_open(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 chan;
	int ret;

	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI) {
		ret = stmmac_init_phy(dev);
		if (ret) {
			netdev_err(priv->dev,
				   "%s: Cannot attach to PHY (error: %d)\n",
				   __func__, ret);
			return ret;
		}
	}

	/* Extra statistics */
	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
	priv->xstats.threshold = tc;

	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
	priv->rx_copybreak = STMMAC_RX_COPYBREAK;

	ret = alloc_dma_desc_resources(priv);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
			   __func__);
		goto dma_desc_error;
	}

	ret = init_dma_desc_rings(dev, GFP_KERNEL);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
			   __func__);
		goto init_error;
	}

	ret = stmmac_hw_setup(dev, true);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
		goto init_error;
	}

	stmmac_init_tx_coalesce(priv);

	if (dev->phydev)
		phy_start(dev->phydev);

	/* Request the IRQ lines */
	ret = request_irq(dev->irq, stmmac_interrupt,
			  IRQF_SHARED, dev->name, dev);
	if (unlikely(ret < 0)) {
		netdev_err(priv->dev,
			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
			   __func__, dev->irq, ret);
		goto irq_error;
	}

	/* Request the Wake IRQ in case of another line is used for WoL */
	if (priv->wol_irq != dev->irq) {
		ret = request_irq(priv->wol_irq, stmmac_interrupt,
				  IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
				   __func__, priv->wol_irq, ret);
			goto wolirq_error;
		}
	}

	/* Request the IRQ lines */
	if (priv->lpi_irq > 0) {
		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
				  dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
				   __func__, priv->lpi_irq, ret);
			goto lpiirq_error;
		}
	}

	stmmac_enable_all_queues(priv);
	stmmac_start_all_queues(priv);

	return 0;

lpiirq_error:
	if (priv->wol_irq != dev->irq)
		free_irq(priv->wol_irq, dev);
wolirq_error:
	free_irq(dev->irq, dev);
irq_error:
	if (dev->phydev)
		phy_stop(dev->phydev);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		del_timer_sync(&priv->tx_queue[chan].txtimer);

	stmmac_hw_teardown(dev);
init_error:
	free_dma_desc_resources(priv);
dma_desc_error:
	if (dev->phydev)
		phy_disconnect(dev->phydev);

	return ret;
}

/**
 *  stmmac_release - close entry point of the driver
 *  @dev : device pointer.
 *  Description:
 *  This is the stop entry point of the driver.
 */
static int stmmac_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 chan;

	if (priv->eee_enabled)
		del_timer_sync(&priv->eee_ctrl_timer);

	/* Stop and disconnect the PHY */
	if (dev->phydev) {
		phy_stop(dev->phydev);
		phy_disconnect(dev->phydev);
	}

	stmmac_stop_all_queues(priv);

	stmmac_disable_all_queues(priv);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		del_timer_sync(&priv->tx_queue[chan].txtimer);

	/* Free the IRQ lines */
	free_irq(dev->irq, dev);
	if (priv->wol_irq != dev->irq)
		free_irq(priv->wol_irq, dev);
	if (priv->lpi_irq > 0)
		free_irq(priv->lpi_irq, dev);

	/* Stop TX/RX DMA and clear the descriptors */
	stmmac_stop_all_dma(priv);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv);

	/* Disable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, false);

	netif_carrier_off(dev);

	stmmac_release_ptp(priv);

	return 0;
}

/**
 *  stmmac_tso_allocator - fill TSO payload descriptors
 *  @priv: driver private structure
 *  @des: buffer start address
 *  @total_len: total length to fill in descriptors
 *  @last_segment: condition for the last descriptor
 *  @queue: TX queue index
 *  Description:
 *  This function fills descriptors and requests new descriptors according to
 *  the buffer length to fill
 */
static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
				 int total_len, bool last_segment, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	struct dma_desc *desc;
	u32 buff_size;
	int tmp_len;

	tmp_len = total_len;

	while (tmp_len > 0) {
		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
		desc = tx_q->dma_tx + tx_q->cur_tx;

		desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
			    TSO_MAX_BUFF_SIZE : tmp_len;

		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
				0, 1,
				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
				0, 0);

		tmp_len -= TSO_MAX_BUFF_SIZE;
	}
}

/**
 *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
 *  @skb : the socket buffer
 *  @dev : device pointer
 *  Description: this is the transmit function that is called on TSO frames
 *  (support available on GMAC4 and newer chips).
 *  Diagram below show the ring programming in case of TSO frames:
 *
 *  First Descriptor
 *   --------
 *   | DES0 |---> buffer1 = L2/L3/L4 header
 *   | DES1 |---> TCP Payload (can continue on next descr...)
 *   | DES2 |---> buffer 1 and 2 len
 *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
 *   --------
 *	|
 *     ...
 *	|
 *   --------
 *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
 *   | DES1 | --|
 *   | DES2 | --> buffer 1 and 2 len
 *   | DES3 |
 *   --------
 *
 * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
 */
static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dma_desc *desc, *first, *mss_desc = NULL;
	struct stmmac_priv *priv = netdev_priv(dev);
	int nfrags = skb_shinfo(skb)->nr_frags;
	u32 queue = skb_get_queue_mapping(skb);
	unsigned int first_entry, des;
	struct stmmac_tx_queue *tx_q;
	int tmp_pay_len = 0;
	u32 pay_len, mss;
	u8 proto_hdr_len;
	int i;

	tx_q = &priv->tx_queue[queue];

	/* Compute header lengths */
	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	/* Desc availability based on threshold should be enough safe */
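	/* Worst case is one descriptor per TSO_MAX_BUFF_SIZE chunk of
	 * payload plus one descriptor for the headers.
	 */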
	if (unlikely(stmmac_tx_avail(priv, queue) <
		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
								queue));
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx Ring full when queue awake\n",
				   __func__);
		}
		return NETDEV_TX_BUSY;
	}

	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */

	mss = skb_shinfo(skb)->gso_size;

	/* set new MSS value if needed */
	if (mss != tx_q->mss) {
		mss_desc = tx_q->dma_tx + tx_q->cur_tx;
		stmmac_set_mss(priv, mss_desc, mss);
		tx_q->mss = mss;
		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
	}

	if (netif_msg_tx_queued(priv)) {
		pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
			skb->data_len);
	}

	first_entry = tx_q->cur_tx;
	WARN_ON(tx_q->tx_skbuff[first_entry]);

	desc = tx_q->dma_tx + first_entry;
	first = desc;

	/* first descriptor: fill Headers on Buf1 */
	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
			     DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, des))
		goto dma_map_err;

	tx_q->tx_skbuff_dma[first_entry].buf = des;
	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);

	first->des0 = cpu_to_le32(des);

	/* Fill start of payload in buff2 of first descriptor */
	if (pay_len)
		first->des1 = cpu_to_le32(des + proto_hdr_len);

	/* If needed take extra descriptors to fill the remaining payload */
	tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;

	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);

	/* Prepare fragments */
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		des = skb_frag_dma_map(priv->device, frag, 0,
				       skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err;

		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
				     (i == nfrags - 1), queue);

		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
	}

	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;

	/* Only the last descriptor gets to point to the skb. */
	tx_q->tx_skbuff[tx_q->cur_tx] = skb;

	/* We've used all descriptors we need for this skb, however,
	 * advance cur_tx so that it references a fresh descriptor.
	 * ndo_start_xmit will fill this descriptor the next time it's
	 * called and stmmac_tx_clean may clean up to this descriptor.
	 */
	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);

	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	dev->stats.tx_bytes += skb->len;
	priv->xstats.tx_tso_frames++;
	priv->xstats.tx_tso_nfrags += nfrags;

	/* Manage tx mitigation */
	tx_q->tx_count_frames += nfrags + 1;
	if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
		stmmac_set_tx_ic(priv, desc);
		priv->xstats.tx_set_ic_bit++;
		tx_q->tx_count_frames = 0;
	} else {
		stmmac_tx_timer_arm(priv, queue);
	}

	skb_tx_timestamp(skb);

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		     priv->hwts_tx_en)) {
		/* declare that device is doing timestamping */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		stmmac_enable_tx_timestamp(priv, first);
	}

	/* Complete the first descriptor before granting the DMA */
	stmmac_prepare_tso_tx_desc(priv, first, 1,
			proto_hdr_len,
			pay_len,
			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));

	/* If context desc is used to change MSS */
	if (mss_desc) {
		/* Make sure that first descriptor has been completely
		 * written, including its own bit. This is because MSS is
		 * actually before first descriptor, so we need to make
		 * sure that MSS's own bit is the last thing written.
		 */
		dma_wmb();
		stmmac_set_tx_owner(priv, mss_desc);
	}

	/* The own bit must be the latest setting done when prepare the
	 * descriptor and then barrier is needed to make sure that
	 * all is coherent before granting the DMA engine.
	 */
	wmb();

	if (netif_msg_pktdata(priv)) {
		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
			tx_q->cur_tx, first, nfrags);

		stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);

		pr_info(">>> frame to be transmitted: ");
		print_pkt(skb->data, skb_headlen(skb));
	}

	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);

	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);

	return NETDEV_TX_OK;

dma_map_err:
	dev_err(priv->device, "Tx dma map failed\n");
	dev_kfree_skb(skb);
	priv->dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

/**
 *  stmmac_xmit - Tx entry point of the driver
 *  @skb : the socket buffer
 *  @dev : device pointer
 *  Description : this is the tx entry point of the driver.
 *  It programs the chain or the ring and supports oversized frames
 *  and SG feature.
 */
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int nopaged_len = skb_headlen(skb);
	int i, csum_insertion = 0, is_jumbo = 0;
	u32 queue = skb_get_queue_mapping(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int entry;
	unsigned int first_entry;
	struct dma_desc *desc, *first;
	struct stmmac_tx_queue *tx_q;
	unsigned int enh_desc;
	unsigned int des;

	tx_q = &priv->tx_queue[queue];

	if (priv->tx_path_in_lpi_mode)
		stmmac_disable_eee_mode(priv);

	/* Manage oversized TCP frames for GMAC4 device */
	if (skb_is_gso(skb) && priv->tso) {
		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
			/*
			 * There is no way to determine the number of TSO
			 * capable Queues. Let's always use Queue 0 because
			 * if TSO is supported then at least this one will
			 * be capable.
			 */
			skb_set_queue_mapping(skb, 0);

			return stmmac_tso_xmit(skb, dev);
		}
	}

	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
								queue));
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx Ring full when queue awake\n",
				   __func__);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tx_q->cur_tx;
	first_entry = entry;
	WARN_ON(tx_q->tx_skbuff[first_entry]);

	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);

	if (likely(priv->extend_desc))
		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
	else
		desc = tx_q->dma_tx + entry;

	first = desc;

	enh_desc = priv->plat->enh_desc;
	/* To program the descriptors according to the size of the frame */
	if (enh_desc)
		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);

	if (unlikely(is_jumbo)) {
		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
		if (unlikely(entry < 0) && (entry != -EINVAL))
			goto dma_map_err;
	}

	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);
		bool last_segment = (i == (nfrags - 1));

		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
		WARN_ON(tx_q->tx_skbuff[entry]);

		if (likely(priv->extend_desc))
			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
		else
			desc = tx_q->dma_tx + entry;

		des = skb_frag_dma_map(priv->device, frag, 0, len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err; /* should reuse desc w/o issues */

		tx_q->tx_skbuff_dma[entry].buf = des;

		stmmac_set_desc_addr(priv, desc, des);

		tx_q->tx_skbuff_dma[entry].map_as_page = true;
		tx_q->tx_skbuff_dma[entry].len = len;
		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;

		/* Prepare the descriptor and set the own bit too */
		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
				priv->mode, 1, last_segment, skb->len);
	}

	/* Only the last descriptor gets to point to the skb. */
	tx_q->tx_skbuff[entry] = skb;

	/* We've used all descriptors we need for this skb, however,
	 * advance cur_tx so that it references a fresh descriptor.
	 * ndo_start_xmit will fill this descriptor the next time it's
	 * called and stmmac_tx_clean may clean up to this descriptor.
	 */
	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
	tx_q->cur_tx = entry;

	if (netif_msg_pktdata(priv)) {
		void *tx_head;

		netdev_dbg(priv->dev,
			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
			   entry, first, nfrags);

		if (priv->extend_desc)
			tx_head = (void *)tx_q->dma_etx;
		else
			tx_head = (void *)tx_q->dma_tx;

		stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);

		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
		print_pkt(skb->data, skb->len);
	}

	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	dev->stats.tx_bytes += skb->len;

	/* According to the coalesce parameter the IC bit for the latest
	 * segment is reset and the timer re-started to clean the tx status.
	 * This approach takes care about the fragments: desc is the first
	 * element in case of no SG.
	 */
	tx_q->tx_count_frames += nfrags + 1;
	if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
		stmmac_set_tx_ic(priv, desc);
		priv->xstats.tx_set_ic_bit++;
		tx_q->tx_count_frames = 0;
	} else {
		stmmac_tx_timer_arm(priv, queue);
	}

	skb_tx_timestamp(skb);

	/* Ready to fill the first descriptor and set the OWN bit w/o any
	 * problems because all the descriptors are actually ready to be
	 * passed to the DMA engine.
	 */
	if (likely(!is_jumbo)) {
		bool last_segment = (nfrags == 0);

		des = dma_map_single(priv->device, skb->data,
				     nopaged_len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err;

		tx_q->tx_skbuff_dma[first_entry].buf = des;

		stmmac_set_desc_addr(priv, first, des);

		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;

		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
			     priv->hwts_tx_en)) {
			/* declare that device is doing timestamping */
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			stmmac_enable_tx_timestamp(priv, first);
		}

		/* Prepare the first descriptor setting the OWN bit too */
		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
				csum_insertion, priv->mode, 1, last_segment,
				skb->len);
	} else {
		stmmac_set_tx_owner(priv, first);
	}

	/* The own bit must be the latest setting done when prepare the
	 * descriptor and then barrier is needed to make sure that
	 * all is coherent before granting the DMA engine.
	 */
	wmb();

	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);

	stmmac_enable_dma_transmission(priv, priv->ioaddr);

	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);

	return NETDEV_TX_OK;

dma_map_err:
	netdev_err(priv->dev, "Tx DMA map failed\n");
	dev_kfree_skb(skb);
	priv->dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
{
	struct vlan_ethhdr *veth;
	__be16 vlan_proto;
	u16 vlanid;

	veth = (struct vlan_ethhdr *)skb->data;
	vlan_proto = veth->h_vlan_proto;

	if ((vlan_proto == htons(ETH_P_8021Q) &&
	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
	    (vlan_proto == htons(ETH_P_8021AD) &&
	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
		/* pop the vlan tag */
		vlanid = ntohs(veth->h_vlan_TCI);
		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
	}
}


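/* Returns 1 when recent RX buffer allocations have failed, i.e. when
 * rx_zeroc_thresh has been raised to STMMAC_RX_THRESH by stmmac_rx_refill().
 */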
static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
{
	if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
		return 0;

	return 1;
}

/**
 * stmmac_rx_refill - refill used skb preallocated buffers
 * @priv: driver private structure
 * @queue: RX queue index
 * Description : this is to reallocate the skb for the reception process
 * that is based on zero-copy.
 */
static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int dirty = stmmac_rx_dirty(priv, queue);
	unsigned int entry = rx_q->dirty_rx;

	int bfsize = priv->dma_buf_sz;

	while (dirty-- > 0) {
		struct dma_desc *p;

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			p = rx_q->dma_rx + entry;

		if (likely(!rx_q->rx_skbuff[entry])) {
			struct sk_buff *skb;

			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
			if (unlikely(!skb)) {
				/* so for a while no zero-copy! */
				rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
				if (unlikely(net_ratelimit()))
					dev_err(priv->device,
						"fail to alloc skb entry %d\n",
						entry);
				break;
			}

			rx_q->rx_skbuff[entry] = skb;
			rx_q->rx_skbuff_dma[entry] =
			    dma_map_single(priv->device, skb->data, bfsize,
					   DMA_FROM_DEVICE);
			if (dma_mapping_error(priv->device,
					      rx_q->rx_skbuff_dma[entry])) {
				netdev_err(priv->dev, "Rx DMA map failed\n");
				dev_kfree_skb(skb);
				break;
			}

			stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
			stmmac_refill_desc3(priv, rx_q, p);

			if (rx_q->rx_zeroc_thresh > 0)
				rx_q->rx_zeroc_thresh--;

			netif_dbg(priv, rx_status, priv->dev,
				  "refill entry #%d\n", entry);
		}
		dma_wmb();

		stmmac_set_rx_owner(priv, p, priv->use_riwt);

		dma_wmb();

		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
	}
	rx_q->dirty_rx = entry;
	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
}

/**
 * stmmac_rx - manage the receive process
 * @priv: driver private structure
 * @limit: napi budget
 * @queue: RX queue index.
 * Description: this is the function called by the napi poll method.
 * It gets all the frames inside the ring.
 */
static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned int next_entry = rx_q->cur_rx;
	int coe = priv->hw->rx_csum;
	unsigned int count = 0;
	bool xmac;

	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

	if (netif_msg_rx_status(priv)) {
		void *rx_head;

		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
		if (priv->extend_desc)
			rx_head = (void *)rx_q->dma_erx;
		else
			rx_head = (void *)rx_q->dma_rx;

		stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
	}
	while (count < limit) {
		int entry, status;
		struct dma_desc *p;
		struct dma_desc *np;

		entry = next_entry;

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			p = rx_q->dma_rx + entry;

		/* read the status of the incoming frame */
		status = stmmac_rx_status(priv, &priv->dev->stats,
				&priv->xstats, p);
		/* check if managed by the DMA otherwise go ahead */
		if (unlikely(status & dma_own))
			break;

		count++;

		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
		next_entry = rx_q->cur_rx;

		if (priv->extend_desc)
			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
		else
			np = rx_q->dma_rx + next_entry;

		prefetch(np);

		if (priv->extend_desc)
			stmmac_rx_extended_status(priv, &priv->dev->stats,
					&priv->xstats, rx_q->dma_erx + entry);
		if (unlikely(status == discard_frame)) {
			priv->dev->stats.rx_errors++;
			if (priv->hwts_rx_en && !priv->extend_desc) {
				/* DESC2 & DESC3 will be overwritten by device
				 * with timestamp value, hence reinitialize
				 * them in stmmac_rx_refill() function so that
				 * the device can reuse them.
				 */
				dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
				rx_q->rx_skbuff[entry] = NULL;
				dma_unmap_single(priv->device,
						 rx_q->rx_skbuff_dma[entry],
						 priv->dma_buf_sz,
						 DMA_FROM_DEVICE);
			}
		} else {
			struct sk_buff *skb;
			int frame_len;
			unsigned int des;

			stmmac_get_desc_addr(priv, p, &des);
			frame_len = stmmac_get_rx_frame_len(priv, p, coe);

			/*  If frame length is greater than skb buffer size
			 *  (preallocated during init) then the packet is
			 *  ignored
			 */
			if (frame_len > priv->dma_buf_sz) {
				if (net_ratelimit())
					netdev_err(priv->dev,
						   "len %d larger than size (%d)\n",
						   frame_len, priv->dma_buf_sz);
				priv->dev->stats.rx_length_errors++;
				continue;
			}

			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
			 * Type frames (LLC/LLC-SNAP)
			 *
			 * llc_snap is never checked in GMAC >= 4, so this ACS
			 * feature is always disabled and packets need to be
			 * stripped manually.
			 */
			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
			    unlikely(status != llc_snap))
				frame_len -= ETH_FCS_LEN;

			if (netif_msg_rx_status(priv)) {
				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
					   p, entry, des);
				netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
					   frame_len, status);
			}

			/* Zero-copy is always used for all sizes in case of
			 * GMAC4 because the used descriptors always need to
			 * be refilled.
			 */
			if (unlikely(!xmac &&
				     ((frame_len < priv->rx_copybreak) ||
				     stmmac_rx_threshold_count(rx_q)))) {
				skb = netdev_alloc_skb_ip_align(priv->dev,
								frame_len);
				if (unlikely(!skb)) {
					if (net_ratelimit())
						dev_warn(priv->device,
							 "packet dropped\n");
					priv->dev->stats.rx_dropped++;
					continue;
				}

				dma_sync_single_for_cpu(priv->device,
							rx_q->rx_skbuff_dma
							[entry], frame_len,
							DMA_FROM_DEVICE);
				skb_copy_to_linear_data(skb,
							rx_q->
							rx_skbuff[entry]->data,
							frame_len);

				skb_put(skb, frame_len);
				dma_sync_single_for_device(priv->device,
							   rx_q->rx_skbuff_dma
							   [entry], frame_len,
							   DMA_FROM_DEVICE);
			} else {
				skb = rx_q->rx_skbuff[entry];
				if (unlikely(!skb)) {
					if (net_ratelimit())
						netdev_err(priv->dev,
							   "%s: Inconsistent Rx chain\n",
							   priv->dev->name);
					priv->dev->stats.rx_dropped++;
					continue;
				}
				prefetch(skb->data - NET_IP_ALIGN);
				rx_q->rx_skbuff[entry] = NULL;
				rx_q->rx_zeroc_thresh++;

				skb_put(skb, frame_len);
				dma_unmap_single(priv->device,
						 rx_q->rx_skbuff_dma[entry],
						 priv->dma_buf_sz,
						 DMA_FROM_DEVICE);
			}

			if (netif_msg_pktdata(priv)) {
				netdev_dbg(priv->dev, "frame received (%dbytes)",
					   frame_len);
				print_pkt(skb->data, frame_len);
			}

			stmmac_get_rx_hwtstamp(priv, p, np, skb);

			stmmac_rx_vlan(priv->dev, skb);

			skb->protocol = eth_type_trans(skb, priv->dev);

			if (unlikely(!coe))
				skb_checksum_none_assert(skb);
			else
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			napi_gro_receive(&ch->rx_napi, skb);

			priv->dev->stats.rx_packets++;
			priv->dev->stats.rx_bytes += frame_len;
		}
	}

	stmmac_rx_refill(priv, queue);

	priv->xstats.rx_pkt_n += count;

	return count;
}

static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
{
	struct stmmac_channel *ch =
		container_of(napi, struct stmmac_channel, rx_napi);
	struct stmmac_priv *priv = ch->priv_data;
	u32 chan = ch->index;
	int work_done;

	priv->xstats.napi_poll++;

	work_done = stmmac_rx(priv, budget, chan);
	if (work_done < budget && napi_complete_done(napi, work_done))
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
	return work_done;
}

static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
{
	struct stmmac_channel *ch =
		container_of(napi, struct stmmac_channel, tx_napi);
	struct stmmac_priv *priv = ch->priv_data;
	struct stmmac_tx_queue *tx_q;
	u32 chan = ch->index;
	int work_done;

	priv->xstats.napi_poll++;

	work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan);
	work_done = min(work_done, budget);

	if (work_done < budget && napi_complete_done(napi, work_done))
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan);

	/* Force transmission restart */
	tx_q = &priv->tx_queue[chan];
	if (tx_q->cur_tx != tx_q->dirty_tx) {
		stmmac_enable_dma_transmission(priv, priv->ioaddr);
		stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr,
				       chan);
	}

	return work_done;
}

/**
 *  stmmac_tx_timeout
 *  @dev : Pointer to net device structure
 *  Description: this function is called when a packet transmission fails to
 *   complete within a reasonable time. The driver will mark the error in the
 *   netdev structure and arrange for the device to be reset to a sane state
 *   in order to transmit a new packet.
 */
static void stmmac_tx_timeout(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	stmmac_global_err(priv);
}

/**
 *  stmmac_set_rx_mode - entry point for multicast addressing
 *  @dev : pointer to the device structure
 *  Description:
 *  This function is a driver entry point which gets called by the kernel
 *  whenever multicast addresses must be enabled/disabled.
 *  Return value:
 *  void.
 */
static void stmmac_set_rx_mode(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	stmmac_set_filter(priv, priv->hw, dev);
}

/**
 *  stmmac_change_mtu - entry point to change MTU size for the device.
 *  @dev : device pointer.
 *  @new_mtu : the new MTU size for the device.
 *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
 *  to drive packet transmission. Ethernet has an MTU of 1500 octets
 *  (ETH_DATA_LEN). This value can be changed with ifconfig.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (netif_running(dev)) {
		netdev_err(priv->dev, "must be stopped to change its MTU\n");
		return -EBUSY;
	}

	dev->mtu = new_mtu;

	netdev_update_features(dev);

	return 0;
}
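
/* Usage sketch (illustrative only, not part of the driver): since the handler
 * above returns -EBUSY while the interface is running, the MTU has to be
 * changed with the interface down, e.g.
 *	ip link set dev eth0 down
 *	ip link set dev eth0 mtu 2000
 *	ip link set dev eth0 up
 * (interface name and MTU value are examples only).
 */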

static netdev_features_t stmmac_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
		features &= ~NETIF_F_RXCSUM;

	if (!priv->plat->tx_coe)
		features &= ~NETIF_F_CSUM_MASK;

	/* Some GMAC devices have a bugged Jumbo frame support that
	 * needs to have the Tx COE disabled for oversized frames
	 * (due to limited buffer sizes). In this case we disable
	 * the TX csum insertion in the TDES and not use SF.
	 */
	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
		features &= ~NETIF_F_CSUM_MASK;

	/* Disable tso if asked by ethtool */
	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
		if (features & NETIF_F_TSO)
			priv->tso = true;
		else
			priv->tso = false;
	}

	return features;
}

static int stmmac_set_features(struct net_device *netdev,
			       netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(netdev);

	/* Keep the COE Type in case of csum is supporting */
	if (features & NETIF_F_RXCSUM)
		priv->hw->rx_csum = priv->plat->rx_coe;
	else
		priv->hw->rx_csum = 0;
	/* No check needed because rx_coe has been set before and it will be
	 * fixed in case of issue.
	 */
	stmmac_rx_ipc(priv, priv->hw);

	return 0;
}

/**
 *  stmmac_interrupt - main ISR
 *  @irq: interrupt number.
 *  @dev_id: to pass the net device pointer.
 *  Description: this is the main driver interrupt service routine.
 *  It can call:
 *  o DMA service routine (to manage incoming frame reception and transmission
 *    status)
 *  o Core interrupts to manage: remote wake-up, management counter, LPI
 *    interrupts.
 */
static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queues_count;
	u32 queue;
	bool xmac;

	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;

	if (priv->irq_wake)
		pm_wakeup_event(priv->device, 0);

	if (unlikely(!dev)) {
		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
		return IRQ_NONE;
	}

	/* Check if adapter is up */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return IRQ_HANDLED;
	/* Check if a fatal error happened */
	if (stmmac_safety_feat_interrupt(priv))
		return IRQ_HANDLED;

	/* To handle GMAC own interrupts */
	if ((priv->plat->has_gmac) || xmac) {
		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
		int mtl_status;

		if (unlikely(status)) {
			/* For LPI we need to save the tx status */
			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
				priv->tx_path_in_lpi_mode = true;
			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
				priv->tx_path_in_lpi_mode = false;
		}

		for (queue = 0; queue < queues_count; queue++) {
			struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

			mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
								queue);
			if (mtl_status != -EINVAL)
				status |= mtl_status;

			if (status & CORE_IRQ_MTL_RX_OVERFLOW)
				stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
						       rx_q->rx_tail_addr,
						       queue);
		}

		/* PCS link status */
		if (priv->hw->pcs) {
			if (priv->xstats.pcs_link)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);
		}
	}

	/* To handle DMA interrupts */
	stmmac_dma_interrupt(priv);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by NETCONSOLE and other diagnostic tools
 * to allow network I/O with interrupts disabled.
 */
static void stmmac_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	stmmac_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

/**
 *  stmmac_ioctl - Entry point for the Ioctl
 *  @dev: Device pointer.
 *  @rq: An IOCTL-specific structure that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  @cmd: IOCTL command
 *  Description:
 *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
 */
static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	int ret = -EOPNOTSUPP;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (!dev->phydev)
			return -EINVAL;
		ret = phy_mii_ioctl(dev->phydev, rq, cmd);
		break;
	case SIOCSHWTSTAMP:
		ret = stmmac_hwtstamp_set(dev, rq);
		break;
	case SIOCGHWTSTAMP:
		ret = stmmac_hwtstamp_get(dev, rq);
		break;
	default:
		break;
	}

	return ret;
}
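
/* Userspace sketch (illustrative, not part of the driver): the SIOCSHWTSTAMP
 * path handled above is normally exercised by filling a struct hwtstamp_config
 * from <linux/net_tstamp.h>, pointing ifr.ifr_data at it and calling
 * ioctl(fd, SIOCSHWTSTAMP, &ifr) on a socket bound to the interface name,
 * as done for instance by the hwstamp_ctl tool from linuxptp.
 */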

static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				    void *cb_priv)
{
	struct stmmac_priv *priv = cb_priv;
	int ret = -EOPNOTSUPP;

	stmmac_disable_all_queues(priv);

	switch (type) {
	case TC_SETUP_CLSU32:
		if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
			ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
		break;
	default:
		break;
	}

	stmmac_enable_all_queues(priv);
	return ret;
}

static int stmmac_setup_tc_block(struct stmmac_priv *priv,
				 struct tc_block_offload *f)
{
	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
				priv, priv, f->extack);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
			   void *type_data)
{
	struct stmmac_priv *priv = netdev_priv(ndev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return stmmac_setup_tc_block(priv, type_data);
	case TC_SETUP_QDISC_CBS:
		return stmmac_tc_setup_cbs(priv, priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
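
/* Usage sketch (illustrative): the CBS offload accepted above is typically
 * requested from userspace with the tc tool on top of an mqprio root qdisc,
 * e.g.
 *	tc qdisc replace dev eth0 parent 100:2 cbs idleslope 20000 \
 *		sendslope -980000 hicredit 30 locredit -1470 offload 1
 * (interface name, handles and credit parameters are examples only).
 */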

static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret = 0;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;

	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);

	return ret;
}
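
/* Usage sketch (illustrative): this handler is reached through the standard
 * netlink path, e.g. "ip link set dev eth0 address 02:11:22:33:44:55"
 * (interface name and address are examples only).
 */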

#ifdef CONFIG_DEBUG_FS
static struct dentry *stmmac_fs_dir;

static void sysfs_display_ring(void *head, int size, int extend_desc,
			       struct seq_file *seq)
{
	int i;
	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
	struct dma_desc *p = (struct dma_desc *)head;

	for (i = 0; i < size; i++) {
		if (extend_desc) {
			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, (unsigned int)virt_to_phys(ep),
				   le32_to_cpu(ep->basic.des0),
				   le32_to_cpu(ep->basic.des1),
				   le32_to_cpu(ep->basic.des2),
				   le32_to_cpu(ep->basic.des3));
			ep++;
		} else {
			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, (unsigned int)virt_to_phys(p),
				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
			p++;
		}
		seq_printf(seq, "\n");
	}
}

static int stmmac_rings_status_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 tx_count = priv->plat->tx_queues_to_use;
	u32 queue;

	if ((dev->flags & IFF_UP) == 0)
		return 0;

	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		seq_printf(seq, "RX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_erx,
					   DMA_RX_SIZE, 1, seq);
		} else {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_rx,
					   DMA_RX_SIZE, 0, seq);
		}
	}

	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		seq_printf(seq, "TX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_etx,
					   DMA_TX_SIZE, 1, seq);
		} else {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_tx,
					   DMA_TX_SIZE, 0, seq);
		}
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);

static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);

	if (!priv->hw_cap_support) {
		seq_printf(seq, "DMA HW features not supported\n");
		return 0;
	}

	seq_printf(seq, "==============================\n");
	seq_printf(seq, "\tDMA HW features\n");
	seq_printf(seq, "==============================\n");

	seq_printf(seq, "\t10/100 Mbps: %s\n",
		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
	seq_printf(seq, "\t1000 Mbps: %s\n",
		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
	seq_printf(seq, "\tHalf duplex: %s\n",
		   (priv->dma_cap.half_duplex) ? "Y" : "N");
	seq_printf(seq, "\tHash Filter: %s\n",
		   (priv->dma_cap.hash_filter) ? "Y" : "N");
	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
		   (priv->dma_cap.multi_addr) ? "Y" : "N");
	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
		   (priv->dma_cap.pcs) ? "Y" : "N");
	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
	seq_printf(seq, "\tPMT Remote wake up: %s\n",
		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
	seq_printf(seq, "\tPMT Magic Frame: %s\n",
		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
	seq_printf(seq, "\tRMON module: %s\n",
		   (priv->dma_cap.rmon) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
		   (priv->dma_cap.time_stamp) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
		   (priv->dma_cap.eee) ? "Y" : "N");
	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
		   (priv->dma_cap.tx_coe) ? "Y" : "N");
	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
			   (priv->dma_cap.rx_coe) ? "Y" : "N");
	} else {
		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
	}
	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
		   priv->dma_cap.number_rx_channel);
	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
		   priv->dma_cap.number_tx_channel);
	seq_printf(seq, "\tEnhanced descriptors: %s\n",
		   (priv->dma_cap.enh_desc) ? "Y" : "N");

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);

static int stmmac_init_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Create per netdev entries */
	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);

	if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
		netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");

		return -ENOMEM;
	}

	/* Entry to report DMA RX/TX rings */
	priv->dbgfs_rings_status =
		debugfs_create_file("descriptors_status", 0444,
				    priv->dbgfs_dir, dev,
				    &stmmac_rings_status_fops);

	if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
		netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
		debugfs_remove_recursive(priv->dbgfs_dir);

		return -ENOMEM;
	}

	/* Entry to report the DMA HW features */
	priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
						  priv->dbgfs_dir,
						  dev, &stmmac_dma_cap_fops);

	if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
		netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
		debugfs_remove_recursive(priv->dbgfs_dir);

		return -ENOMEM;
	}

	return 0;
}
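
/* With debugfs mounted, the two entries created above are expected under
 * <debugfs>/<STMMAC_RESOURCE_NAME>/<ifname>/, typically
 * /sys/kernel/debug/stmmaceth/eth0/descriptors_status and
 * /sys/kernel/debug/stmmaceth/eth0/dma_cap (paths are illustrative and
 * depend on the debugfs mount point and interface name).
 */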

static void stmmac_exit_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	debugfs_remove_recursive(priv->dbgfs_dir);
}
#endif /* CONFIG_DEBUG_FS */

static const struct net_device_ops stmmac_netdev_ops = {
	.ndo_open = stmmac_open,
	.ndo_start_xmit = stmmac_xmit,
	.ndo_stop = stmmac_release,
	.ndo_change_mtu = stmmac_change_mtu,
	.ndo_fix_features = stmmac_fix_features,
	.ndo_set_features = stmmac_set_features,
	.ndo_set_rx_mode = stmmac_set_rx_mode,
	.ndo_tx_timeout = stmmac_tx_timeout,
	.ndo_do_ioctl = stmmac_ioctl,
	.ndo_setup_tc = stmmac_setup_tc,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = stmmac_poll_controller,
#endif
	.ndo_set_mac_address = stmmac_set_mac_address,
};

static void stmmac_reset_subtask(struct stmmac_priv *priv)
{
	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
		return;
	if (test_bit(STMMAC_DOWN, &priv->state))
		return;

	netdev_err(priv->dev, "Reset adapter.\n");

	rtnl_lock();
	netif_trans_update(priv->dev);
	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
		usleep_range(1000, 2000);

	set_bit(STMMAC_DOWN, &priv->state);
	dev_close(priv->dev);
	dev_open(priv->dev, NULL);
	clear_bit(STMMAC_DOWN, &priv->state);
	clear_bit(STMMAC_RESETING, &priv->state);
	rtnl_unlock();
}

static void stmmac_service_task(struct work_struct *work)
{
	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
			service_task);

	stmmac_reset_subtask(priv);
	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
}

/**
 *  stmmac_hw_init - Init the MAC device
 *  @priv: driver private structure
 *  Description: this function is to configure the MAC device according to
 *  some platform parameters or the HW capability register. It prepares the
 *  driver to use either ring or chain modes and to setup either enhanced or
 *  normal descriptors.
 */
static int stmmac_hw_init(struct stmmac_priv *priv)
{
	int ret;

	/* dwmac-sun8i only works in chain mode */
	if (priv->plat->has_sun8i)
		chain_mode = 1;
	priv->chain_mode = chain_mode;

	/* Initialize HW Interface */
	ret = stmmac_hwif_init(priv);
	if (ret)
		return ret;

	/* Get the HW capability (available on GMAC newer than 3.50a) */
	priv->hw_cap_support = stmmac_get_hw_features(priv);
	if (priv->hw_cap_support) {
		dev_info(priv->device, "DMA HW capability register supported\n");

		/* We can override some gmac/dma configuration fields: e.g.
		 * enh_desc, tx_coe (e.g. that are passed through the
		 * platform) with the values from the HW capability
		 * register (if supported).
		 */
		priv->plat->enh_desc = priv->dma_cap.enh_desc;
		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
		priv->hw->pmt = priv->plat->pmt;

		/* TXCOE doesn't work in thresh DMA mode */
		if (priv->plat->force_thresh_dma_mode)
			priv->plat->tx_coe = 0;
		else
			priv->plat->tx_coe = priv->dma_cap.tx_coe;

		/* In case of GMAC4 rx_coe is from HW cap register. */
		priv->plat->rx_coe = priv->dma_cap.rx_coe;

		if (priv->dma_cap.rx_coe_type2)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
		else if (priv->dma_cap.rx_coe_type1)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;

	} else {
		dev_info(priv->device, "No HW DMA feature register supported\n");
	}

	if (priv->plat->rx_coe) {
		priv->hw->rx_csum = priv->plat->rx_coe;
		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
		if (priv->synopsys_id < DWMAC_CORE_4_00)
			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
	}
	if (priv->plat->tx_coe)
		dev_info(priv->device, "TX Checksum insertion supported\n");

	if (priv->plat->pmt) {
		dev_info(priv->device, "Wake-Up On Lan supported\n");
		device_set_wakeup_capable(priv->device, 1);
	}

	if (priv->dma_cap.tsoen)
		dev_info(priv->device, "TSO supported\n");

	/* Run HW quirks, if any */
	if (priv->hwif_quirks) {
		ret = priv->hwif_quirks(priv);
		if (ret)
			return ret;
	}

	/* Rx Watchdog is available in cores newer than 3.40.
	 * In some cases, for example on buggy HW, this feature
	 * has to be disabled; this can be done by passing the
	 * riwt_off field from the platform.
	 */
	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
		priv->use_riwt = 1;
		dev_info(priv->device,
			 "Enable RX Mitigation via HW Watchdog Timer\n");
	}

	return 0;
}

/**
 * stmmac_dvr_probe
 * @device: device pointer
 * @plat_dat: platform data pointer
 * @res: stmmac resource pointer
 * Description: this is the main probe function used to
 * call alloc_etherdev and allocate the priv structure.
 * Return:
 * returns 0 on success, otherwise errno.
 */
int stmmac_dvr_probe(struct device *device,
		     struct plat_stmmacenet_data *plat_dat,
		     struct stmmac_resources *res)
{
	struct net_device *ndev = NULL;
	struct stmmac_priv *priv;
	u32 queue, maxq;
	int ret = 0;

	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, device);

	priv = netdev_priv(ndev);
	priv->device = device;
	priv->dev = ndev;

	stmmac_set_ethtool_ops(ndev);
	priv->pause = pause;
	priv->plat = plat_dat;
	priv->ioaddr = res->addr;
	priv->dev->base_addr = (unsigned long)res->addr;

	priv->dev->irq = res->irq;
	priv->wol_irq = res->wol_irq;
	priv->lpi_irq = res->lpi_irq;

	if (!IS_ERR_OR_NULL(res->mac))
		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);

	dev_set_drvdata(device, priv->dev);

	/* Verify driver arguments */
	stmmac_verify_args();

	/* Allocate workqueue */
	priv->wq = create_singlethread_workqueue("stmmac_wq");
	if (!priv->wq) {
		dev_err(priv->device, "failed to create workqueue\n");
		return -ENOMEM;
	}

	INIT_WORK(&priv->service_task, stmmac_service_task);

	/* Override with kernel parameters if supplied XXX CRS XXX
	 * this needs to have multiple instances
	 */
	if ((phyaddr >= 0) && (phyaddr <= 31))
		priv->plat->phy_addr = phyaddr;

	if (priv->plat->stmmac_rst) {
		ret = reset_control_assert(priv->plat->stmmac_rst);
		reset_control_deassert(priv->plat->stmmac_rst);
		/* Some reset controllers have only reset callback instead of
		 * assert + deassert callbacks pair.
		 */
		if (ret == -ENOTSUPP)
			reset_control_reset(priv->plat->stmmac_rst);
	}

	/* Init MAC and get the capabilities */
	ret = stmmac_hw_init(priv);
	if (ret)
		goto error_hw_init;

	stmmac_check_ether_addr(priv);

	/* Configure real RX and TX queues */
	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);

	ndev->netdev_ops = &stmmac_netdev_ops;

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_RXCSUM;

	ret = stmmac_tc_init(priv, priv);
	if (!ret)
		ndev->hw_features |= NETIF_F_HW_TC;

	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
		priv->tso = true;
		dev_info(priv->device, "TSO feature enabled\n");
	}
	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
	/* Both mac100 and gmac support receive VLAN tag detection */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
#endif
	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	/* MTU range: 46 - hw-specific max */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
		ndev->max_mtu = JUMBO_LEN;
	else if (priv->plat->has_xgmac)
		ndev->max_mtu = XGMAC_JUMBO_LEN;
	else
		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
	 * as well as plat->maxmtu < ndev->min_mtu which is an invalid range.
	 */
	if ((priv->plat->maxmtu < ndev->max_mtu) &&
	    (priv->plat->maxmtu >= ndev->min_mtu))
		ndev->max_mtu = priv->plat->maxmtu;
	else if (priv->plat->maxmtu < ndev->min_mtu)
		dev_warn(priv->device,
			 "%s: warning: maxmtu having invalid value (%d)\n",
			 __func__, priv->plat->maxmtu);

	if (flow_ctrl)
		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */

	/* Setup channels NAPI */
	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		ch->priv_data = priv;
		ch->index = queue;

		if (queue < priv->plat->rx_queues_to_use) {
			netif_napi_add(ndev, &ch->rx_napi, stmmac_napi_poll_rx,
				       NAPI_POLL_WEIGHT);
		}
		if (queue < priv->plat->tx_queues_to_use) {
			netif_napi_add(ndev, &ch->tx_napi, stmmac_napi_poll_tx,
				       NAPI_POLL_WEIGHT);
		}
	}

	mutex_init(&priv->lock);

	/* If a specific clk_csr value is passed from the platform
	 * this means that the CSR Clock Range selection cannot be
	 * changed at run-time and it is fixed. Otherwise the driver will try to
	 * set the MDC clock dynamically according to the actual csr
	 * clock input.
	 */
	if (priv->plat->clk_csr >= 0)
		priv->clk_csr = priv->plat->clk_csr;
	else
		stmmac_clk_csr_set(priv);

	stmmac_check_pcs_mode(priv);

	if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI) {
		/* MDIO bus Registration */
		ret = stmmac_mdio_register(ndev);
		if (ret < 0) {
			dev_err(priv->device,
				"%s: MDIO bus (id: %d) registration failed",
				__func__, priv->plat->bus_id);
			goto error_mdio_register;
		}
	}

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->device, "%s: ERROR %i registering the device\n",
			__func__, ret);
		goto error_netdev_register;
	}

#ifdef CONFIG_DEBUG_FS
	ret = stmmac_init_fs(ndev);
	if (ret < 0)
		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
			    __func__);
#endif

	return ret;

error_netdev_register:
	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
error_mdio_register:
	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < priv->plat->rx_queues_to_use)
			netif_napi_del(&ch->rx_napi);
		if (queue < priv->plat->tx_queues_to_use)
			netif_napi_del(&ch->tx_napi);
	}
error_hw_init:
	destroy_workqueue(priv->wq);

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
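
/* Call-site sketch (illustrative): bus glue (platform or PCI front-ends) is
 * expected to fill a struct plat_stmmacenet_data and a struct stmmac_resources
 * (base address, IRQs, MAC address) and then call
 *	ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 * from its own probe routine (variable names are examples only).
 */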

/**
 * stmmac_dvr_remove
 * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
 * changes the link status, and releases the DMA descriptor rings.
 */
int stmmac_dvr_remove(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	netdev_info(priv->dev, "%s: removing driver", __func__);

#ifdef CONFIG_DEBUG_FS
	stmmac_exit_fs(ndev);
#endif
	stmmac_stop_all_dma(priv);

	stmmac_mac_set(priv, priv->ioaddr, false);
	netif_carrier_off(ndev);
	unregister_netdev(ndev);
	if (priv->plat->stmmac_rst)
		reset_control_assert(priv->plat->stmmac_rst);
	clk_disable_unprepare(priv->plat->pclk);
	clk_disable_unprepare(priv->plat->stmmac_clk);
	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
	destroy_workqueue(priv->wq);
	mutex_destroy(&priv->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);

/**
 * stmmac_suspend - suspend callback
 * @dev: device pointer
 * Description: this is the function to suspend the device and it is called
 * by the platform driver to stop the network queue, release the resources,
 * program the PMT register (for WoL), clean and release driver resources.
 */
int stmmac_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	if (!ndev || !netif_running(ndev))
		return 0;

	if (ndev->phydev)
		phy_stop(ndev->phydev);

	mutex_lock(&priv->lock);

	netif_device_detach(ndev);
	stmmac_stop_all_queues(priv);

	stmmac_disable_all_queues(priv);

	/* Stop TX/RX DMA */
	stmmac_stop_all_dma(priv);

	/* Enable Power down mode by programming the PMT regs */
	if (device_may_wakeup(priv->device)) {
		stmmac_pmt(priv, priv->hw, priv->wolopts);
		priv->irq_wake = 1;
	} else {
		stmmac_mac_set(priv, priv->ioaddr, false);
		pinctrl_pm_select_sleep_state(priv->device);
		/* Disable clocks in case PWM is off */
		clk_disable(priv->plat->pclk);
		clk_disable(priv->plat->stmmac_clk);
	}
	mutex_unlock(&priv->lock);

	priv->oldlink = false;
	priv->speed = SPEED_UNKNOWN;
	priv->oldduplex = DUPLEX_UNKNOWN;
	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);

/**
 * stmmac_reset_queues_param - reset queue parameters
 * @priv: driver private structure
 */
static void stmmac_reset_queues_param(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		rx_q->cur_rx = 0;
		rx_q->dirty_rx = 0;
	}

	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		tx_q->cur_tx = 0;
		tx_q->dirty_tx = 0;
		tx_q->mss = 0;
	}
}

/**
 * stmmac_resume - resume callback
 * @dev: device pointer
 * Description: when resume this function is invoked to setup the DMA and CORE
 * in a usable state.
 */
int stmmac_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	if (!netif_running(ndev))
		return 0;

	/* The Power Down bit in the PM register is cleared
	 * automatically as soon as a magic packet or a Wake-up frame
	 * is received. Anyway, it's better to manually clear
	 * this bit because it can generate problems while resuming
	 * from another device (e.g. serial console).
	 */
	if (device_may_wakeup(priv->device)) {
		mutex_lock(&priv->lock);
		stmmac_pmt(priv, priv->hw, 0);
		mutex_unlock(&priv->lock);
		priv->irq_wake = 0;
	} else {
		pinctrl_pm_select_default_state(priv->device);
		/* enable the clk previously disabled */
		clk_enable(priv->plat->stmmac_clk);
		clk_enable(priv->plat->pclk);
		/* reset the phy so that it's ready */
		if (priv->mii)
			stmmac_mdio_reset(priv->mii);
	}

	netif_device_attach(ndev);

	mutex_lock(&priv->lock);

	stmmac_reset_queues_param(priv);

	stmmac_clear_descriptors(priv);

	stmmac_hw_setup(ndev, false);
	stmmac_init_tx_coalesce(priv);
	stmmac_set_rx_mode(ndev);

	stmmac_enable_all_queues(priv);

	stmmac_start_all_queues(priv);

	mutex_unlock(&priv->lock);

	if (ndev->phydev)
		phy_start(ndev->phydev);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_resume);
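
/* Wiring sketch (illustrative): bus glue typically exposes the two callbacks
 * above through its dev_pm_ops, e.g. with something like
 *	SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_suspend, stmmac_resume);
 * possibly wrapped by bus-specific suspend/resume helpers.
 */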

#ifndef MODULE
static int __init stmmac_cmdline_opt(char *str)
{
	char *opt;

	if (!str || !*str)
		return -EINVAL;
	while ((opt = strsep(&str, ",")) != NULL) {
		if (!strncmp(opt, "debug:", 6)) {
			if (kstrtoint(opt + 6, 0, &debug))
				goto err;
		} else if (!strncmp(opt, "phyaddr:", 8)) {
			if (kstrtoint(opt + 8, 0, &phyaddr))
				goto err;
		} else if (!strncmp(opt, "buf_sz:", 7)) {
			if (kstrtoint(opt + 7, 0, &buf_sz))
				goto err;
		} else if (!strncmp(opt, "tc:", 3)) {
			if (kstrtoint(opt + 3, 0, &tc))
				goto err;
		} else if (!strncmp(opt, "watchdog:", 9)) {
			if (kstrtoint(opt + 9, 0, &watchdog))
				goto err;
		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
			if (kstrtoint(opt + 10, 0, &flow_ctrl))
				goto err;
		} else if (!strncmp(opt, "pause:", 6)) {
			if (kstrtoint(opt + 6, 0, &pause))
				goto err;
		} else if (!strncmp(opt, "eee_timer:", 10)) {
			if (kstrtoint(opt + 10, 0, &eee_timer))
				goto err;
		} else if (!strncmp(opt, "chain_mode:", 11)) {
			if (kstrtoint(opt + 11, 0, &chain_mode))
				goto err;
		}
	}
	return 0;

err:
	pr_err("%s: ERROR broken module parameter conversion", __func__);
	return -EINVAL;
}

__setup("stmmaceth=", stmmac_cmdline_opt);
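
/* Example (illustrative): for a built-in driver the options parsed above can
 * also be passed on the kernel command line, e.g.
 *	stmmaceth=debug:16,phyaddr:1,watchdog:10000,eee_timer:2000
 * using the option names checked in stmmac_cmdline_opt().
 */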
#endif /* MODULE */

static int __init stmmac_init(void)
{
#ifdef CONFIG_DEBUG_FS
	/* Create debugfs main directory if it doesn't exist yet */
	if (!stmmac_fs_dir) {
		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);

		if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
			pr_err("ERROR %s, debugfs create directory failed\n",
			       STMMAC_RESOURCE_NAME);

			return -ENOMEM;
		}
	}
#endif

	return 0;
}

static void __exit stmmac_exit(void)
{
#ifdef CONFIG_DEBUG_FS
	debugfs_remove_recursive(stmmac_fs_dir);
#endif
}

module_init(stmmac_init)
module_exit(stmmac_exit)

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");