// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd


  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include <linux/phylink.h>
#include <linux/udp.h>
#include <net/pkt_cls.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"
#include "dwxgmac2.h"
#include "hwif.h"

#define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
#define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, 0644);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH(x)	((x)->dma_tx_size / 4)
#define STMMAC_RX_THRESH(x)	((x)->dma_rx_size / 4)
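/* TX/RX ring thresholds, expressed as one quarter of the configured ring size */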

static int flow_ctrl = FLOW_AUTO;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define	STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))

/* By default the driver will use the ring mode to manage tx and rx descriptors,
 * but it is possible to force the use of the chain mode instead of the ring.
 */
static unsigned int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
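
/* The module parameters above can be overridden at load time, e.g.
 * (illustrative): "modprobe stmmac chain_mode=1 watchdog=10000", or via the
 * kernel command line as "stmmac.chain_mode=1" when the driver is built in.
 */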

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_DEBUG_FS
static const struct net_device_ops stmmac_netdev_ops;
static void stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
 * errors.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DEFAULT_BUFSIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < rx_queues_cnt)
			napi_disable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_disable(&ch->tx_napi);
	}
}

/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 */
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < rx_queues_cnt)
			napi_enable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_enable(&ch->tx_napi);
	}
}

static void stmmac_service_event_schedule(struct stmmac_priv *priv)
{
	if (!test_bit(STMMAC_DOWN, &priv->state) &&
	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
		queue_work(priv->wq, &priv->service_task);
}

static void stmmac_global_err(struct stmmac_priv *priv)
{
	netif_carrier_off(priv->dev);
	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
	stmmac_service_event_schedule(priv);
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Otherwise the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* Platform provided default clk_csr would be assumed valid
	 * for all other cases except for the below mentioned ones.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we cannot estimate the proper divider as the frequency of
	 * clk_csr_i is not known. So we do not change the default
	 * divider.
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}

	if (priv->plat->has_sun8i) {
		if (clk_rate > 160000000)
			priv->clk_csr = 0x03;
		else if (clk_rate > 80000000)
			priv->clk_csr = 0x02;
		else if (clk_rate > 40000000)
			priv->clk_csr = 0x01;
		else
			priv->clk_csr = 0;
	}

	if (priv->plat->has_xgmac) {
		if (clk_rate > 400000000)
			priv->clk_csr = 0x5;
		else if (clk_rate > 350000000)
			priv->clk_csr = 0x4;
		else if (clk_rate > 300000000)
			priv->clk_csr = 0x3;
		else if (clk_rate > 250000000)
			priv->clk_csr = 0x2;
		else if (clk_rate > 150000000)
			priv->clk_csr = 0x1;
		else
			priv->clk_csr = 0x0;
	}
}

static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

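/**
 * stmmac_tx_avail - get the number of available TX descriptors
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: computes, from the cur_tx/dirty_tx indexes, how many
 * descriptors are still free in the TX ring of @queue; one entry is always
 * kept unused so that a full ring can be told apart from an empty one.
 */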
static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	u32 avail;

	if (tx_q->dirty_tx > tx_q->cur_tx)
		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
	else
		avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;

	return avail;
}

/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	u32 dirty;

	if (rx_q->dirty_rx <= rx_q->cur_rx)
		dirty = rx_q->cur_rx - rx_q->dirty_rx;
	else
		dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;

	return dirty;
}

/**
 * stmmac_enable_eee_mode - check and enter LPI mode
 * @priv: driver private structure
 * Description: this function verifies and enters LPI mode in case of EEE.
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* check if all TX queues have the work finished */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		if (tx_q->dirty_tx != tx_q->cur_tx)
			return; /* still unfinished work */
	}

	/* Check and enter in LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		stmmac_set_eee_mode(priv, priv->hw,
				priv->plat->en_tx_lpi_clockgating);
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function exits and disables EEE when the LPI state
 * is true. It is called from the xmit path.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	stmmac_reset_eee_mode(priv, priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t:  timer_list struct containing private info
 * Description:
 *  if there is no data transfer and if we are not in the LPI state,
 *  then the MAC transmitter can be moved to the LPI state.
 */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);

	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts the
 *  related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	int eee_tw_timer = priv->eee_tw_timer;

	/* Using PCS we cannot deal with the phy registers at this stage,
	 * so we do not support extra features like EEE.
	 */
	if (priv->hw->pcs == STMMAC_PCS_TBI ||
	    priv->hw->pcs == STMMAC_PCS_RTBI)
		return false;

	/* Check if MAC core supports the EEE feature. */
	if (!priv->dma_cap.eee)
		return false;

	mutex_lock(&priv->lock);

	/* Check if it needs to be deactivated */
	if (!priv->eee_active) {
		if (priv->eee_enabled) {
			netdev_dbg(priv->dev, "disable EEE\n");
			del_timer_sync(&priv->eee_ctrl_timer);
			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
		}
		mutex_unlock(&priv->lock);
		return false;
	}

	if (priv->eee_active && !priv->eee_enabled) {
		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
				     eee_tw_timer);
	}

	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));

	mutex_unlock(&priv->lock);
	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	return true;
}

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read the timestamp from the descriptor and pass it to
 * the stack. It also performs some sanity checks.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
408
				   struct dma_desc *p, struct sk_buff *skb)
409 410
{
	struct skb_shared_hwtstamps shhwtstamp;
411
	bool found = false;
412
	u64 ns = 0;
413 414 415 416

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
418
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
419 420 421
		return;

	/* check tx tstamp status */
422 423
	if (stmmac_get_tx_timestamp_status(priv, p)) {
		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
424 425 426 427
		found = true;
	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
		found = true;
	}
428

429
	if (found) {
430 431
		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);
432

433
		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
434 435 436
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}
437 438
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read the received packet's timestamp from the descriptor
 * and pass it to the stack. It also performs some sanity checks.
 */
448 449
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
450 451
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
452
	struct dma_desc *desc = p;
453
	u64 ns = 0;
454 455 456

	if (!priv->hwts_rx_en)
		return;
457
	/* For GMAC4, the valid timestamp is from CTX next desc. */
458
	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
459
		desc = np;
460

461
	/* Check if timestamp is available */
462 463
	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
464
		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
465 466 467 468
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else  {
469
		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
470
	}
471 472 473
}

/**
 *  stmmac_hwtstamp_set - control hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable time stamping of
 *  both outgoing (TX) and incoming (RX) packets, based on user input.
 *  Return Value:
 *  0 on success and an appropriate -ve integer on failure.
 */
484
static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
485 486 487
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	struct timespec64 now;
489 490 491 492 493 494 495 496 497
	u64 temp = 0;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;
498
	u32 sec_inc = 0;
499
	u32 value = 0;
500 501 502
	bool xmac;

	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
503 504 505 506 507 508 509 510 511 512

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
513
			   sizeof(config)))
514 515
		return -EFAULT;

516 517
	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);
518 519 520 521 522

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

523 524
	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
525 526 527 528 529
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
531 532 533 534
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
536
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
537 538 539 540 541 542 543
			/* 'xmac' hardware can support Sync, Pdelay_Req and
			 * Pdelay_resp by setting bit14 and bits17/16 to 01
			 * This leaves Delay_Req timestamps out.
			 * Enable all events *and* general purpose message
			 * timestamping
			 */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
544 545 546 547 548
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
550 551 552 553 554 555 556 557 558
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
560 561 562 563 564 565 566 567 568 569
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
571 572 573
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
574
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
575 576 577 578 579 580

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
582 583 584 585 586 587 588 589 590 591
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
593 594 595 596 597 598 599 600 601 602 603
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
605 606
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
607
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
608 609
			if (priv->synopsys_id != DWMAC_CORE_5_10)
				ts_event_en = PTP_TCR_TSEVNTENA;
610 611 612 613 614 615
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
617 618 619 620 621 622 623 624 625 626 627
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
629 630 631 632 633 634 635 636 637 638 639
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

640
		case HWTSTAMP_FILTER_NTP_ALL:
641
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
662
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
663 664

	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
665
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
666 667
	else {
		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
			 tstamp_all | ptp_v2 | ptp_over_ethernet |
			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
			 ts_master_en | snap_type_sel);
671
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
672 673

		/* program Sub Second Increment reg */
674 675
		stmmac_config_sub_second_increment(priv,
				priv->ptpaddr, priv->plat->clk_ptp_rate,
676
				xmac, &sec_inc);
677
		temp = div_u64(1000000000ULL, sec_inc);
678

679 680 681 682
		/* Store sub second increment and flags for later use */
		priv->sub_second_inc = sec_inc;
		priv->systime_flags = value;

683 684 685
		/* calculate default added value:
		 * formula is :
		 * addend = (2^32)/freq_div_ratio;
686
		 * where, freq_div_ratio = 1e9ns/sec_inc
687
		 */
688
		temp = (u64)(temp << 32);
689
		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
690
		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
691 692

		/* initialize system time */
		ktime_get_real_ts64(&now);

		/* lower 32 bits of tv_sec are safe until y2106 */
696 697
		stmmac_init_systime(priv, priv->ptpaddr,
				(u32)now.tv_sec, now.tv_nsec);
698 699
	}

700 701
	memcpy(&priv->tstamp_config, &config, sizeof(config));

702
	return copy_to_user(ifr->ifr_data, &config,
703 704 705 706 707 708 709 710 711 712
			    sizeof(config)) ? -EFAULT : 0;
}

/**
 *  stmmac_hwtstamp_get - read hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function obtains the current hardware timestamping settings
 *  as requested.
714 715 716 717 718 719 720 721 722 723 724
 */
static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config *config = &priv->tstamp_config;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	return copy_to_user(ifr->ifr_data, config,
			    sizeof(*config)) ? -EFAULT : 0;
725 726
}

727
/**
728
 * stmmac_init_ptp - init PTP
729
 * @priv: driver private structure
730
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
731
 * This is done by looking at the HW cap. register.
732
 * This function also registers the ptp driver.
733
 */
734
static int stmmac_init_ptp(struct stmmac_priv *priv)
735
{
736 737
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

738 739 740
	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

741
	priv->adv_ts = 0;
742 743
	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
	if (xmac && priv->dma_cap.atime_stamp)
744 745 746
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
747 748
		priv->adv_ts = 1;

749 750
	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
751

752 753 754
	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");
755 756 757

	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;
758

759 760 761
	stmmac_ptp_register(priv);

	return 0;
762 763 764 765
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
766
	clk_disable_unprepare(priv->plat->clk_ptp_ref);
767
	stmmac_ptp_unregister(priv);
768 769
}

770 771 772
/**
 *  stmmac_mac_flow_ctrl - Configure flow control in all queues
 *  @priv: driver private structure
773
 *  @duplex: duplex passed to the next function
774 775 776 777 778 779
 *  Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;

780 781
	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
			priv->pause, tx_cnt);
782 783
}

784 785 786 787 788
static void stmmac_validate(struct phylink_config *config,
			    unsigned long *supported,
			    struct phylink_link_state *state)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
789
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
790 791 792 793
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	int tx_cnt = priv->plat->tx_queues_to_use;
	int max_speed = priv->plat->max_speed;

794 795 796 797
	phylink_set(mac_supported, 10baseT_Half);
	phylink_set(mac_supported, 10baseT_Full);
	phylink_set(mac_supported, 100baseT_Half);
	phylink_set(mac_supported, 100baseT_Full);
798 799 800
	phylink_set(mac_supported, 1000baseT_Half);
	phylink_set(mac_supported, 1000baseT_Full);
	phylink_set(mac_supported, 1000baseKX_Full);
801 802 803 804 805 806

	phylink_set(mac_supported, Autoneg);
	phylink_set(mac_supported, Pause);
	phylink_set(mac_supported, Asym_Pause);
	phylink_set_port_modes(mac_supported);

807 808 809 810
	/* Cut down 1G if asked to */
	if ((max_speed > 0) && (max_speed < 1000)) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
811
	} else if (priv->plat->has_xgmac) {
812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827
		if (!max_speed || (max_speed >= 2500)) {
			phylink_set(mac_supported, 2500baseT_Full);
			phylink_set(mac_supported, 2500baseX_Full);
		}
		if (!max_speed || (max_speed >= 5000)) {
			phylink_set(mac_supported, 5000baseT_Full);
		}
		if (!max_speed || (max_speed >= 10000)) {
			phylink_set(mac_supported, 10000baseSR_Full);
			phylink_set(mac_supported, 10000baseLR_Full);
			phylink_set(mac_supported, 10000baseER_Full);
			phylink_set(mac_supported, 10000baseLRM_Full);
			phylink_set(mac_supported, 10000baseT_Full);
			phylink_set(mac_supported, 10000baseKX4_Full);
			phylink_set(mac_supported, 10000baseKR_Full);
		}
		if (!max_speed || (max_speed >= 25000)) {
			phylink_set(mac_supported, 25000baseCR_Full);
			phylink_set(mac_supported, 25000baseKR_Full);
			phylink_set(mac_supported, 25000baseSR_Full);
		}
		if (!max_speed || (max_speed >= 40000)) {
			phylink_set(mac_supported, 40000baseKR4_Full);
			phylink_set(mac_supported, 40000baseCR4_Full);
			phylink_set(mac_supported, 40000baseSR4_Full);
			phylink_set(mac_supported, 40000baseLR4_Full);
		}
		if (!max_speed || (max_speed >= 50000)) {
			phylink_set(mac_supported, 50000baseCR2_Full);
			phylink_set(mac_supported, 50000baseKR2_Full);
			phylink_set(mac_supported, 50000baseSR2_Full);
			phylink_set(mac_supported, 50000baseKR_Full);
			phylink_set(mac_supported, 50000baseSR_Full);
			phylink_set(mac_supported, 50000baseCR_Full);
			phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
			phylink_set(mac_supported, 50000baseDR_Full);
		}
		if (!max_speed || (max_speed >= 100000)) {
			phylink_set(mac_supported, 100000baseKR4_Full);
			phylink_set(mac_supported, 100000baseSR4_Full);
			phylink_set(mac_supported, 100000baseCR4_Full);
			phylink_set(mac_supported, 100000baseLR4_ER4_Full);
			phylink_set(mac_supported, 100000baseKR2_Full);
			phylink_set(mac_supported, 100000baseSR2_Full);
			phylink_set(mac_supported, 100000baseCR2_Full);
			phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
			phylink_set(mac_supported, 100000baseDR2_Full);
		}
860 861 862 863 864 865 866 867 868
	}

	/* Half-Duplex can only work with single queue */
	if (tx_cnt > 1) {
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 1000baseT_Half);
	}

869 870 871 872 873
	linkmode_and(supported, supported, mac_supported);
	linkmode_andnot(supported, supported, mask);

	linkmode_and(state->advertising, state->advertising, mac_supported);
	linkmode_andnot(state->advertising, state->advertising, mask);
874 875 876

	/* If PCS is supported, check which modes it supports. */
	stmmac_xpcs_validate(priv, &priv->hw->xpcs_args, supported, state);
877 878
}

879 880
static void stmmac_mac_pcs_get_state(struct phylink_config *config,
				     struct phylink_link_state *state)
881
{
882 883
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

884
	state->link = 0;
885
	stmmac_xpcs_get_state(priv, &priv->hw->xpcs_args, state);
886 887
}

888 889
static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
890
{
891 892 893
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	stmmac_xpcs_config(priv, &priv->hw->xpcs_args, state);
894 895 896 897 898 899 900 901 902 903 904 905 906 907
}

static void stmmac_mac_an_restart(struct phylink_config *config)
{
	/* Not Supported */
}

static void stmmac_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	stmmac_mac_set(priv, priv->ioaddr, false);
	priv->eee_active = false;
908
	priv->tx_lpi_enabled = false;
909 910 911 912 913 914 915 916 917
	stmmac_eee_init(priv);
	stmmac_set_eee_pls(priv, priv->hw, false);
}

static void stmmac_mac_link_up(struct phylink_config *config,
			       struct phy_device *phy,
			       unsigned int mode, phy_interface_t interface,
			       int speed, int duplex,
			       bool tx_pause, bool rx_pause)
918
{
919
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
920 921
	u32 ctrl;

922 923
	stmmac_xpcs_link_up(priv, &priv->hw->xpcs_args, speed, interface);

924
	ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
925
	ctrl &= ~priv->hw->link.speed_mask;
926

927 928
	if (interface == PHY_INTERFACE_MODE_USXGMII) {
		switch (speed) {
929 930 931 932 933 934 935 936 937 938 939 940
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_5000:
			ctrl |= priv->hw->link.xgmii.speed5000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.xgmii.speed2500;
			break;
		default:
			return;
		}
	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
		switch (speed) {
		case SPEED_100000:
			ctrl |= priv->hw->link.xlgmii.speed100000;
			break;
		case SPEED_50000:
			ctrl |= priv->hw->link.xlgmii.speed50000;
			break;
		case SPEED_40000:
			ctrl |= priv->hw->link.xlgmii.speed40000;
			break;
		case SPEED_25000:
			ctrl |= priv->hw->link.xlgmii.speed25000;
			break;
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		default:
			return;
		}
967
	} else {
968
		switch (speed) {
969 970 971 972 973 974 975 976 977 978 979 980 981 982 983
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		case SPEED_100:
			ctrl |= priv->hw->link.speed100;
			break;
		case SPEED_10:
			ctrl |= priv->hw->link.speed10;
			break;
		default:
			return;
		}
984 985
	}

986
	priv->speed = speed;
987

988
	if (priv->plat->fix_mac_speed)
989
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
990

991
	if (!duplex)
992 993 994
		ctrl &= ~priv->hw->link.duplex;
	else
		ctrl |= priv->hw->link.duplex;
995 996

	/* Flow Control operation */
997 998
	if (tx_pause && rx_pause)
		stmmac_mac_flow_ctrl(priv, duplex);
999 1000 1001 1002

	writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

	stmmac_mac_set(priv, priv->ioaddr, true);
1003
	if (phy && priv->dma_cap.eee) {
1004 1005
		priv->eee_active = phy_init_eee(phy, 1) >= 0;
		priv->eee_enabled = stmmac_eee_init(priv);
1006
		priv->tx_lpi_enabled = priv->eee_enabled;
1007 1008
		stmmac_set_eee_pls(priv, priv->hw, true);
	}
1009 1010
}

1011
static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1012
	.validate = stmmac_validate,
1013
	.mac_pcs_get_state = stmmac_mac_pcs_get_state,
1014
	.mac_config = stmmac_mac_config,
1015
	.mac_an_restart = stmmac_mac_an_restart,
1016 1017
	.mac_link_down = stmmac_mac_link_down,
	.mac_link_up = stmmac_mac_link_up,
1018 1019
};

1020
/**
1021
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1022 1023 1024 1025 1026
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS.
 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
 */
1027 1028 1029 1030 1031
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
	int interface = priv->plat->interface;

	if (priv->dma_cap.pcs) {
		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1036
			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1037
			priv->hw->pcs = STMMAC_PCS_RGMII;
		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1039
			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1040
			priv->hw->pcs = STMMAC_PCS_SGMII;
1041 1042 1043 1044
		}
	}
}

1045 1046 1047 1048 1049 1050 1051 1052 1053 1054
/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 *  Return value:
 *  0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
1055
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1056
	struct stmmac_priv *priv = netdev_priv(dev);
1057 1058
	struct device_node *node;
	int ret;
1059

1060
	node = priv->plat->phylink_node;
1061

1062
	if (node)
1063
		ret = phylink_of_phy_connect(priv->phylink, node, 0);
1064 1065 1066 1067 1068

	/* Some DT bindings do not set-up the PHY handle. Let's try to
	 * manually parse it
	 */
	if (!node || ret) {
1069 1070
		int addr = priv->plat->phy_addr;
		struct phy_device *phydev;
1071

1072 1073 1074
		phydev = mdiobus_get_phy(priv->mii, addr);
		if (!phydev) {
			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1075
			return -ENODEV;
1076
		}
1077

1078
		ret = phylink_connect_phy(priv->phylink, phydev);
1079 1080
	}

1081 1082 1083
	phylink_ethtool_get_wol(priv->phylink, &wol);
	device_set_wakeup_capable(priv->device, !!wol.supported);

1084 1085
	return ret;
}
1086

1087 1088
static int stmmac_phy_setup(struct stmmac_priv *priv)
{
1089
	struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
1090
	int mode = priv->plat->phy_interface;
1091
	struct phylink *phylink;
1092

1093 1094
	priv->phylink_config.dev = &priv->dev->dev;
	priv->phylink_config.type = PHYLINK_NETDEV;
1095
	priv->phylink_config.pcs_poll = true;
1096

1097 1098 1099
	if (!fwnode)
		fwnode = dev_fwnode(priv->device);

1100
	phylink = phylink_create(&priv->phylink_config, fwnode,
1101 1102 1103
				 mode, &stmmac_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);
1104

1105
	priv->phylink = phylink;
1106 1107 1108
	return 0;
}

1109
static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1110
{
1111
	u32 rx_cnt = priv->plat->rx_queues_to_use;
1112
	void *head_rx;
1113
	u32 queue;
1114

1115 1116 1117
	/* Display RX rings */
	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1118

1119 1120 1121 1122 1123 1124 1125 1126
		pr_info("\tRX Queue %u rings\n", queue);

		if (priv->extend_desc)
			head_rx = (void *)rx_q->dma_erx;
		else
			head_rx = (void *)rx_q->dma_rx;

		/* Display RX ring */
1127
		stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true);
1128
	}
1129 1130 1131 1132
}

static void stmmac_display_tx_rings(struct stmmac_priv *priv)
{
1133
	u32 tx_cnt = priv->plat->tx_queues_to_use;
1134
	void *head_tx;
1135
	u32 queue;
1136

1137 1138 1139
	/* Display TX rings */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1140

1141 1142 1143 1144
		pr_info("\tTX Queue %d rings\n", queue);

		if (priv->extend_desc)
			head_tx = (void *)tx_q->dma_etx;
1145 1146
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			head_tx = (void *)tx_q->dma_entx;
1147 1148 1149
		else
			head_tx = (void *)tx_q->dma_tx;

1150
		stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false);
1151
	}
1152 1153
}

1154 1155 1156 1157 1158 1159 1160 1161 1162
static void stmmac_display_rings(struct stmmac_priv *priv)
{
	/* Display RX ring */
	stmmac_display_rx_rings(priv);

	/* Display TX ring */
	stmmac_display_tx_rings(priv);
}

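/**
 * stmmac_set_bfsize - map the MTU to a DMA buffer size
 * @mtu: the configured MTU
 * @bufsize: current buffer size
 * Description: picks the DMA buffer size bucket (DEFAULT_BUFSIZE, 2KiB,
 * 4KiB, 8KiB or 16KiB) used for the given MTU.
 */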
static int stmmac_set_bfsize(int mtu, int bufsize)
{
	int ret = bufsize;

	if (mtu >= BUF_SIZE_8KiB)
		ret = BUF_SIZE_16KiB;
	else if (mtu >= BUF_SIZE_4KiB)
1170 1171 1172
		ret = BUF_SIZE_8KiB;
	else if (mtu >= BUF_SIZE_2KiB)
		ret = BUF_SIZE_4KiB;
1173
	else if (mtu > DEFAULT_BUFSIZE)
1174 1175
		ret = BUF_SIZE_2KiB;
	else
1176
		ret = DEFAULT_BUFSIZE;
1177 1178 1179 1180

	return ret;
}

1181
/**
 * stmmac_clear_rx_descriptors - clear RX descriptors
 * @priv: driver private structure
 * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors,
 * whether basic or extended descriptors are used.
 */
1188
static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1189
{
1190
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1191
	int i;
1192

1193
	/* Clear the RX descriptors */
1194
	for (i = 0; i < priv->dma_rx_size; i++)
1195
		if (priv->extend_desc)
1196 1197
			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
					priv->use_riwt, priv->mode,
1198
					(i == priv->dma_rx_size - 1),
1199
					priv->dma_buf_sz);
1200
		else
1201 1202
			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
					priv->use_riwt, priv->mode,
1203
					(i == priv->dma_rx_size - 1),
1204
					priv->dma_buf_sz);
1205 1206 1207 1208 1209
}

/**
 * stmmac_clear_tx_descriptors - clear tx descriptors
 * @priv: driver private structure
 * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors,
 * whether basic or extended descriptors are used.
 */
1214
static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1215
{
1216
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1217 1218 1219
	int i;

	/* Clear the TX descriptors */
1220 1221
	for (i = 0; i < priv->dma_tx_size; i++) {
		int last = (i == (priv->dma_tx_size - 1));
1222 1223
		struct dma_desc *p;

1224
		if (priv->extend_desc)
1225 1226 1227
			p = &tx_q->dma_etx[i].basic;
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			p = &tx_q->dma_entx[i].basic;
1228
		else
1229 1230 1231 1232
			p = &tx_q->dma_tx[i];

		stmmac_init_tx_desc(priv, p, priv->mode, last);
	}
1233 1234
}

1235 1236 1237 1238 1239 1240 1241 1242
/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the TX and RX descriptors
 * whether basic or extended descriptors are used.
 */
static void stmmac_clear_descriptors(struct stmmac_priv *priv)
{
1243
	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1244
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1245 1246
	u32 queue;

1247
	/* Clear the RX descriptors */
1248 1249
	for (queue = 0; queue < rx_queue_cnt; queue++)
		stmmac_clear_rx_descriptors(priv, queue);
1250 1251

	/* Clear the TX descriptors */
1252 1253
	for (queue = 0; queue < tx_queue_cnt; queue++)
		stmmac_clear_tx_descriptors(priv, queue);
1254 1255
}

1256 1257 1258 1259 1260
/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @p: descriptor pointer
 * @i: descriptor index
1261 1262
 * @flags: gfp flag
 * @queue: RX queue index
1263 1264 1265
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
1266
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1267
				  int i, gfp_t flags, u32 queue)
1268
{
1269
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1270
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1271

1272 1273
	buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
	if (!buf->page)
1274
		return -ENOMEM;
1275

1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286
	if (priv->sph) {
		buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
		if (!buf->sec_page)
			return -ENOMEM;

		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
	} else {
		buf->sec_page = NULL;
	}

1287 1288
	buf->addr = page_pool_get_dma_addr(buf->page);
	stmmac_set_desc_addr(priv, p, buf->addr);
1289 1290
	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
		stmmac_init_desc3(priv, p);
1291 1292 1293 1294

	return 0;
}

1295 1296 1297
/**
 * stmmac_free_rx_buffer - free RX dma buffers
 * @priv: private structure
1298
 * @queue: RX queue index
1299 1300
 * @i: buffer index.
 */
1301
static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1302
{
1303
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1304
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1305

1306
	if (buf->page)
1307
		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1308
	buf->page = NULL;
1309 1310

	if (buf->sec_page)
1311
		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1312
	buf->sec_page = NULL;
1313 1314 1315
}

/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 * @i: buffer index.
 */
1321
static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1322
{
1323 1324 1325 1326
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

	if (tx_q->tx_skbuff_dma[i].buf) {
		if (tx_q->tx_skbuff_dma[i].map_as_page)
1327
			dma_unmap_page(priv->device,
1328 1329
				       tx_q->tx_skbuff_dma[i].buf,
				       tx_q->tx_skbuff_dma[i].len,
1330 1331 1332
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device,
1333 1334
					 tx_q->tx_skbuff_dma[i].buf,
					 tx_q->tx_skbuff_dma[i].len,
1335 1336 1337
					 DMA_TO_DEVICE);
	}

1338 1339 1340 1341 1342
	if (tx_q->tx_skbuff[i]) {
		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
		tx_q->tx_skbuff[i] = NULL;
		tx_q->tx_skbuff_dma[i].buf = 0;
		tx_q->tx_skbuff_dma[i].map_as_page = false;
1343 1344 1345 1346 1347
	}
}

/**
 * init_dma_rx_desc_rings - init the RX descriptor rings
1348
 * @dev: net device structure
1349
 * @flags: gfp flag.
1350
 * Description: this function initializes the DMA RX descriptors
1351
 * and allocates the socket buffers. It supports the chained and ring
1352
 * modes.
1353
 */
1354
static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1355 1356
{
	struct stmmac_priv *priv = netdev_priv(dev);
1357
	u32 rx_count = priv->plat->rx_queues_to_use;
1358
	int ret = -ENOMEM;
1359
	int queue;
1360
	int i;
1361

1362
	/* RX INITIALIZATION */
1363 1364
	netif_dbg(priv, probe, priv->dev,
		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1365

1366 1367
	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1368

1369 1370 1371
		netif_dbg(priv, probe, priv->dev,
			  "(%s) dma_rx_phy=0x%08x\n", __func__,
			  (u32)rx_q->dma_rx_phy);

1373 1374
		stmmac_clear_rx_descriptors(priv, queue);

1375
		for (i = 0; i < priv->dma_rx_size; i++) {
1376
			struct dma_desc *p;
1377

1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389
			if (priv->extend_desc)
				p = &((rx_q->dma_erx + i)->basic);
			else
				p = rx_q->dma_rx + i;

			ret = stmmac_init_rx_buffers(priv, p, i, flags,
						     queue);
			if (ret)
				goto err_init_rx_buffers;
		}

		rx_q->cur_rx = 0;
1390
		rx_q->dirty_rx = (unsigned int)(i - priv->dma_rx_size);
1391 1392 1393 1394

		/* Setup the chained descriptor addresses */
		if (priv->mode == STMMAC_CHAIN_MODE) {
			if (priv->extend_desc)
1395
				stmmac_mode_init(priv, rx_q->dma_erx,
1396 1397
						 rx_q->dma_rx_phy,
						 priv->dma_rx_size, 1);
1398
			else
1399
				stmmac_mode_init(priv, rx_q->dma_rx,
1400 1401
						 rx_q->dma_rx_phy,
						 priv->dma_rx_size, 0);
1402
		}
1403 1404 1405
	}

	return 0;
1406

1407
err_init_rx_buffers:
1408 1409 1410 1411 1412 1413 1414
	while (queue >= 0) {
		while (--i >= 0)
			stmmac_free_rx_buffer(priv, queue, i);

		if (queue == 0)
			break;

1415
		i = priv->dma_rx_size;
1416 1417 1418
		queue--;
	}

1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431
	return ret;
}

/**
 * init_dma_tx_desc_rings - init the TX descriptor rings
 * @dev: net device structure.
 * Description: this function initializes the DMA TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_tx_desc_rings(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
1432 1433
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;
1434 1435
	int i;

1436 1437
	for (queue = 0; queue < tx_queue_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1438

1439 1440 1441
		netif_dbg(priv, probe, priv->dev,
			  "(%s) dma_tx_phy=0x%08x\n", __func__,
			 (u32)tx_q->dma_tx_phy);

1443 1444 1445
		/* Setup the chained descriptor addresses */
		if (priv->mode == STMMAC_CHAIN_MODE) {
			if (priv->extend_desc)
1446
				stmmac_mode_init(priv, tx_q->dma_etx,
1447 1448
						 tx_q->dma_tx_phy,
						 priv->dma_tx_size, 1);
1449
			else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1450
				stmmac_mode_init(priv, tx_q->dma_tx,
1451 1452
						 tx_q->dma_tx_phy,
						 priv->dma_tx_size, 0);
1453
		}
1454

1455
		for (i = 0; i < priv->dma_tx_size; i++) {
1456 1457 1458
			struct dma_desc *p;
			if (priv->extend_desc)
				p = &((tx_q->dma_etx + i)->basic);
1459 1460
			else if (tx_q->tbs & STMMAC_TBS_AVAIL)
				p = &((tx_q->dma_entx + i)->basic);
1461 1462 1463
			else
				p = tx_q->dma_tx + i;

1464
			stmmac_clear_desc(priv, p);
1465 1466 1467 1468 1469 1470

			tx_q->tx_skbuff_dma[i].buf = 0;
			tx_q->tx_skbuff_dma[i].map_as_page = false;
			tx_q->tx_skbuff_dma[i].len = 0;
			tx_q->tx_skbuff_dma[i].last_segment = false;
			tx_q->tx_skbuff[i] = NULL;
1471
		}
1472

1473 1474
		tx_q->dirty_tx = 0;
		tx_q->cur_tx = 0;
1475
		tx_q->mss = 0;
1476

1477 1478
		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
	}
1479

1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501
	return 0;
}

/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	ret = init_dma_rx_desc_rings(dev, flags);
	if (ret)
		return ret;

	ret = init_dma_tx_desc_rings(dev);

1502
	stmmac_clear_descriptors(priv);
1503

1504 1505
	if (netif_msg_hw(priv))
		stmmac_display_rings(priv);
1506 1507

	return ret;
1508 1509
}

1510 1511 1512
/**
 * dma_free_rx_skbufs - free RX dma buffers
 * @priv: private structure
1513
 * @queue: RX queue index
1514
 */
1515
static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1516 1517 1518
{
	int i;

1519
	for (i = 0; i < priv->dma_rx_size; i++)
1520
		stmmac_free_rx_buffer(priv, queue, i);
1521 1522
}

1523 1524 1525
/**
 * dma_free_tx_skbufs - free TX dma buffers
 * @priv: private structure
1526
 * @queue: TX queue index
1527
 */
1528
static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1529 1530 1531
{
	int i;

1532
	for (i = 0; i < priv->dma_tx_size; i++)
1533
		stmmac_free_tx_buffer(priv, queue, i);
1534 1535
}

1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553
/**
 * free_dma_rx_desc_resources - free RX dma desc resources
 * @priv: private structure
 */
static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
{
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 queue;

	/* Free RX queue resources */
	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		/* Release the DMA RX socket buffers */
		dma_free_rx_skbufs(priv, queue);

		/* Free DMA regions of consistent memory previously allocated */
		if (!priv->extend_desc)
1554 1555
			dma_free_coherent(priv->device, priv->dma_rx_size *
					  sizeof(struct dma_desc),
1556 1557
					  rx_q->dma_rx, rx_q->dma_rx_phy);
		else
1558
			dma_free_coherent(priv->device, priv->dma_rx_size *
1559 1560 1561
					  sizeof(struct dma_extended_desc),
					  rx_q->dma_erx, rx_q->dma_rx_phy);

1562
		kfree(rx_q->buf_pool);
1563
		if (rx_q->page_pool)
1564
			page_pool_destroy(rx_q->page_pool);
1565 1566 1567
	}
}

1568 1569 1570 1571 1572 1573 1574
/**
 * free_dma_tx_desc_resources - free TX dma desc resources
 * @priv: private structure
 */
static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
{
	u32 tx_count = priv->plat->tx_queues_to_use;
1575
	u32 queue;
1576 1577 1578 1579

	/* Free TX queue resources */
	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1580 1581
		size_t size;
		void *addr;
1582 1583 1584 1585

		/* Release the DMA TX socket buffers */
		dma_free_tx_skbufs(priv, queue);

1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596
		if (priv->extend_desc) {
			size = sizeof(struct dma_extended_desc);
			addr = tx_q->dma_etx;
		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
			size = sizeof(struct dma_edesc);
			addr = tx_q->dma_entx;
		} else {
			size = sizeof(struct dma_desc);
			addr = tx_q->dma_tx;
		}

1597
		size *= priv->dma_tx_size;
1598 1599

		dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1600 1601 1602 1603 1604 1605

		kfree(tx_q->tx_skbuff_dma);
		kfree(tx_q->tx_skbuff);
	}
}

1606
/**
1607
 * alloc_dma_rx_desc_resources - alloc RX resources.
1608 1609
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
1610 1611 1612
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffer in order to
 * allow zero-copy mechanism.
1613
 */
1614
static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1615
{
1616
	u32 rx_count = priv->plat->rx_queues_to_use;
1617
	int ret = -ENOMEM;
1618
	u32 queue;
1619

1620 1621 1622
	/* RX queues buffers and DMA */
	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1623
		struct page_pool_params pp_params = { 0 };
		unsigned int num_pages;
1625

1626 1627
		rx_q->queue_index = queue;
		rx_q->priv_data = priv;
1628

1629
		pp_params.flags = PP_FLAG_DMA_MAP;
1630
		pp_params.pool_size = priv->dma_rx_size;
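		/* Each pool entry must be able to hold one full DMA buffer,
		 * so the page allocation order is derived from the number of
		 * pages needed per buffer.
		 */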
		num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
		pp_params.order = ilog2(num_pages);
1633 1634 1635 1636 1637 1638 1639 1640
		pp_params.nid = dev_to_node(priv->device);
		pp_params.dev = priv->device;
		pp_params.dma_dir = DMA_FROM_DEVICE;

		rx_q->page_pool = page_pool_create(&pp_params);
		if (IS_ERR(rx_q->page_pool)) {
			ret = PTR_ERR(rx_q->page_pool);
			rx_q->page_pool = NULL;
1641
			goto err_dma;
1642
		}
1643

1644 1645
		rx_q->buf_pool = kcalloc(priv->dma_rx_size,
					 sizeof(*rx_q->buf_pool),
1646
					 GFP_KERNEL);
1647
		if (!rx_q->buf_pool)
1648
			goto err_dma;
1649 1650

		if (priv->extend_desc) {
1651
			rx_q->dma_erx = dma_alloc_coherent(priv->device,
1652 1653
							   priv->dma_rx_size *
							   sizeof(struct dma_extended_desc),
1654 1655
							   &rx_q->dma_rx_phy,
							   GFP_KERNEL);
1656 1657 1658 1659
			if (!rx_q->dma_erx)
				goto err_dma;

		} else {
1660
			rx_q->dma_rx = dma_alloc_coherent(priv->device,
1661 1662
							  priv->dma_rx_size *
							  sizeof(struct dma_desc),
1663 1664
							  &rx_q->dma_rx_phy,
							  GFP_KERNEL);
1665 1666 1667
			if (!rx_q->dma_rx)
				goto err_dma;
		}
1668 1669 1670 1671 1672
	}

	return 0;

err_dma:
1673 1674
	free_dma_rx_desc_resources(priv);

1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687
	return ret;
}

/**
 * alloc_dma_tx_desc_resources - alloc TX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffer in order to
 * allow zero-copy mechanism.
 */
static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
{
	u32 tx_count = priv->plat->tx_queues_to_use;
	int ret = -ENOMEM;
	u32 queue;

	/* TX queues buffers and DMA */
	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
		size_t size;
		void *addr;

		tx_q->queue_index = queue;
		tx_q->priv_data = priv;

		tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
					      sizeof(*tx_q->tx_skbuff_dma),
					      GFP_KERNEL);
		if (!tx_q->tx_skbuff_dma)
			goto err_dma;

		tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
					  sizeof(struct sk_buff *),
					  GFP_KERNEL);
		if (!tx_q->tx_skbuff)
			goto err_dma;

		if (priv->extend_desc)
			size = sizeof(struct dma_extended_desc);
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			size = sizeof(struct dma_edesc);
		else
			size = sizeof(struct dma_desc);

		size *= priv->dma_tx_size;

		addr = dma_alloc_coherent(priv->device, size,
					  &tx_q->dma_tx_phy, GFP_KERNEL);
		if (!addr)
			goto err_dma;

		if (priv->extend_desc)
			tx_q->dma_etx = addr;
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			tx_q->dma_entx = addr;
		else
			tx_q->dma_tx = addr;
	}

	return 0;

err_dma:
	free_dma_tx_desc_resources(priv);
	return ret;
}
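
/*
 * Sketch of the per-queue TX footprint implied by the allocation above
 * (for illustration; the exact sizes depend on the descriptor layout):
 *
 *	tx_skbuff_dma:	dma_tx_size * sizeof(*tx_q->tx_skbuff_dma)   (kcalloc)
 *	tx_skbuff:	dma_tx_size * sizeof(struct sk_buff *)       (kcalloc)
 *	descriptors:	dma_tx_size * sizeof(struct dma_desc), or of
 *			dma_extended_desc / dma_edesc                (coherent)
 *
 * Only the descriptor ring lives in DMA-coherent memory; the two arrays are
 * plain kernel memory used for bookkeeping in stmmac_tx_clean().
 */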

/**
 * alloc_dma_desc_resources - alloc TX/RX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffer in order to
 * allow zero-copy mechanism.
 */
static int alloc_dma_desc_resources(struct stmmac_priv *priv)
{
	/* RX Allocation */
	int ret = alloc_dma_rx_desc_resources(priv);

	if (ret)
		return ret;

	ret = alloc_dma_tx_desc_resources(priv);

	return ret;
}

/**
 * free_dma_desc_resources - free dma desc resources
 * @priv: private structure
 */
static void free_dma_desc_resources(struct stmmac_priv *priv)
{
	/* Release the DMA RX socket buffers */
	free_dma_rx_desc_resources(priv);

	/* Release the DMA TX socket buffers */
	free_dma_tx_desc_resources(priv);
}

/**
 *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
 *  @priv: driver private structure
 *  Description: It is used for enabling the rx queues in the MAC
 */
static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	int queue;
	u8 mode;

	for (queue = 0; queue < rx_queues_count; queue++) {
		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
	}
}

/**
 * stmmac_start_rx_dma - start RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This starts a RX DMA channel
 */
static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
	stmmac_start_rx(priv, priv->ioaddr, chan);
}

/**
 * stmmac_start_tx_dma - start TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This starts a TX DMA channel
 */
static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
	stmmac_start_tx(priv, priv->ioaddr, chan);
}

/**
 * stmmac_stop_rx_dma - stop RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This stops a RX DMA channel
 */
static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
	stmmac_stop_rx(priv, priv->ioaddr, chan);
}

/**
 * stmmac_stop_tx_dma - stop TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This stops a TX DMA channel
 */
static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
	stmmac_stop_tx(priv, priv->ioaddr, chan);
}

/**
 * stmmac_start_all_dma - start all RX and TX DMA channels
 * @priv: driver private structure
 * Description:
 * This starts all the RX and TX DMA channels
 */
static void stmmac_start_all_dma(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan = 0;

	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_start_rx_dma(priv, chan);

	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_start_tx_dma(priv, chan);
}

/**
 * stmmac_stop_all_dma - stop all RX and TX DMA channels
 * @priv: driver private structure
 * Description:
 * This stops the RX and TX DMA channels
 */
static void stmmac_stop_all_dma(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan = 0;

	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_stop_rx_dma(priv, chan);

	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_stop_tx_dma(priv, chan);
}

/**
 *  stmmac_dma_operation_mode - HW DMA operation mode
 *  @priv: driver private structure
 *  Description: it is used for configuring the DMA operation mode register in
 *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
 */
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	int rxfifosz = priv->plat->rx_fifo_size;
	int txfifosz = priv->plat->tx_fifo_size;
	u32 txmode = 0;
	u32 rxmode = 0;
	u32 chan = 0;
	u8 qmode = 0;

	if (rxfifosz == 0)
		rxfifosz = priv->dma_cap.rx_fifo_size;
	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	/* Adjust for real per queue fifo size */
	rxfifosz /= rx_channels_count;
	txfifosz /= tx_channels_count;

	if (priv->plat->force_thresh_dma_mode) {
		txmode = tc;
		rxmode = tc;
	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
		/*
		 * In case of GMAC, SF mode can be enabled
		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE if actually supported
		 * 2) There is no bugged Jumbo frame support
		 *    that needs to not insert csum in the TDES.
		 */
		txmode = SF_DMA_MODE;
		rxmode = SF_DMA_MODE;
		priv->xstats.threshold = SF_DMA_MODE;
	} else {
		txmode = tc;
		rxmode = SF_DMA_MODE;
	}

	/* configure all channels */
	for (chan = 0; chan < rx_channels_count; chan++) {
		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;

		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
				rxfifosz, qmode);
		stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
				chan);
	}

	for (chan = 0; chan < tx_channels_count; chan++) {
		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;

		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
				txfifosz, qmode);
	}
}
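
/*
 * Rough example of the FIFO split above (illustrative numbers, not taken
 * from a specific SoC): with a 16384 byte TX FIFO reported in dma_cap and
 * tx_queues_to_use = 4, each channel is programmed as if it owned
 * 16384 / 4 = 4096 bytes.  The RX FIFO is divided the same way, so the
 * thresholds handed to stmmac_dma_{rx,tx}_mode() stay consistent with the
 * share of FIFO a queue can actually use.
 */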

/**
 * stmmac_tx_clean - to manage the transmission completion
 * @priv: driver private structure
 * @budget: napi budget limiting this function's packet handling
 * @queue: TX queue index
 * Description: it reclaims the transmit resources after transmission completes.
 */
static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	unsigned int bytes_compl = 0, pkts_compl = 0;
	unsigned int entry, count = 0;

	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));

	priv->xstats.tx_clean++;

	entry = tx_q->dirty_tx;
	while ((entry != tx_q->cur_tx) && (count < budget)) {
		struct sk_buff *skb = tx_q->tx_skbuff[entry];
		struct dma_desc *p;
		int status;

		if (priv->extend_desc)
			p = (struct dma_desc *)(tx_q->dma_etx + entry);
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			p = &tx_q->dma_entx[entry].basic;
		else
			p = tx_q->dma_tx + entry;

		status = stmmac_tx_status(priv, &priv->dev->stats,
				&priv->xstats, p, priv->ioaddr);
		/* Check if the descriptor is owned by the DMA */
		if (unlikely(status & tx_dma_own))
			break;

		count++;

		/* Make sure descriptor fields are read after reading
		 * the own bit.
		 */
		dma_rmb();

		/* Just consider the last segment and ...*/
		if (likely(!(status & tx_not_ls))) {
			/* ... verify the status error condition */
			if (unlikely(status & tx_err)) {
				priv->dev->stats.tx_errors++;
			} else {
				priv->dev->stats.tx_packets++;
				priv->xstats.tx_pkt_n++;
			}
			stmmac_get_tx_hwtstamp(priv, p, skb);
		}

		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
			if (tx_q->tx_skbuff_dma[entry].map_as_page)
				dma_unmap_page(priv->device,
					       tx_q->tx_skbuff_dma[entry].buf,
					       tx_q->tx_skbuff_dma[entry].len,
					       DMA_TO_DEVICE);
			else
				dma_unmap_single(priv->device,
						 tx_q->tx_skbuff_dma[entry].buf,
						 tx_q->tx_skbuff_dma[entry].len,
						 DMA_TO_DEVICE);
			tx_q->tx_skbuff_dma[entry].buf = 0;
			tx_q->tx_skbuff_dma[entry].len = 0;
			tx_q->tx_skbuff_dma[entry].map_as_page = false;
		}

		stmmac_clean_desc3(priv, tx_q, p);

		tx_q->tx_skbuff_dma[entry].last_segment = false;
		tx_q->tx_skbuff_dma[entry].is_jumbo = false;

		if (likely(skb != NULL)) {
			pkts_compl++;
			bytes_compl += skb->len;
			dev_consume_skb_any(skb);
			tx_q->tx_skbuff[entry] = NULL;
		}

		stmmac_release_tx_desc(priv, p, priv->mode);

		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
	}
	tx_q->dirty_tx = entry;

	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
				  pkts_compl, bytes_compl);

	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
								queue))) &&
	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {

		netif_dbg(priv, tx_done, priv->dev,
			  "%s: restart transmit\n", __func__);
		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
		stmmac_enable_eee_mode(priv);
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
	}

	/* We still have pending packets, let's call for a new scheduling */
	if (tx_q->dirty_tx != tx_q->cur_tx)
		mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));

	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));

	return count;
}
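
/*
 * Usage sketch (not a verbatim call site): the reclaim loop above is driven
 * from the TX NAPI poll path, roughly as
 *
 *	cleaned = stmmac_tx_clean(priv, budget, queue);
 *
 * where @budget bounds how many descriptors are reclaimed per poll.  The
 * per-queue __netif_tx_lock_bh() taken above is what serializes this
 * against stmmac_xmit() running on the same TX queue.
 */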

/**
 * stmmac_tx_err - to manage the tx error
 * @priv: driver private structure
 * @chan: channel index
 * Description: it cleans the descriptors and restarts the transmission
 * in case of transmission errors.
 */
static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];

	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));

	stmmac_stop_tx_dma(priv, chan);
	dma_free_tx_skbufs(priv, chan);
	stmmac_clear_tx_descriptors(priv, chan);
	tx_q->dirty_tx = 0;
	tx_q->cur_tx = 0;
	tx_q->mss = 0;
	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
			    tx_q->dma_tx_phy, chan);
	stmmac_start_tx_dma(priv, chan);

	priv->dev->stats.tx_errors++;
	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
}

/**
 *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
 *  @priv: driver private structure
 *  @txmode: TX operating mode
 *  @rxmode: RX operating mode
 *  @chan: channel index
 *  Description: it is used for configuring the DMA operation mode at
 *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
 *  mode.
 */
static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
					  u32 rxmode, u32 chan)
{
	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	int rxfifosz = priv->plat->rx_fifo_size;
	int txfifosz = priv->plat->tx_fifo_size;

	if (rxfifosz == 0)
		rxfifosz = priv->dma_cap.rx_fifo_size;
	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	/* Adjust for real per queue fifo size */
	rxfifosz /= rx_channels_count;
	txfifosz /= tx_channels_count;

	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
}

static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
{
	int ret;

	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
	if (ret && (ret != -EINVAL)) {
		stmmac_global_err(priv);
		return true;
	}

	return false;
}

static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
{
	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
						 &priv->xstats, chan);
	struct stmmac_channel *ch = &priv->channel[chan];
	unsigned long flags;

	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
		if (napi_schedule_prep(&ch->rx_napi)) {
			spin_lock_irqsave(&ch->lock, flags);
			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
			spin_unlock_irqrestore(&ch->lock, flags);
			__napi_schedule_irqoff(&ch->rx_napi);
		}
	}

	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
		if (napi_schedule_prep(&ch->tx_napi)) {
			spin_lock_irqsave(&ch->lock, flags);
			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
			spin_unlock_irqrestore(&ch->lock, flags);
			__napi_schedule_irqoff(&ch->tx_napi);
		}
	}

	return status;
}

/**
 * stmmac_dma_interrupt - DMA ISR
 * @priv: driver private structure
 * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine and schedules the poll method when some
 * work can be done.
 */
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
	u32 tx_channel_count = priv->plat->tx_queues_to_use;
	u32 rx_channel_count = priv->plat->rx_queues_to_use;
	u32 channels_to_check = tx_channel_count > rx_channel_count ?
				tx_channel_count : rx_channel_count;
	u32 chan;
	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];

	/* Make sure we never check beyond our status buffer. */
	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
		channels_to_check = ARRAY_SIZE(status);

	for (chan = 0; chan < channels_to_check; chan++)
		status[chan] = stmmac_napi_check(priv, chan);

	for (chan = 0; chan < tx_channel_count; chan++) {
		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
			/* Try to bump up the dma threshold on this failure */
			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
			    (tc <= 256)) {
				tc += 64;
				if (priv->plat->force_thresh_dma_mode)
					stmmac_set_dma_operation_mode(priv,
								      tc,
								      tc,
								      chan);
				else
					stmmac_set_dma_operation_mode(priv,
								    tc,
								    SF_DMA_MODE,
								    chan);
				priv->xstats.threshold = tc;
			}
		} else if (unlikely(status[chan] == tx_hard_error)) {
			stmmac_tx_err(priv, chan);
		}
	}
}
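
/*
 * Worked example of the threshold bump above (illustration only): with the
 * module default tc = 64, a tx_hard_error_bump_tc event raises the
 * threshold to 128 and reprograms the channel through
 * stmmac_set_dma_operation_mode().  Further failures keep adding 64 until
 * tc exceeds 256 or the channel is already in Store-And-Forward mode.
 */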

/**
 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
 * @priv: driver private structure
 * Description: this masks the MMC irq, in fact, the counters are managed in SW.
 */
static void stmmac_mmc_setup(struct stmmac_priv *priv)
{
	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;

	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);

	if (priv->dma_cap.rmon) {
		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
	} else
		netdev_info(priv->dev, "No MAC Management Counters available\n");
}

/**
 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
 * @priv: driver private structure
 * Description:
 *  new GMAC chip generations have a new register to indicate the
 *  presence of the optional feature/functions.
 *  This can also be used to override the value passed through the
 *  platform and necessary for old MAC10/100 and GMAC chips.
 */
static int stmmac_get_hw_features(struct stmmac_priv *priv)
{
	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
}

/**
 * stmmac_check_ether_addr - check if the MAC addr is valid
 * @priv: driver private structure
 * Description:
 * it verifies that the MAC address is valid; in case of failure it
 * generates a random MAC address
 */
static void stmmac_check_ether_addr(struct stmmac_priv *priv)
{
	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
		if (!is_valid_ether_addr(priv->dev->dev_addr))
			eth_hw_addr_random(priv->dev);
		dev_info(priv->device, "device MAC address %pM\n",
			 priv->dev->dev_addr);
	}
}

/**
 * stmmac_init_dma_engine - DMA init.
 * @priv: driver private structure
 * Description:
 * It inits the DMA invoking the specific MAC/GMAC callback.
 * Some DMA parameters can be passed from the platform;
 * in case these are not passed, a default is used for the MAC or GMAC.
 */
static int stmmac_init_dma_engine(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
	struct stmmac_rx_queue *rx_q;
	struct stmmac_tx_queue *tx_q;
	u32 chan = 0;
	int atds = 0;
	int ret = 0;

	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
		dev_err(priv->device, "Invalid DMA configuration\n");
		return -EINVAL;
	}

	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
		atds = 1;

	ret = stmmac_reset(priv, priv->ioaddr);
	if (ret) {
		dev_err(priv->device, "Failed to reset the dma\n");
		return ret;
	}

	/* DMA Configuration */
	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);

	if (priv->plat->axi)
		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);

	/* DMA CSR Channel configuration */
	for (chan = 0; chan < dma_csr_ch; chan++)
		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);

	/* DMA RX Channel Configuration */
	for (chan = 0; chan < rx_channels_count; chan++) {
		rx_q = &priv->rx_queue[chan];

		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    rx_q->dma_rx_phy, chan);

		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
				     (priv->dma_rx_size *
				      sizeof(struct dma_desc));
		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
				       rx_q->rx_tail_addr, chan);
	}

	/* DMA TX Channel Configuration */
	for (chan = 0; chan < tx_channels_count; chan++) {
		tx_q = &priv->tx_queue[chan];

		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    tx_q->dma_tx_phy, chan);

		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
				       tx_q->tx_tail_addr, chan);
	}

	return ret;
}
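
/*
 * Note (sketch): the RX tail pointer programmed above initially points one
 * descriptor past the end of the ring (dma_rx_phy + dma_rx_size *
 * sizeof(struct dma_desc)), i.e. the full ring is offered to the DMA, while
 * the TX tail starts at the ring base because nothing is queued yet.  The
 * RX refill and TX xmit paths move the respective tail pointers at runtime.
 */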

static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

	mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
}

/**
 * stmmac_tx_timer - mitigation sw timer for tx.
 * @t: data pointer
 * Description:
 * This is the timer handler to directly invoke the stmmac_tx_clean.
 */
static void stmmac_tx_timer(struct timer_list *t)
{
	struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
	struct stmmac_priv *priv = tx_q->priv_data;
	struct stmmac_channel *ch;

	ch = &priv->channel[tx_q->queue_index];

	if (likely(napi_schedule_prep(&ch->tx_napi))) {
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
		spin_unlock_irqrestore(&ch->lock, flags);
		__napi_schedule(&ch->tx_napi);
	}
}

/**
 * stmmac_init_coalesce - init mitigation options.
 * @priv: driver private structure
 * Description:
 * This inits the coalesce parameters: i.e. timer rate,
 * timer handler and default threshold used for enabling the
 * interrupt on completion bit.
 */
static void stmmac_init_coalesce(struct stmmac_priv *priv)
{
	u32 tx_channel_count = priv->plat->tx_queues_to_use;
	u32 chan;

	priv->tx_coal_frames = STMMAC_TX_FRAMES;
	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
	priv->rx_coal_frames = STMMAC_RX_FRAMES;

	for (chan = 0; chan < tx_channel_count; chan++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];

		timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
	}
}

static void stmmac_set_rings_length(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan;

	/* set TX ring length */
	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_set_tx_ring_len(priv, priv->ioaddr,
				       (priv->dma_tx_size - 1), chan);

	/* set RX ring length */
	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_set_rx_ring_len(priv, priv->ioaddr,
				       (priv->dma_rx_size - 1), chan);
}

/**
 *  stmmac_set_tx_queue_weight - Set TX queue weight
 *  @priv: driver private structure
 *  Description: It is used for setting TX queues weight
 */
static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 weight;
	u32 queue;

	for (queue = 0; queue < tx_queues_count; queue++) {
		weight = priv->plat->tx_queues_cfg[queue].weight;
		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
	}
}

/**
 *  stmmac_configure_cbs - Configure CBS in TX queue
 *  @priv: driver private structure
 *  Description: It is used for configuring CBS in AVB TX queues
 */
static void stmmac_configure_cbs(struct stmmac_priv *priv)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 mode_to_use;
	u32 queue;

	/* queue 0 is reserved for legacy traffic */
	for (queue = 1; queue < tx_queues_count; queue++) {
		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
		if (mode_to_use == MTL_QUEUE_DCB)
			continue;

		stmmac_config_cbs(priv, priv->hw,
				priv->plat->tx_queues_cfg[queue].send_slope,
				priv->plat->tx_queues_cfg[queue].idle_slope,
				priv->plat->tx_queues_cfg[queue].high_credit,
				priv->plat->tx_queues_cfg[queue].low_credit,
				queue);
	}
}

/**
 *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
 *  @priv: driver private structure
 *  Description: It is used for mapping RX queues to RX dma channels
 */
static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 queue;
	u32 chan;

	for (queue = 0; queue < rx_queues_count; queue++) {
		chan = priv->plat->rx_queues_cfg[queue].chan;
		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
	}
}

/**
 *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
 *  @priv: driver private structure
 *  Description: It is used for configuring the RX Queue Priority
 */
static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 queue;
	u32 prio;

	for (queue = 0; queue < rx_queues_count; queue++) {
		if (!priv->plat->rx_queues_cfg[queue].use_prio)
			continue;

		prio = priv->plat->rx_queues_cfg[queue].prio;
		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
	}
}

/**
 *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
 *  @priv: driver private structure
 *  Description: It is used for configuring the TX Queue Priority
 */
static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 queue;
	u32 prio;

	for (queue = 0; queue < tx_queues_count; queue++) {
		if (!priv->plat->tx_queues_cfg[queue].use_prio)
			continue;

		prio = priv->plat->tx_queues_cfg[queue].prio;
		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
	}
}

/**
 *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
 *  @priv: driver private structure
 *  Description: It is used for configuring the RX queue routing
 */
static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 queue;
	u8 packet;

	for (queue = 0; queue < rx_queues_count; queue++) {
		/* no specific packet type routing specified for the queue */
		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
			continue;

		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
	}
}

static void stmmac_mac_config_rss(struct stmmac_priv *priv)
{
	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
		priv->rss.enable = false;
		return;
	}

	if (priv->dev->features & NETIF_F_RXHASH)
		priv->rss.enable = true;
	else
		priv->rss.enable = false;

	stmmac_rss_configure(priv, priv->hw, &priv->rss,
			     priv->plat->rx_queues_to_use);
}

/**
 *  stmmac_mtl_configuration - Configure MTL
 *  @priv: driver private structure
 *  Description: It is used for configuring MTL
 */
static void stmmac_mtl_configuration(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 tx_queues_count = priv->plat->tx_queues_to_use;

	if (tx_queues_count > 1)
		stmmac_set_tx_queue_weight(priv);

	/* Configure MTL RX algorithms */
	if (rx_queues_count > 1)
		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
				priv->plat->rx_sched_algorithm);

	/* Configure MTL TX algorithms */
	if (tx_queues_count > 1)
		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
				priv->plat->tx_sched_algorithm);

	/* Configure CBS in AVB TX queues */
	if (tx_queues_count > 1)
		stmmac_configure_cbs(priv);

	/* Map RX MTL to DMA channels */
	stmmac_rx_queue_dma_chan_map(priv);

	/* Enable MAC RX Queues */
	stmmac_mac_enable_rx_queues(priv);

	/* Set RX priorities */
	if (rx_queues_count > 1)
		stmmac_mac_config_rx_queues_prio(priv);

	/* Set TX priorities */
	if (tx_queues_count > 1)
		stmmac_mac_config_tx_queues_prio(priv);

	/* Set RX routing */
	if (rx_queues_count > 1)
		stmmac_mac_config_rx_queues_routing(priv);

	/* Receive Side Scaling */
	if (rx_queues_count > 1)
		stmmac_mac_config_rss(priv);
}

static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
{
	if (priv->dma_cap.asp) {
		netdev_info(priv->dev, "Enabling Safety Features\n");
		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
	} else {
		netdev_info(priv->dev, "No Safety Features support found\n");
	}
}

/**
 * stmmac_hw_setup - setup mac in a usable state.
 *  @dev : pointer to the device structure.
 *  @init_ptp: initialize PTP if set
 *  Description:
 *  this is the main function to setup the HW in a usable state: the
 *  dma engine is reset, the core registers are configured (e.g. AXI,
 *  Checksum features, timers). The DMA is ready to start receiving and
 *  transmitting.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 chan;
	int ret;

	/* DMA initialization and SW reset */
	ret = stmmac_init_dma_engine(priv);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
			   __func__);
		return ret;
	}

	/* Copy the MAC addr into the HW  */
	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);

	/* PS and related bits will be programmed according to the speed */
	if (priv->hw->pcs) {
		int speed = priv->plat->mac_port_sel_speed;

		if ((speed == SPEED_10) || (speed == SPEED_100) ||
		    (speed == SPEED_1000)) {
			priv->hw->ps = speed;
		} else {
			dev_warn(priv->device, "invalid port speed\n");
			priv->hw->ps = 0;
		}
	}

	/* Initialize the MAC Core */
	stmmac_core_init(priv, priv->hw, dev);

	/* Initialize MTL */
	stmmac_mtl_configuration(priv);

	/* Initialize Safety Features */
	stmmac_safety_feat_configuration(priv);

	ret = stmmac_rx_ipc(priv, priv->hw);
	if (!ret) {
		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
		priv->hw->rx_csum = 0;
	}

	/* Enable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, true);

	/* Set the HW DMA mode and the COE */
	stmmac_dma_operation_mode(priv);

	stmmac_mmc_setup(priv);

	if (init_ptp) {
		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
		if (ret < 0)
			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);

		ret = stmmac_init_ptp(priv);
		if (ret == -EOPNOTSUPP)
			netdev_warn(priv->dev, "PTP not supported by HW\n");
		else if (ret)
			netdev_warn(priv->dev, "PTP init failed\n");
	}

	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;

	/* Convert the timer from msec to usec */
	if (!priv->tx_lpi_timer)
		priv->tx_lpi_timer = eee_timer * 1000;

	if (priv->use_riwt) {
		if (!priv->rx_riwt)
			priv->rx_riwt = DEF_DMA_RIWT;

		ret = stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt);
	}

	if (priv->hw->pcs)
		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);

	/* set TX and RX rings length */
	stmmac_set_rings_length(priv);

	/* Enable TSO */
	if (priv->tso) {
		for (chan = 0; chan < tx_cnt; chan++)
			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
	}

	/* Enable Split Header */
	if (priv->sph && priv->hw->rx_csum) {
		for (chan = 0; chan < rx_cnt; chan++)
			stmmac_enable_sph(priv, priv->ioaddr, 1, chan);
	}

	/* VLAN Tag Insertion */
	if (priv->dma_cap.vlins)
		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);

	/* TBS */
	for (chan = 0; chan < tx_cnt; chan++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;

		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
	}

	/* Configure real RX and TX queues */
	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);

	/* Start the ball rolling... */
	stmmac_start_all_dma(priv);

	return 0;
}
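
/*
 * Bring-up order sketch (illustration of how this is used; see
 * stmmac_open() below): stmmac_hw_setup() assumes the descriptor resources
 * already exist, roughly
 *
 *	alloc_dma_desc_resources(priv);
 *	init_dma_desc_rings(dev, GFP_KERNEL);
 *	stmmac_hw_setup(dev, true);	// DMA reset + MAC/MTL/DMA setup
 *	stmmac_init_coalesce(priv);
 *	phylink_start(priv->phylink);
 *
 * The resume path typically calls it with init_ptp == false.
 */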

static void stmmac_hw_teardown(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	clk_disable_unprepare(priv->plat->clk_ptp_ref);
}

/**
 *  stmmac_open - open entry point of the driver
 *  @dev : pointer to the device structure.
 *  Description:
 *  This function is the open entry point of the driver.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int stmmac_open(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int bfsize = 0;
	u32 chan;
	int ret;

	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI &&
	    priv->hw->xpcs == NULL) {
		ret = stmmac_init_phy(dev);
		if (ret) {
			netdev_err(priv->dev,
				   "%s: Cannot attach to PHY (error: %d)\n",
				   __func__, ret);
			return ret;
		}
	}

	/* Extra statistics */
	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
	priv->xstats.threshold = tc;

	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
	if (bfsize < 0)
		bfsize = 0;

	if (bfsize < BUF_SIZE_16KiB)
		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);

	priv->dma_buf_sz = bfsize;
	buf_sz = bfsize;

	priv->rx_copybreak = STMMAC_RX_COPYBREAK;

	if (!priv->dma_tx_size)
		priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
	if (!priv->dma_rx_size)
		priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;

	/* Earlier check for TBS */
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;

		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
		if (stmmac_enable_tbs(priv, priv->ioaddr, tbs_en, chan))
			tx_q->tbs &= ~STMMAC_TBS_AVAIL;
	}

	ret = alloc_dma_desc_resources(priv);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
			   __func__);
		goto dma_desc_error;
	}

	ret = init_dma_desc_rings(dev, GFP_KERNEL);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
			   __func__);
		goto init_error;
	}

	ret = stmmac_hw_setup(dev, true);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
		goto init_error;
	}

	stmmac_init_coalesce(priv);

	phylink_start(priv->phylink);
	/* We may have called phylink_speed_down before */
	phylink_speed_up(priv->phylink);

	/* Request the IRQ lines */
	ret = request_irq(dev->irq, stmmac_interrupt,
			  IRQF_SHARED, dev->name, dev);
	if (unlikely(ret < 0)) {
		netdev_err(priv->dev,
			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
			   __func__, dev->irq, ret);
		goto irq_error;
	}

	/* Request the Wake IRQ in case of another line is used for WoL */
	if (priv->wol_irq != dev->irq) {
		ret = request_irq(priv->wol_irq, stmmac_interrupt,
				  IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
				   __func__, priv->wol_irq, ret);
			goto wolirq_error;
		}
	}

	/* Request the LPI IRQ in case of another line is used for LPI */
	if (priv->lpi_irq > 0) {
		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
				  dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
				   __func__, priv->lpi_irq, ret);
			goto lpiirq_error;
		}
	}

	stmmac_enable_all_queues(priv);
	netif_tx_start_all_queues(priv->dev);

	return 0;

lpiirq_error:
	if (priv->wol_irq != dev->irq)
		free_irq(priv->wol_irq, dev);
wolirq_error:
	free_irq(dev->irq, dev);
irq_error:
	phylink_stop(priv->phylink);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		del_timer_sync(&priv->tx_queue[chan].txtimer);

	stmmac_hw_teardown(dev);
init_error:
	free_dma_desc_resources(priv);
dma_desc_error:
	phylink_disconnect_phy(priv->phylink);
	return ret;
}

/**
 *  stmmac_release - close entry point of the driver
 *  @dev : device pointer.
 *  Description:
 *  This is the stop entry point of the driver.
 */
static int stmmac_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 chan;

	if (priv->eee_enabled)
		del_timer_sync(&priv->eee_ctrl_timer);

	if (device_may_wakeup(priv->device))
		phylink_speed_down(priv->phylink, false);
	/* Stop and disconnect the PHY */
	phylink_stop(priv->phylink);
	phylink_disconnect_phy(priv->phylink);

	stmmac_disable_all_queues(priv);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		del_timer_sync(&priv->tx_queue[chan].txtimer);

	/* Free the IRQ lines */
	free_irq(dev->irq, dev);
	if (priv->wol_irq != dev->irq)
		free_irq(priv->wol_irq, dev);
	if (priv->lpi_irq > 0)
		free_irq(priv->lpi_irq, dev);

	/* Stop TX/RX DMA and clear the descriptors */
	stmmac_stop_all_dma(priv);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv);

	/* Disable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, false);

	netif_carrier_off(dev);

	stmmac_release_ptp(priv);

	return 0;
}

static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
			       struct stmmac_tx_queue *tx_q)
{
	u16 tag = 0x0, inner_tag = 0x0;
	u32 inner_type = 0x0;
	struct dma_desc *p;

	if (!priv->dma_cap.vlins)
		return false;
	if (!skb_vlan_tag_present(skb))
		return false;
	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
		inner_tag = skb_vlan_tag_get(skb);
		inner_type = STMMAC_VLAN_INSERT;
	}

	tag = skb_vlan_tag_get(skb);

	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
	else
		p = &tx_q->dma_tx[tx_q->cur_tx];

	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
		return false;

	stmmac_set_tx_owner(priv, p);
	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
	return true;
}
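
/*
 * Usage note (sketch): both transmit paths call this helper before
 * reserving the first data descriptor, e.g.
 *
 *	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
 *	...
 *	if (has_vlan)
 *		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
 *
 * A "true" return means a context descriptor carrying the tag has already
 * been consumed from the ring (cur_tx was advanced above), so the caller
 * only has to flag the first data descriptor for insertion.
 */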

/**
 *  stmmac_tso_allocator - fill descriptors for the TSO payload
 *  @priv: driver private structure
 *  @des: buffer start address
 *  @total_len: total length to fill in descriptors
 *  @last_segment: condition for the last descriptor
 *  @queue: TX queue index
 *  Description:
 *  This function fills the descriptors and requests new descriptors
 *  according to the buffer length to fill.
 */
static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
				 int total_len, bool last_segment, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	struct dma_desc *desc;
	u32 buff_size;
	int tmp_len;

	tmp_len = total_len;

	while (tmp_len > 0) {
		dma_addr_t curr_addr;

		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
						priv->dma_tx_size);
		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);

		if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
		else
			desc = &tx_q->dma_tx[tx_q->cur_tx];

		curr_addr = des + (total_len - tmp_len);
		if (priv->dma_cap.addr64 <= 32)
			desc->des0 = cpu_to_le32(curr_addr);
		else
			stmmac_set_desc_addr(priv, desc, curr_addr);

		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
			    TSO_MAX_BUFF_SIZE : tmp_len;

		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
				0, 1,
				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
				0, 0);

		tmp_len -= TSO_MAX_BUFF_SIZE;
	}
}
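
/*
 * Worked example (illustration only): TSO_MAX_BUFF_SIZE is SZ_16K - 1, so a
 * 64 KiB payload handed to stmmac_tso_allocator() is split as
 *
 *	65536 = 16383 + 16383 + 16383 + 16383 + 4	(5 descriptors)
 *
 * and only the final descriptor gets the last-segment flag, per the
 * "last_segment && tmp_len <= TSO_MAX_BUFF_SIZE" condition above.
 */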

/**
 *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
 *  @skb : the socket buffer
 *  @dev : device pointer
 *  Description: this is the transmit function that is called on TSO frames
 *  (support available on GMAC4 and newer chips).
 *  Diagram below show the ring programming in case of TSO frames:
 *
 *  First Descriptor
 *   --------
 *   | DES0 |---> buffer1 = L2/L3/L4 header
 *   | DES1 |---> TCP Payload (can continue on next descr...)
 *   | DES2 |---> buffer 1 and 2 len
 *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
 *   --------
 *	|
 *     ...
 *	|
 *   --------
 *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
 *   | DES1 | --|
 *   | DES2 | --> buffer 1 and 2 len
 *   | DES3 |
 *   --------
 *
 * MSS is fixed while TSO is enabled, so the TDES3 context field is not
 * reprogrammed for every frame.
 */
static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dma_desc *desc, *first, *mss_desc = NULL;
	struct stmmac_priv *priv = netdev_priv(dev);
	int desc_size, tmp_pay_len = 0, first_tx;
	int nfrags = skb_shinfo(skb)->nr_frags;
	u32 queue = skb_get_queue_mapping(skb);
	unsigned int first_entry, tx_packets;
	struct stmmac_tx_queue *tx_q;
	bool has_vlan, set_ic;
	u8 proto_hdr_len, hdr;
	u32 pay_len, mss;
	dma_addr_t des;
	int i;

	tx_q = &priv->tx_queue[queue];
	first_tx = tx_q->cur_tx;

	/* Compute header lengths */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
		hdr = sizeof(struct udphdr);
	} else {
		proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		hdr = tcp_hdrlen(skb);
	}

	/* Desc availability based on threshold should be enough safe */
	if (unlikely(stmmac_tx_avail(priv, queue) <
		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
								queue));
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx Ring full when queue awake\n",
				   __func__);
		}
		return NETDEV_TX_BUSY;
	}

	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */

	mss = skb_shinfo(skb)->gso_size;

	/* set new MSS value if needed */
	if (mss != tx_q->mss) {
		if (tx_q->tbs & STMMAC_TBS_AVAIL)
			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
		else
			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];

		stmmac_set_mss(priv, mss_desc, mss);
		tx_q->mss = mss;
		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
						priv->dma_tx_size);
		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
	}

	if (netif_msg_tx_queued(priv)) {
		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
			__func__, hdr, proto_hdr_len, pay_len, mss);
		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
			skb->data_len);
	}

	/* Check if VLAN can be inserted by HW */
	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);

	first_entry = tx_q->cur_tx;
	WARN_ON(tx_q->tx_skbuff[first_entry]);

	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		desc = &tx_q->dma_entx[first_entry].basic;
	else
		desc = &tx_q->dma_tx[first_entry];
	first = desc;

	if (has_vlan)
		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);

	/* first descriptor: fill Headers on Buf1 */
	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
			     DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, des))
		goto dma_map_err;

	tx_q->tx_skbuff_dma[first_entry].buf = des;
	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);

	if (priv->dma_cap.addr64 <= 32) {
		first->des0 = cpu_to_le32(des);

		/* Fill start of payload in buff2 of first descriptor */
		if (pay_len)
			first->des1 = cpu_to_le32(des + proto_hdr_len);

		/* If needed take extra descriptors to fill the remaining payload */
		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
	} else {
		stmmac_set_desc_addr(priv, first, des);
		tmp_pay_len = pay_len;
		des += proto_hdr_len;
		pay_len = 0;
	}

	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);

	/* Prepare fragments */
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		des = skb_frag_dma_map(priv->device, frag, 0,
				       skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err;

		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
				     (i == nfrags - 1), queue);

		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
	}

	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;

	/* Only the last descriptor gets to point to the skb. */
	tx_q->tx_skbuff[tx_q->cur_tx] = skb;

	/* Manage tx mitigation */
	tx_packets = (tx_q->cur_tx + 1) - first_tx;
	tx_q->tx_count_frames += tx_packets;

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
		set_ic = true;
	else if (!priv->tx_coal_frames)
		set_ic = false;
	else if (tx_packets > priv->tx_coal_frames)
		set_ic = true;
	else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
		set_ic = true;
	else
		set_ic = false;

	if (set_ic) {
		if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
		else
			desc = &tx_q->dma_tx[tx_q->cur_tx];

		tx_q->tx_count_frames = 0;
		stmmac_set_tx_ic(priv, desc);
		priv->xstats.tx_set_ic_bit++;
	}

	/* We've used all descriptors we need for this skb, however,
	 * advance cur_tx so that it references a fresh descriptor.
	 * ndo_start_xmit will fill this descriptor the next time it's
	 * called and stmmac_tx_clean may clean up to this descriptor.
	 */
	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);

	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	dev->stats.tx_bytes += skb->len;
	priv->xstats.tx_tso_frames++;
	priv->xstats.tx_tso_nfrags += nfrags;

	if (priv->sarc_type)
		stmmac_set_desc_sarc(priv, first, priv->sarc_type);

	skb_tx_timestamp(skb);

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		     priv->hwts_tx_en)) {
		/* declare that device is doing timestamping */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		stmmac_enable_tx_timestamp(priv, first);
	}

	/* Complete the first descriptor before granting the DMA */
	stmmac_prepare_tso_tx_desc(priv, first, 1,
			proto_hdr_len,
			pay_len,
			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
			hdr / 4, (skb->len - proto_hdr_len));

	/* If context desc is used to change MSS */
	if (mss_desc) {
		/* Make sure that first descriptor has been completely
		 * written, including its own bit. This is because MSS is
		 * actually before first descriptor, so we need to make
		 * sure that MSS's own bit is the last thing written.
		 */
		dma_wmb();
		stmmac_set_tx_owner(priv, mss_desc);
	}

	/* The own bit must be the latest setting done when prepare the
	 * descriptor and then barrier is needed to make sure that
	 * all is coherent before granting the DMA engine.
	 */
	wmb();

	if (netif_msg_pktdata(priv)) {
		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
			tx_q->cur_tx, first, nfrags);
		pr_info(">>> frame to be transmitted: ");
		print_pkt(skb->data, skb_headlen(skb));
	}

	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);

	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		desc_size = sizeof(struct dma_edesc);
	else
		desc_size = sizeof(struct dma_desc);

	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
	stmmac_tx_timer_arm(priv, queue);

	return NETDEV_TX_OK;

dma_map_err:
	dev_err(priv->device, "Tx dma map failed\n");
	dev_kfree_skb(skb);
	priv->dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

/**
 *  stmmac_xmit - Tx entry point of the driver
 *  @skb : the socket buffer
 *  @dev : device pointer
 *  Description : this is the tx entry point of the driver.
 *  It programs the chain or the ring and supports oversized frames
 *  and SG feature.
 */
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
J
Jose Abreu 已提交
3292
	unsigned int first_entry, tx_packets, enh_desc;
3293
	struct stmmac_priv *priv = netdev_priv(dev);
3294
	unsigned int nopaged_len = skb_headlen(skb);
	int i, csum_insertion = 0, is_jumbo = 0;
	u32 queue = skb_get_queue_mapping(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int gso = skb_shinfo(skb)->gso_type;
	struct dma_edesc *tbs_desc = NULL;
	int entry, desc_size, first_tx;
	struct dma_desc *desc, *first;
	struct stmmac_tx_queue *tx_q;
	bool has_vlan, set_ic;
	dma_addr_t des;

	tx_q = &priv->tx_queue[queue];
	first_tx = tx_q->cur_tx;

	if (priv->tx_path_in_lpi_mode)
		stmmac_disable_eee_mode(priv);

	/* Manage oversized TCP frames for GMAC4 device */
	if (skb_is_gso(skb) && priv->tso) {
		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
			return stmmac_tso_xmit(skb, dev);
		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
			return stmmac_tso_xmit(skb, dev);
	}

	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
								queue));
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx Ring full when queue awake\n",
				   __func__);
		}
		return NETDEV_TX_BUSY;
	}

	/* Check if VLAN can be inserted by HW */
	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);

	entry = tx_q->cur_tx;
	first_entry = entry;
	WARN_ON(tx_q->tx_skbuff[first_entry]);

	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);

	if (likely(priv->extend_desc))
		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		desc = &tx_q->dma_entx[entry].basic;
	else
		desc = tx_q->dma_tx + entry;

	first = desc;

	if (has_vlan)
		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);

	enh_desc = priv->plat->enh_desc;
	/* To program the descriptors according to the size of the frame */
	if (enh_desc)
		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);

	if (unlikely(is_jumbo)) {
		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
		if (unlikely(entry < 0) && (entry != -EINVAL))
			goto dma_map_err;
	}

	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);
		bool last_segment = (i == (nfrags - 1));

		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
		WARN_ON(tx_q->tx_skbuff[entry]);

		if (likely(priv->extend_desc))
			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[entry].basic;
		else
			desc = tx_q->dma_tx + entry;

		des = skb_frag_dma_map(priv->device, frag, 0, len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err; /* should reuse desc w/o issues */

		tx_q->tx_skbuff_dma[entry].buf = des;

		stmmac_set_desc_addr(priv, desc, des);

		tx_q->tx_skbuff_dma[entry].map_as_page = true;
		tx_q->tx_skbuff_dma[entry].len = len;
		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;

		/* Prepare the descriptor and set the own bit too */
		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
				priv->mode, 1, last_segment, skb->len);
	}

	/* Only the last descriptor gets to point to the skb. */
	tx_q->tx_skbuff[entry] = skb;

	/* According to the coalesce parameter the IC bit for the latest
	 * segment is reset and the timer re-started to clean the tx status.
	 * This approach takes care about the fragments: desc is the first
	 * element in case of no SG.
	 */
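	/* Decide whether this skb should raise a Tx completion interrupt:
	 * always for HW-timestamped frames, otherwise roughly once every
	 * tx_coal_frames packets, relying on the coalescing timer in between.
	 */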
	tx_packets = (entry + 1) - first_tx;
	tx_q->tx_count_frames += tx_packets;

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
		set_ic = true;
	else if (!priv->tx_coal_frames)
		set_ic = false;
	else if (tx_packets > priv->tx_coal_frames)
		set_ic = true;
	else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
		set_ic = true;
	else
		set_ic = false;

	if (set_ic) {
		if (likely(priv->extend_desc))
			desc = &tx_q->dma_etx[entry].basic;
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[entry].basic;
		else
			desc = &tx_q->dma_tx[entry];

		tx_q->tx_count_frames = 0;
		stmmac_set_tx_ic(priv, desc);
		priv->xstats.tx_set_ic_bit++;
	}

	/* We've used all descriptors we need for this skb, however,
	 * advance cur_tx so that it references a fresh descriptor.
	 * ndo_start_xmit will fill this descriptor the next time it's
	 * called and stmmac_tx_clean may clean up to this descriptor.
	 */
	entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
	tx_q->cur_tx = entry;

	if (netif_msg_pktdata(priv)) {
		netdev_dbg(priv->dev,
			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
			   entry, first, nfrags);

		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
		print_pkt(skb->data, skb->len);
	}

	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	dev->stats.tx_bytes += skb->len;

	if (priv->sarc_type)
		stmmac_set_desc_sarc(priv, first, priv->sarc_type);

	skb_tx_timestamp(skb);

	/* Ready to fill the first descriptor and set the OWN bit w/o any
	 * problems because all the descriptors are actually ready to be
	 * passed to the DMA engine.
	 */
	if (likely(!is_jumbo)) {
		bool last_segment = (nfrags == 0);

		des = dma_map_single(priv->device, skb->data,
				     nopaged_len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err;

		tx_q->tx_skbuff_dma[first_entry].buf = des;

		stmmac_set_desc_addr(priv, first, des);

		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;

		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
			     priv->hwts_tx_en)) {
			/* declare that device is doing timestamping */
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			stmmac_enable_tx_timestamp(priv, first);
		}

		/* Prepare the first descriptor setting the OWN bit too */
		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
				csum_insertion, priv->mode, 0, last_segment,
				skb->len);
	}

	if (tx_q->tbs & STMMAC_TBS_EN) {
		struct timespec64 ts = ns_to_timespec64(skb->tstamp);

		tbs_desc = &tx_q->dma_entx[first_entry];
		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
	}

	stmmac_set_tx_owner(priv, first);

	/* The own bit must be the latest setting done when prepare the
	 * descriptor and then barrier is needed to make sure that
	 * all is coherent before granting the DMA engine.
	 */
	wmb();

	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);

	stmmac_enable_dma_transmission(priv, priv->ioaddr);

	if (likely(priv->extend_desc))
		desc_size = sizeof(struct dma_extended_desc);
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		desc_size = sizeof(struct dma_edesc);
	else
		desc_size = sizeof(struct dma_desc);

	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
	stmmac_tx_timer_arm(priv, queue);

	return NETDEV_TX_OK;

dma_map_err:
	netdev_err(priv->dev, "Tx DMA map failed\n");
	dev_kfree_skb(skb);
	priv->dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
{
	struct vlan_ethhdr *veth;
	__be16 vlan_proto;
	u16 vlanid;

	veth = (struct vlan_ethhdr *)skb->data;
	vlan_proto = veth->h_vlan_proto;

	if ((vlan_proto == htons(ETH_P_8021Q) &&
	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
	    (vlan_proto == htons(ETH_P_8021AD) &&
	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
		/* pop the vlan tag */
		vlanid = ntohs(veth->h_vlan_TCI);
		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
	}
}

/**
 * stmmac_rx_refill - refill used skb preallocated buffers
 * @priv: driver private structure
 * @queue: RX queue index
 * Description : this is to reallocate the skb for the reception process
 * that is based on zero-copy.
 */
static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int len, dirty = stmmac_rx_dirty(priv, queue);
	unsigned int entry = rx_q->dirty_rx;

	len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;

	while (dirty-- > 0) {
		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
		struct dma_desc *p;
		bool use_rx_wd;

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			p = rx_q->dma_rx + entry;

		if (!buf->page) {
			buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
			if (!buf->page)
				break;
		}

		if (priv->sph && !buf->sec_page) {
			buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
			if (!buf->sec_page)
				break;

			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);

			dma_sync_single_for_device(priv->device, buf->sec_addr,
						   len, DMA_FROM_DEVICE);
		}

		buf->addr = page_pool_get_dma_addr(buf->page);

		/* Sync whole allocation to device. This will invalidate old
		 * data.
		 */
		dma_sync_single_for_device(priv->device, buf->addr, len,
					   DMA_FROM_DEVICE);

		stmmac_set_desc_addr(priv, p, buf->addr);
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
		stmmac_refill_desc3(priv, rx_q, p);

		rx_q->rx_count_frames++;
		rx_q->rx_count_frames += priv->rx_coal_frames;
		if (rx_q->rx_count_frames > priv->rx_coal_frames)
			rx_q->rx_count_frames = 0;

		use_rx_wd = !priv->rx_coal_frames;
		use_rx_wd |= rx_q->rx_count_frames > 0;
		if (!priv->use_riwt)
			use_rx_wd = false;

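		/* Make sure all descriptor fields are written before OWN is
		 * handed back to the DMA, together with the interrupt/watchdog
		 * hint computed above.
		 */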
		dma_wmb();
		stmmac_set_rx_owner(priv, p, use_rx_wd);

		entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
	}
	rx_q->dirty_rx = entry;
	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
			    (rx_q->dirty_rx * sizeof(struct dma_desc));
	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
}

static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
				       struct dma_desc *p,
				       int status, unsigned int len)
{
	unsigned int plen = 0, hlen = 0;
	int coe = priv->hw->rx_csum;

	/* Not first descriptor, buffer is always zero */
	if (priv->sph && len)
		return 0;

	/* First descriptor, get split header length */
	stmmac_get_rx_header_len(priv, p, &hlen);
	if (priv->sph && hlen) {
		priv->xstats.rx_split_hdr_pkt_n++;
		return hlen;
	}

	/* First descriptor, not last descriptor and not split header */
	if (status & rx_not_ls)
		return priv->dma_buf_sz;

	plen = stmmac_get_rx_frame_len(priv, p, coe);

	/* First descriptor and last descriptor and not split header */
	return min_t(unsigned int, priv->dma_buf_sz, plen);
}

static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
				       struct dma_desc *p,
				       int status, unsigned int len)
{
	int coe = priv->hw->rx_csum;
	unsigned int plen = 0;

	/* Not split header, buffer is not available */
	if (!priv->sph)
		return 0;

	/* Not last descriptor */
	if (status & rx_not_ls)
		return priv->dma_buf_sz;

	plen = stmmac_get_rx_frame_len(priv, p, coe);

	/* Last descriptor */
	return plen - len;
}

/**
 * stmmac_rx - manage the receive process
 * @priv: driver private structure
 * @limit: napi budget
 * @queue: RX queue index.
 * Description : this is the function called by the napi poll method.
 * It gets all the frames inside the ring.
 */
static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned int count = 0, error = 0, len = 0;
	int status = 0, coe = priv->hw->rx_csum;
	unsigned int next_entry = rx_q->cur_rx;
	struct sk_buff *skb = NULL;

	if (netif_msg_rx_status(priv)) {
		void *rx_head;

		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
		if (priv->extend_desc)
			rx_head = (void *)rx_q->dma_erx;
		else
			rx_head = (void *)rx_q->dma_rx;

		stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true);
	}
	while (count < limit) {
		unsigned int buf1_len = 0, buf2_len = 0;
		enum pkt_hash_types hash_type;
		struct stmmac_rx_buffer *buf;
		struct dma_desc *np, *p;
		int entry;
		u32 hash;

		if (!count && rx_q->state_saved) {
			skb = rx_q->state.skb;
			error = rx_q->state.error;
			len = rx_q->state.len;
		} else {
			rx_q->state_saved = false;
			skb = NULL;
			error = 0;
			len = 0;
		}

		if (count >= limit)
			break;

read_again:
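		/* Process one descriptor of the current frame; we jump back
		 * here while the descriptor chain reports rx_not_ls.
		 */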
		buf1_len = 0;
		buf2_len = 0;
		entry = next_entry;
		buf = &rx_q->buf_pool[entry];

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			p = rx_q->dma_rx + entry;

		/* read the status of the incoming frame */
		status = stmmac_rx_status(priv, &priv->dev->stats,
				&priv->xstats, p);
		/* check if managed by the DMA otherwise go ahead */
		if (unlikely(status & dma_own))
			break;

		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
						priv->dma_rx_size);
		next_entry = rx_q->cur_rx;

		if (priv->extend_desc)
			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
		else
			np = rx_q->dma_rx + next_entry;

		prefetch(np);

		if (priv->extend_desc)
			stmmac_rx_extended_status(priv, &priv->dev->stats,
					&priv->xstats, rx_q->dma_erx + entry);
		if (unlikely(status == discard_frame)) {
			page_pool_recycle_direct(rx_q->page_pool, buf->page);
			buf->page = NULL;
			error = 1;
			if (!priv->hwts_rx_en)
				priv->dev->stats.rx_errors++;
		}

		if (unlikely(error && (status & rx_not_ls)))
			goto read_again;
		if (unlikely(error)) {
			dev_kfree_skb(skb);
			skb = NULL;
			count++;
			continue;
		}

		/* Buffer is good. Go on. */

		prefetch(page_address(buf->page));
		if (buf->sec_page)
			prefetch(page_address(buf->sec_page));

		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
		len += buf1_len;
		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
		len += buf2_len;

		/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
		 * Type frames (LLC/LLC-SNAP)
		 *
		 * llc_snap is never checked in GMAC >= 4, so this ACS
		 * feature is always disabled and packets need to be
		 * stripped manually.
		 */
		if (likely(!(status & rx_not_ls)) &&
		    (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
		     unlikely(status != llc_snap))) {
			if (buf2_len)
				buf2_len -= ETH_FCS_LEN;
			else
				buf1_len -= ETH_FCS_LEN;

			len -= ETH_FCS_LEN;
		}

		if (!skb) {
			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
			if (!skb) {
				priv->dev->stats.rx_dropped++;
				count++;
				goto drain_data;
			}

			dma_sync_single_for_cpu(priv->device, buf->addr,
						buf1_len, DMA_FROM_DEVICE);
			skb_copy_to_linear_data(skb, page_address(buf->page),
						buf1_len);
			skb_put(skb, buf1_len);

			/* Data payload copied into SKB, page ready for recycle */
			page_pool_recycle_direct(rx_q->page_pool, buf->page);
			buf->page = NULL;
		} else if (buf1_len) {
			dma_sync_single_for_cpu(priv->device, buf->addr,
						buf1_len, DMA_FROM_DEVICE);
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					buf->page, 0, buf1_len,
					priv->dma_buf_sz);

			/* Data payload appended into SKB */
			page_pool_release_page(rx_q->page_pool, buf->page);
			buf->page = NULL;
		}

		if (buf2_len) {
			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
						buf2_len, DMA_FROM_DEVICE);
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					buf->sec_page, 0, buf2_len,
					priv->dma_buf_sz);

			/* Data payload appended into SKB */
			page_pool_release_page(rx_q->page_pool, buf->sec_page);
			buf->sec_page = NULL;
		}

drain_data:
		if (likely(status & rx_not_ls))
			goto read_again;
		if (!skb)
			continue;

		/* Got entire packet into SKB. Finish it. */

		stmmac_get_rx_hwtstamp(priv, p, np, skb);
		stmmac_rx_vlan(priv->dev, skb);
		skb->protocol = eth_type_trans(skb, priv->dev);

		if (unlikely(!coe))
			skb_checksum_none_assert(skb);
		else
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
			skb_set_hash(skb, hash, hash_type);

		skb_record_rx_queue(skb, queue);
		napi_gro_receive(&ch->rx_napi, skb);
		skb = NULL;

		priv->dev->stats.rx_packets++;
		priv->dev->stats.rx_bytes += len;
		count++;
	}

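	/* The frame is not complete (or the skb was not consumed yet): save
	 * the partially built skb and its state so the next poll can resume
	 * from here.
	 */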
	if (status & rx_not_ls || skb) {
		rx_q->state_saved = true;
		rx_q->state.skb = skb;
		rx_q->state.error = error;
		rx_q->state.len = len;
	}

	stmmac_rx_refill(priv, queue);

	priv->xstats.rx_pkt_n += count;

	return count;
}

static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
{
	struct stmmac_channel *ch =
		container_of(napi, struct stmmac_channel, rx_napi);
	struct stmmac_priv *priv = ch->priv_data;
	u32 chan = ch->index;
	int work_done;

	priv->xstats.napi_poll++;

	work_done = stmmac_rx(priv, budget, chan);
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
		spin_unlock_irqrestore(&ch->lock, flags);
	}

	return work_done;
}

static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
{
	struct stmmac_channel *ch =
		container_of(napi, struct stmmac_channel, tx_napi);
	struct stmmac_priv *priv = ch->priv_data;
	u32 chan = ch->index;
	int work_done;

	priv->xstats.napi_poll++;

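	/* stmmac_tx_clean() may complete more descriptors than the NAPI
	 * budget allows; clamp the return value so napi_complete_done()
	 * semantics are respected.
	 */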
	work_done = stmmac_tx_clean(priv, priv->dma_tx_size, chan);
	work_done = min(work_done, budget);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
		spin_unlock_irqrestore(&ch->lock, flags);
	}

	return work_done;
}

/**
 *  stmmac_tx_timeout
 *  @dev : Pointer to net device structure
 *  @txqueue: the index of the hanging transmit queue
 *  Description: this function is called when a packet transmission fails to
 *   complete within a reasonable time. The driver will mark the error in the
 *   netdev structure and arrange for the device to be reset to a sane state
 *   in order to transmit a new packet.
 */
static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	stmmac_global_err(priv);
}

/**
 *  stmmac_set_rx_mode - entry point for multicast addressing
 *  @dev : pointer to the device structure
 *  Description:
 *  This function is a driver entry point which gets called by the kernel
 *  whenever multicast addresses must be enabled/disabled.
 *  Return value:
 *  void.
 */
static void stmmac_set_rx_mode(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	stmmac_set_filter(priv, priv->hw, dev);
}

/**
 *  stmmac_change_mtu - entry point to change MTU size for the device.
 *  @dev : device pointer.
 *  @new_mtu : the new MTU size for the device.
 *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
 *  to drive packet transmission. Ethernet has an MTU of 1500 octets
 *  (ETH_DATA_LEN). This value can be changed with ifconfig.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int txfifosz = priv->plat->tx_fifo_size;

	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	txfifosz /= priv->plat->tx_queues_to_use;

	if (netif_running(dev)) {
		netdev_err(priv->dev, "must be stopped to change its MTU\n");
		return -EBUSY;
	}

	new_mtu = STMMAC_ALIGN(new_mtu);

	/* If condition true, FIFO is too small or MTU too large */
	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
		return -EINVAL;

	dev->mtu = new_mtu;

	netdev_update_features(dev);

	return 0;
}

static netdev_features_t stmmac_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
		features &= ~NETIF_F_RXCSUM;

	if (!priv->plat->tx_coe)
		features &= ~NETIF_F_CSUM_MASK;

	/* Some GMAC devices have a bugged Jumbo frame support that
	 * needs to have the Tx COE disabled for oversized frames
	 * (due to limited buffer sizes). In this case we disable
	 * the TX csum insertion in the TDES and not use SF.
	 */
	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
		features &= ~NETIF_F_CSUM_MASK;

	/* Disable tso if asked by ethtool */
	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
		if (features & NETIF_F_TSO)
			priv->tso = true;
		else
			priv->tso = false;
	}

	return features;
}

static int stmmac_set_features(struct net_device *netdev,
			       netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(netdev);
	bool sph_en;
	u32 chan;

	/* Keep the COE Type in case of csum is supporting */
	if (features & NETIF_F_RXCSUM)
		priv->hw->rx_csum = priv->plat->rx_coe;
	else
		priv->hw->rx_csum = 0;
	/* No check needed because rx_coe has been set before and it will be
	 * fixed in case of issue.
	 */
	stmmac_rx_ipc(priv, priv->hw);

	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
	for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);

	return 0;
}

/**
 *  stmmac_interrupt - main ISR
 *  @irq: interrupt number.
 *  @dev_id: to pass the net device pointer (must be valid).
 *  Description: this is the main driver interrupt service routine.
 *  It can call:
 *  o DMA service routine (to manage incoming frame reception and transmission
 *    status)
 *  o Core interrupts to manage: remote wake-up, management counter, LPI
 *    interrupts.
 */
static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queues_count;
	u32 queue;
	bool xmac;

	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;

	if (priv->irq_wake)
		pm_wakeup_event(priv->device, 0);

	/* Check if adapter is up */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return IRQ_HANDLED;
	/* Check if a fatal error happened */
	if (stmmac_safety_feat_interrupt(priv))
		return IRQ_HANDLED;

	/* To handle GMAC own interrupts */
	if ((priv->plat->has_gmac) || xmac) {
		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
		int mtl_status;

		if (unlikely(status)) {
			/* For LPI we need to save the tx status */
			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
				priv->tx_path_in_lpi_mode = true;
			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
				priv->tx_path_in_lpi_mode = false;
		}

		for (queue = 0; queue < queues_count; queue++) {
			struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

			mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
								queue);
			if (mtl_status != -EINVAL)
				status |= mtl_status;

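			/* On an MTL RX FIFO overflow, re-write the tail
			 * pointer so the DMA restarts fetching RX descriptors.
			 */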
			if (status & CORE_IRQ_MTL_RX_OVERFLOW)
				stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
						       rx_q->rx_tail_addr,
						       queue);
		}

		/* PCS link status */
		if (priv->hw->pcs) {
			if (priv->xstats.pcs_link)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);
		}
	}

	/* To handle DMA interrupts */
	stmmac_dma_interrupt(priv);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by NETCONSOLE and other diagnostic tools
 * to allow network I/O with interrupts disabled.
 */
static void stmmac_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	stmmac_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

/**
 *  stmmac_ioctl - Entry point for the Ioctl
 *  @dev: Device pointer.
 *  @rq: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  @cmd: IOCTL command
 *  Description:
 *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
 */
static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
		break;
	case SIOCSHWTSTAMP:
		ret = stmmac_hwtstamp_set(dev, rq);
		break;
	case SIOCGHWTSTAMP:
		ret = stmmac_hwtstamp_get(dev, rq);
		break;
	default:
		break;
	}

	return ret;
}

static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				    void *cb_priv)
{
	struct stmmac_priv *priv = cb_priv;
	int ret = -EOPNOTSUPP;

	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
		return ret;

	stmmac_disable_all_queues(priv);

	switch (type) {
	case TC_SETUP_CLSU32:
		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
		break;
	case TC_SETUP_CLSFLOWER:
		ret = stmmac_tc_setup_cls(priv, priv, type_data);
		break;
	default:
		break;
	}

	stmmac_enable_all_queues(priv);
	return ret;
}

static LIST_HEAD(stmmac_block_cb_list);

static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
			   void *type_data)
{
	struct stmmac_priv *priv = netdev_priv(ndev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &stmmac_block_cb_list,
						  stmmac_setup_tc_block_cb,
						  priv, priv, true);
	case TC_SETUP_QDISC_CBS:
		return stmmac_tc_setup_cbs(priv, priv, type_data);
	case TC_SETUP_QDISC_TAPRIO:
		return stmmac_tc_setup_taprio(priv, priv, type_data);
	case TC_SETUP_QDISC_ETF:
		return stmmac_tc_setup_etf(priv, priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
			       struct net_device *sb_dev)
{
	int gso = skb_shinfo(skb)->gso_type;

	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
		/*
		 * There is no way to determine the number of TSO/USO
		 * capable Queues. Let's always use Queue 0 because
		 * if TSO/USO is supported then at least this
		 * one will be capable.
		 */
		return 0;
	}

	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
}

static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret = 0;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;

	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);

	return ret;
}

#ifdef CONFIG_DEBUG_FS
static struct dentry *stmmac_fs_dir;

static void sysfs_display_ring(void *head, int size, int extend_desc,
			       struct seq_file *seq)
{
	int i;
	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
	struct dma_desc *p = (struct dma_desc *)head;

	for (i = 0; i < size; i++) {
		if (extend_desc) {
			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, (unsigned int)virt_to_phys(ep),
				   le32_to_cpu(ep->basic.des0),
				   le32_to_cpu(ep->basic.des1),
				   le32_to_cpu(ep->basic.des2),
				   le32_to_cpu(ep->basic.des3));
			ep++;
		} else {
			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, (unsigned int)virt_to_phys(p),
				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
			p++;
		}
		seq_printf(seq, "\n");
	}
}

static int stmmac_rings_status_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 tx_count = priv->plat->tx_queues_to_use;
	u32 queue;

	if ((dev->flags & IFF_UP) == 0)
		return 0;

	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		seq_printf(seq, "RX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_erx,
					   priv->dma_rx_size, 1, seq);
		} else {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_rx,
					   priv->dma_rx_size, 0, seq);
		}
	}

	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		seq_printf(seq, "TX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_etx,
					   priv->dma_tx_size, 1, seq);
		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_tx,
					   priv->dma_tx_size, 0, seq);
		}
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);

static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);

	if (!priv->hw_cap_support) {
		seq_printf(seq, "DMA HW features not supported\n");
		return 0;
	}

	seq_printf(seq, "==============================\n");
	seq_printf(seq, "\tDMA HW features\n");
	seq_printf(seq, "==============================\n");

	seq_printf(seq, "\t10/100 Mbps: %s\n",
		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
	seq_printf(seq, "\t1000 Mbps: %s\n",
		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
	seq_printf(seq, "\tHalf duplex: %s\n",
		   (priv->dma_cap.half_duplex) ? "Y" : "N");
	seq_printf(seq, "\tHash Filter: %s\n",
		   (priv->dma_cap.hash_filter) ? "Y" : "N");
	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
		   (priv->dma_cap.multi_addr) ? "Y" : "N");
	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
		   (priv->dma_cap.pcs) ? "Y" : "N");
	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
	seq_printf(seq, "\tPMT Remote wake up: %s\n",
		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
	seq_printf(seq, "\tPMT Magic Frame: %s\n",
		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
	seq_printf(seq, "\tRMON module: %s\n",
		   (priv->dma_cap.rmon) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
		   (priv->dma_cap.time_stamp) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
		   (priv->dma_cap.eee) ? "Y" : "N");
	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
		   (priv->dma_cap.tx_coe) ? "Y" : "N");
	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
			   (priv->dma_cap.rx_coe) ? "Y" : "N");
	} else {
		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
	}
	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
		   priv->dma_cap.number_rx_channel);
	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
		   priv->dma_cap.number_tx_channel);
	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
		   priv->dma_cap.number_rx_queues);
	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
		   priv->dma_cap.number_tx_queues);
	seq_printf(seq, "\tEnhanced descriptors: %s\n",
		   (priv->dma_cap.enh_desc) ? "Y" : "N");
	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
	seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
		   priv->dma_cap.pps_out_num);
	seq_printf(seq, "\tSafety Features: %s\n",
		   priv->dma_cap.asp ? "Y" : "N");
	seq_printf(seq, "\tFlexible RX Parser: %s\n",
		   priv->dma_cap.frpsel ? "Y" : "N");
	seq_printf(seq, "\tEnhanced Addressing: %d\n",
		   priv->dma_cap.addr64);
	seq_printf(seq, "\tReceive Side Scaling: %s\n",
		   priv->dma_cap.rssen ? "Y" : "N");
	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
		   priv->dma_cap.vlhash ? "Y" : "N");
	seq_printf(seq, "\tSplit Header: %s\n",
		   priv->dma_cap.sphen ? "Y" : "N");
	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
		   priv->dma_cap.vlins ? "Y" : "N");
	seq_printf(seq, "\tDouble VLAN: %s\n",
		   priv->dma_cap.dvlan ? "Y" : "N");
	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
		   priv->dma_cap.l3l4fnum);
	seq_printf(seq, "\tARP Offloading: %s\n",
		   priv->dma_cap.arpoffsel ? "Y" : "N");
	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
		   priv->dma_cap.estsel ? "Y" : "N");
	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
		   priv->dma_cap.fpesel ? "Y" : "N");
	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
		   priv->dma_cap.tbssel ? "Y" : "N");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);

/* Use network device events to rename debugfs file entries.
 */
static int stmmac_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct stmmac_priv *priv = netdev_priv(dev);

	if (dev->netdev_ops != &stmmac_netdev_ops)
		goto done;

	switch (event) {
	case NETDEV_CHANGENAME:
		if (priv->dbgfs_dir)
			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
							 priv->dbgfs_dir,
							 stmmac_fs_dir,
							 dev->name);
		break;
	}
done:
	return NOTIFY_DONE;
}

static struct notifier_block stmmac_notifier = {
	.notifier_call = stmmac_device_event,
};

static void stmmac_init_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	rtnl_lock();

	/* Create per netdev entries */
	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);

	/* Entry to report DMA RX/TX rings */
	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
			    &stmmac_rings_status_fops);

	/* Entry to report the DMA HW features */
	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
			    &stmmac_dma_cap_fops);

	rtnl_unlock();
}

static void stmmac_exit_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	debugfs_remove_recursive(priv->dbgfs_dir);
}
#endif /* CONFIG_DEBUG_FS */

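/* Bit-serial CRC32 (poly 0xEDB88320, LSB first) over the 12 VLAN ID bits,
 * used to index the MAC's VLAN hash filter.
 */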
static u32 stmmac_vid_crc32_le(__le16 vid_le)
{
	unsigned char *data = (unsigned char *)&vid_le;
	unsigned char data_byte = 0;
	u32 crc = ~0x0;
	u32 temp = 0;
	int i, bits;

	bits = get_bitmask_order(VLAN_VID_MASK);
	for (i = 0; i < bits; i++) {
		if ((i % 8) == 0)
			data_byte = data[i / 8];

		temp = ((crc & 1) ^ data_byte) & 1;
		crc >>= 1;
		data_byte >>= 1;

		if (temp)
			crc ^= 0xedb88320;
	}

	return crc;
}

static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
{
	u32 crc, hash = 0;
	__le16 pmatch = 0;
	int count = 0;
	u16 vid = 0;

	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
		__le16 vid_le = cpu_to_le16(vid);
		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
		hash |= (1 << crc);
		count++;
	}

	if (!priv->dma_cap.vlhash) {
		if (count > 2) /* VID = 0 always passes filter */
			return -EOPNOTSUPP;

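		/* No VLAN hash filtering in HW: fall back to a perfect match
		 * on the single configured VID.
		 */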
		pmatch = cpu_to_le16(vid);
		hash = 0;
	}

	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
}

static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	bool is_double = false;
	int ret;

	if (be16_to_cpu(proto) == ETH_P_8021AD)
		is_double = true;

	set_bit(vid, priv->active_vlans);
	ret = stmmac_vlan_update(priv, is_double);
	if (ret) {
		clear_bit(vid, priv->active_vlans);
		return ret;
	}

	if (priv->hw->num_vlan) {
		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
		if (ret)
			return ret;
	}

	return 0;
}

static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	bool is_double = false;
	int ret;

	if (be16_to_cpu(proto) == ETH_P_8021AD)
		is_double = true;

	clear_bit(vid, priv->active_vlans);

	if (priv->hw->num_vlan) {
		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
		if (ret)
			return ret;
	}

	return stmmac_vlan_update(priv, is_double);
}

static const struct net_device_ops stmmac_netdev_ops = {
	.ndo_open = stmmac_open,
	.ndo_start_xmit = stmmac_xmit,
	.ndo_stop = stmmac_release,
	.ndo_change_mtu = stmmac_change_mtu,
	.ndo_fix_features = stmmac_fix_features,
	.ndo_set_features = stmmac_set_features,
	.ndo_set_rx_mode = stmmac_set_rx_mode,
	.ndo_tx_timeout = stmmac_tx_timeout,
	.ndo_do_ioctl = stmmac_ioctl,
	.ndo_setup_tc = stmmac_setup_tc,
	.ndo_select_queue = stmmac_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = stmmac_poll_controller,
#endif
	.ndo_set_mac_address = stmmac_set_mac_address,
	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
};

static void stmmac_reset_subtask(struct stmmac_priv *priv)
{
	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
		return;
	if (test_bit(STMMAC_DOWN, &priv->state))
		return;

	netdev_err(priv->dev, "Reset adapter.\n");

	rtnl_lock();
	netif_trans_update(priv->dev);
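	/* Wait for any reset already in progress before taking the
	 * interface down.
	 */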
	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
		usleep_range(1000, 2000);

	set_bit(STMMAC_DOWN, &priv->state);
	dev_close(priv->dev);
	dev_open(priv->dev, NULL);
	clear_bit(STMMAC_DOWN, &priv->state);
	clear_bit(STMMAC_RESETING, &priv->state);
	rtnl_unlock();
}

static void stmmac_service_task(struct work_struct *work)
{
	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
			service_task);

	stmmac_reset_subtask(priv);
	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
}

/**
 *  stmmac_hw_init - Init the MAC device
 *  @priv: driver private structure
 *  Description: this function is to configure the MAC device according to
 *  some platform parameters or the HW capability register. It prepares the
 *  driver to use either ring or chain modes and to setup either enhanced or
 *  normal descriptors.
 */
static int stmmac_hw_init(struct stmmac_priv *priv)
{
	int ret;

	/* dwmac-sun8i only works in chain mode */
	if (priv->plat->has_sun8i)
		chain_mode = 1;
	priv->chain_mode = chain_mode;

	/* Initialize HW Interface */
	ret = stmmac_hwif_init(priv);
	if (ret)
		return ret;

	/* Get the HW capability (new GMAC newer than 3.50a) */
	priv->hw_cap_support = stmmac_get_hw_features(priv);
	if (priv->hw_cap_support) {
		dev_info(priv->device, "DMA HW capability register supported\n");

		/* We can override some gmac/dma configuration fields: e.g.
		 * enh_desc, tx_coe (e.g. that are passed through the
		 * platform) with the values from the HW capability
		 * register (if supported).
		 */
		priv->plat->enh_desc = priv->dma_cap.enh_desc;
		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
		priv->hw->pmt = priv->plat->pmt;
		if (priv->dma_cap.hash_tb_sz) {
			priv->hw->multicast_filter_bins =
					(BIT(priv->dma_cap.hash_tb_sz) << 5);
			priv->hw->mcast_bits_log2 =
					ilog2(priv->hw->multicast_filter_bins);
		}

		/* TXCOE doesn't work in thresh DMA mode */
		if (priv->plat->force_thresh_dma_mode)
			priv->plat->tx_coe = 0;
		else
			priv->plat->tx_coe = priv->dma_cap.tx_coe;

		/* In case of GMAC4 rx_coe is from HW cap register. */
		priv->plat->rx_coe = priv->dma_cap.rx_coe;

		if (priv->dma_cap.rx_coe_type2)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
		else if (priv->dma_cap.rx_coe_type1)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;

	} else {
		dev_info(priv->device, "No HW DMA feature register supported\n");
	}

	if (priv->plat->rx_coe) {
		priv->hw->rx_csum = priv->plat->rx_coe;
		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
		if (priv->synopsys_id < DWMAC_CORE_4_00)
			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
	}
	if (priv->plat->tx_coe)
		dev_info(priv->device, "TX Checksum insertion supported\n");

	if (priv->plat->pmt) {
		dev_info(priv->device, "Wake-Up On Lan supported\n");
		device_set_wakeup_capable(priv->device, 1);
	}

	if (priv->dma_cap.tsoen)
		dev_info(priv->device, "TSO supported\n");

	priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;

	/* Run HW quirks, if any */
	if (priv->hwif_quirks) {
		ret = priv->hwif_quirks(priv);
		if (ret)
			return ret;
	}

	/* Rx Watchdog is available in the COREs newer than the 3.40.
	 * In some case, for example on bugged HW this feature
	 * has to be disabled and this can be done by passing the
	 * riwt_off field from the platform.
	 */
	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
		priv->use_riwt = 1;
		dev_info(priv->device,
			 "Enable RX Mitigation via HW Watchdog Timer\n");
	}

	return 0;
}

static void stmmac_napi_add(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 queue, maxq;

	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		ch->priv_data = priv;
		ch->index = queue;
		spin_lock_init(&ch->lock);

		if (queue < priv->plat->rx_queues_to_use) {
			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
				       NAPI_POLL_WEIGHT);
		}
		if (queue < priv->plat->tx_queues_to_use) {
			netif_tx_napi_add(dev, &ch->tx_napi,
					  stmmac_napi_poll_tx,
					  NAPI_POLL_WEIGHT);
		}
	}
}

static void stmmac_napi_del(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 queue, maxq;

	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < priv->plat->rx_queues_to_use)
			netif_napi_del(&ch->rx_napi);
		if (queue < priv->plat->tx_queues_to_use)
			netif_napi_del(&ch->tx_napi);
	}
}

int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = 0;

	if (netif_running(dev))
		stmmac_release(dev);

	stmmac_napi_del(dev);

	priv->plat->rx_queues_to_use = rx_cnt;
	priv->plat->tx_queues_to_use = tx_cnt;

	stmmac_napi_add(dev);

	if (netif_running(dev))
		ret = stmmac_open(dev);

	return ret;
}

int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = 0;

	if (netif_running(dev))
		stmmac_release(dev);

	priv->dma_rx_size = rx_size;
	priv->dma_tx_size = tx_size;

	if (netif_running(dev))
		ret = stmmac_open(dev);

	return ret;
}

/**
 * stmmac_dvr_probe
 * @device: device pointer
 * @plat_dat: platform data pointer
 * @res: stmmac resource pointer
 * Description: this is the main probe function used to
 * call the alloc_etherdev, allocate the priv structure.
 * Return:
 * returns 0 on success, otherwise errno.
 */
int stmmac_dvr_probe(struct device *device,
		     struct plat_stmmacenet_data *plat_dat,
		     struct stmmac_resources *res)
{
	struct net_device *ndev = NULL;
	struct stmmac_priv *priv;
	u32 rxq;
	int i, ret = 0;

	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, device);

	priv = netdev_priv(ndev);
	priv->device = device;
	priv->dev = ndev;

	stmmac_set_ethtool_ops(ndev);
	priv->pause = pause;
	priv->plat = plat_dat;
	priv->ioaddr = res->addr;
	priv->dev->base_addr = (unsigned long)res->addr;

	priv->dev->irq = res->irq;
	priv->wol_irq = res->wol_irq;
	priv->lpi_irq = res->lpi_irq;

	if (!IS_ERR_OR_NULL(res->mac))
		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);

	dev_set_drvdata(device, priv->dev);

	/* Verify driver arguments */
	stmmac_verify_args();

	/* Allocate workqueue */
	priv->wq = create_singlethread_workqueue("stmmac_wq");
	if (!priv->wq) {
		dev_err(priv->device, "failed to create workqueue\n");
		return -ENOMEM;
	}

	INIT_WORK(&priv->service_task, stmmac_service_task);

	/* Override with kernel parameters if supplied XXX CRS XXX
	 * this needs to have multiple instances
	 */
	if ((phyaddr >= 0) && (phyaddr <= 31))
		priv->plat->phy_addr = phyaddr;

	if (priv->plat->stmmac_rst) {
		ret = reset_control_assert(priv->plat->stmmac_rst);
		reset_control_deassert(priv->plat->stmmac_rst);
		/* Some reset controllers have only reset callback instead of
		 * assert + deassert callbacks pair.
		 */
		if (ret == -ENOTSUPP)
			reset_control_reset(priv->plat->stmmac_rst);
	}

	/* Init MAC and get the capabilities */
	ret = stmmac_hw_init(priv);
	if (ret)
		goto error_hw_init;

	stmmac_check_ether_addr(priv);

	ndev->netdev_ops = &stmmac_netdev_ops;

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_RXCSUM;

	ret = stmmac_tc_init(priv, priv);
	if (!ret) {
		ndev->hw_features |= NETIF_F_HW_TC;
	}

	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
		if (priv->plat->has_gmac4)
			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
		priv->tso = true;
		dev_info(priv->device, "TSO feature enabled\n");
	}

	if (priv->dma_cap.sphen) {
		ndev->hw_features |= NETIF_F_GRO;
		priv->sph = true;
		dev_info(priv->device, "SPH feature enabled\n");
	}

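	/* Try the DMA addressing width advertised by the HW first and fall
	 * back to a 32-bit mask if the platform cannot provide it.
	 */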
	if (priv->dma_cap.addr64) {
		ret = dma_set_mask_and_coherent(device,
				DMA_BIT_MASK(priv->dma_cap.addr64));
		if (!ret) {
			dev_info(priv->device, "Using %d bits DMA width\n",
				 priv->dma_cap.addr64);

			/*
			 * If more than 32 bits can be addressed, make sure to
			 * enable enhanced addressing mode.
			 */
			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
				priv->plat->dma_cfg->eame = true;
		} else {
			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
			if (ret) {
				dev_err(priv->device, "Failed to set DMA Mask\n");
				goto error_hw_init;
			}

			priv->dma_cap.addr64 = 32;
		}
	}

	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
	/* Both mac100 and gmac support receive VLAN tag detection */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
	if (priv->dma_cap.vlhash) {
		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
	}
	if (priv->dma_cap.vlins) {
		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
		if (priv->dma_cap.dvlan)
			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
	}
#endif
	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	/* Initialize RSS */
	rxq = priv->plat->rx_queues_to_use;
	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);

	if (priv->dma_cap.rssen && priv->plat->rss_en)
		ndev->features |= NETIF_F_RXHASH;

	/* MTU range: 46 - hw-specific max */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	if (priv->plat->has_xgmac)
		ndev->max_mtu = XGMAC_JUMBO_LEN;
	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
		ndev->max_mtu = JUMBO_LEN;
	else
		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
	 * as well as plat->maxmtu < ndev->min_mtu which is an invalid range.
	 */
	if ((priv->plat->maxmtu < ndev->max_mtu) &&
	    (priv->plat->maxmtu >= ndev->min_mtu))
		ndev->max_mtu = priv->plat->maxmtu;
	else if (priv->plat->maxmtu < ndev->min_mtu)
		dev_warn(priv->device,
			 "%s: warning: maxmtu having invalid value (%d)\n",
			 __func__, priv->plat->maxmtu);

	if (flow_ctrl)
		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */

	/* Setup channels NAPI */
	stmmac_napi_add(ndev);

	mutex_init(&priv->lock);

	/* If a specific clk_csr value is passed from the platform
	 * this means that the CSR Clock Range selection cannot be
	 * changed at run-time and it is fixed. Viceversa the driver'll try to
	 * set the MDC clock dynamically according to the csr actual
	 * clock input.
	 */
	if (priv->plat->clk_csr >= 0)
		priv->clk_csr = priv->plat->clk_csr;
	else
		stmmac_clk_csr_set(priv);

	stmmac_check_pcs_mode(priv);

	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI) {
		/* MDIO bus Registration */
		ret = stmmac_mdio_register(ndev);
		if (ret < 0) {
			dev_err(priv->device,
				"%s: MDIO bus (id: %d) registration failed",
				__func__, priv->plat->bus_id);
			goto error_mdio_register;
		}
	}

	ret = stmmac_phy_setup(priv);
	if (ret) {
		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
		goto error_phy_setup;
	}

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->device, "%s: ERROR %i registering the device\n",
			__func__, ret);
		goto error_netdev_register;
	}

	if (priv->plat->serdes_powerup) {
		ret = priv->plat->serdes_powerup(ndev,
						 priv->plat->bsp_priv);

		if (ret < 0)
			goto error_serdes_powerup;
	}

#ifdef CONFIG_DEBUG_FS
	stmmac_init_fs(ndev);
#endif

	return ret;

error_serdes_powerup:
	unregister_netdev(ndev);
error_netdev_register:
	phylink_destroy(priv->phylink);
error_phy_setup:
	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
error_mdio_register:
	stmmac_napi_del(ndev);
error_hw_init:
	destroy_workqueue(priv->wq);

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_probe);

/**
 * stmmac_dvr_remove
5081
 * @dev: device pointer
5082
 * Description: this function resets the TX/RX processes, disables the MAC RX/TX
5083
 * changes the link status, releases the DMA descriptor rings.
5084
 */
5085
int stmmac_dvr_remove(struct device *dev)
5086
{
5087
	struct net_device *ndev = dev_get_drvdata(dev);
5088
	struct stmmac_priv *priv = netdev_priv(ndev);
5089

5090
	netdev_info(priv->dev, "%s: removing driver", __func__);
5091

5092
	stmmac_stop_all_dma(priv);
5093

5094 5095 5096
	if (priv->plat->serdes_powerdown)
		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);

5097
	stmmac_mac_set(priv, priv->ioaddr, false);
5098 5099
	netif_carrier_off(ndev);
	unregister_netdev(ndev);
5100 5101 5102
#ifdef CONFIG_DEBUG_FS
	stmmac_exit_fs(ndev);
#endif
5103
	phylink_destroy(priv->phylink);
5104 5105 5106 5107
	if (priv->plat->stmmac_rst)
		reset_control_assert(priv->plat->stmmac_rst);
	clk_disable_unprepare(priv->plat->pclk);
	clk_disable_unprepare(priv->plat->stmmac_clk);
	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
	destroy_workqueue(priv->wq);
	mutex_destroy(&priv->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);

/**
 * stmmac_suspend - suspend callback
 * @dev: device pointer
 * Description: this is the function to suspend the device and it is called
 * by the platform driver to stop the network queue, release the resources,
 * program the PMT register (for WoL), clean and release driver resources.
 */
int stmmac_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	u32 chan;

	if (!ndev || !netif_running(ndev))
		return 0;

	phylink_mac_change(priv->phylink, false);

	mutex_lock(&priv->lock);

	netif_device_detach(ndev);

	stmmac_disable_all_queues(priv);

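	/* Make sure no TX coalescing timer fires after the queues are stopped */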
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		del_timer_sync(&priv->tx_queue[chan].txtimer);

	/* Stop TX/RX DMA */
	stmmac_stop_all_dma(priv);

	if (priv->plat->serdes_powerdown)
		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);

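	/* Two suspend flavours: with PMT-based Wake-on-LAN the MAC stays powered
	 * and is armed with the configured wolopts; otherwise the PHY is stopped
	 * and the clocks are gated.
	 */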
	/* Enable Power down mode by programming the PMT regs */
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		stmmac_pmt(priv, priv->hw, priv->wolopts);
		priv->irq_wake = 1;
	} else {
		mutex_unlock(&priv->lock);
		rtnl_lock();
		if (device_may_wakeup(priv->device))
			phylink_speed_down(priv->phylink, false);
		phylink_stop(priv->phylink);
		rtnl_unlock();
		mutex_lock(&priv->lock);

		stmmac_mac_set(priv, priv->ioaddr, false);
		pinctrl_pm_select_sleep_state(priv->device);
		/* Disable clock in case of PWM is off */
		clk_disable_unprepare(priv->plat->clk_ptp_ref);
		clk_disable_unprepare(priv->plat->pclk);
		clk_disable_unprepare(priv->plat->stmmac_clk);
	}
	mutex_unlock(&priv->lock);

5173
	priv->speed = SPEED_UNKNOWN;
	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);

/**
 * stmmac_reset_queues_param - reset queue parameters
 * @priv: device pointer
 */
static void stmmac_reset_queues_param(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		rx_q->cur_rx = 0;
		rx_q->dirty_rx = 0;
	}

	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		tx_q->cur_tx = 0;
		tx_q->dirty_tx = 0;
		tx_q->mss = 0;
	}
}

/**
 * stmmac_resume - resume callback
 * @dev: device pointer
 * Description: when resume this function is invoked to setup the DMA and CORE
 * in a usable state.
 */
int stmmac_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret;

	if (!netif_running(ndev))
		return 0;

	/* The Power Down bit in the PM register is cleared
	 * automatically as soon as a magic packet or a Wake-up frame
	 * is received. Anyway, it's better to manually clear
	 * this bit because it can generate problems while resuming
	 * from another device (e.g. serial console).
	 */
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		mutex_lock(&priv->lock);
		stmmac_pmt(priv, priv->hw, 0);
		mutex_unlock(&priv->lock);
		priv->irq_wake = 0;
	} else {
		pinctrl_pm_select_default_state(priv->device);
		/* enable the clk previously disabled */
		clk_prepare_enable(priv->plat->stmmac_clk);
		clk_prepare_enable(priv->plat->pclk);
		if (priv->plat->clk_ptp_ref)
			clk_prepare_enable(priv->plat->clk_ptp_ref);
		/* reset the phy so that it's ready */
		if (priv->mii)
			stmmac_mdio_reset(priv->mii);
	}

	if (priv->plat->serdes_powerup) {
		ret = priv->plat->serdes_powerup(ndev,
						 priv->plat->bsp_priv);

		if (ret < 0)
			return ret;
	}

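	/* Re-program the hardware as in open(): reset the software ring indices,
	 * clear the descriptors and redo the HW setup, coalescing and RX mode.
	 */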
	rtnl_lock();
	mutex_lock(&priv->lock);

	stmmac_reset_queues_param(priv);

	stmmac_clear_descriptors(priv);

	stmmac_hw_setup(ndev, false);
	stmmac_init_coalesce(priv);
	stmmac_set_rx_mode(ndev);

	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);

	stmmac_enable_all_queues(priv);

	mutex_unlock(&priv->lock);
	rtnl_unlock();

	if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
		rtnl_lock();
		phylink_start(priv->phylink);
		/* We may have called phylink_speed_down before */
		phylink_speed_up(priv->phylink);
		rtnl_unlock();
	}

	phylink_mac_change(priv->phylink, true);

	netif_device_attach(ndev);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_resume);

#ifndef MODULE
static int __init stmmac_cmdline_opt(char *str)
{
	char *opt;

	if (!str || !*str)
		return -EINVAL;
	while ((opt = strsep(&str, ",")) != NULL) {
		if (!strncmp(opt, "debug:", 6)) {
			if (kstrtoint(opt + 6, 0, &debug))
				goto err;
		} else if (!strncmp(opt, "phyaddr:", 8)) {
			if (kstrtoint(opt + 8, 0, &phyaddr))
				goto err;
		} else if (!strncmp(opt, "buf_sz:", 7)) {
			if (kstrtoint(opt + 7, 0, &buf_sz))
				goto err;
		} else if (!strncmp(opt, "tc:", 3)) {
			if (kstrtoint(opt + 3, 0, &tc))
				goto err;
		} else if (!strncmp(opt, "watchdog:", 9)) {
			if (kstrtoint(opt + 9, 0, &watchdog))
				goto err;
		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
			if (kstrtoint(opt + 10, 0, &flow_ctrl))
				goto err;
		} else if (!strncmp(opt, "pause:", 6)) {
			if (kstrtoint(opt + 6, 0, &pause))
				goto err;
		} else if (!strncmp(opt, "eee_timer:", 10)) {
			if (kstrtoint(opt + 10, 0, &eee_timer))
				goto err;
		} else if (!strncmp(opt, "chain_mode:", 11)) {
			if (kstrtoint(opt + 11, 0, &chain_mode))
				goto err;
		}
	}
	return 0;

err:
	pr_err("%s: ERROR broken module parameter conversion", __func__);
	return -EINVAL;
}

__setup("stmmaceth=", stmmac_cmdline_opt);
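/* Example (hypothetical values): booting with
 *	stmmaceth=debug:16,eee_timer:500,chain_mode:1
 * would set the message level, the LPI TX timer and the descriptor chain
 * mode from the kernel command line.
 */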
#endif /* MODULE */

static int __init stmmac_init(void)
{
#ifdef CONFIG_DEBUG_FS
	/* Create debugfs main directory if it doesn't exist yet */
	if (!stmmac_fs_dir)
		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
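
	/* The netdevice notifier keeps the per-device debugfs directory in sync
	 * when an interface is renamed.
	 */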
	register_netdevice_notifier(&stmmac_notifier);
#endif

	return 0;
}

static void __exit stmmac_exit(void)
{
#ifdef CONFIG_DEBUG_FS
	unregister_netdevice_notifier(&stmmac_notifier);
	debugfs_remove_recursive(stmmac_fs_dir);
#endif
}

module_init(stmmac_init)
module_exit(stmmac_exit)

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");