// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd


  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include <linux/phylink.h>
#include <net/pkt_cls.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"
#include "dwxgmac2.h"
#include "hwif.h"

#define	STMMAC_ALIGN(x)		__ALIGN_KERNEL(x, SMP_CACHE_BYTES)
#define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, 0644);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
#define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)

static int flow_ctrl = FLOW_AUTO;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define	STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))

/* By default the driver will use the ring mode to manage tx and rx descriptors,
 * but allow user to force to use the chain instead of the ring
 */
static unsigned int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_DEBUG_FS
static void stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
 * errors.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DEFAULT_BUFSIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < rx_queues_cnt)
			napi_disable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_disable(&ch->tx_napi);
	}
}

/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 */
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < rx_queues_cnt)
			napi_enable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_enable(&ch->tx_napi);
	}
}

/**
 * stmmac_stop_all_queues - Stop all queues
 * @priv: driver private structure
 */
static void stmmac_stop_all_queues(struct stmmac_priv *priv)
{
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < tx_queues_cnt; queue++)
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}

/**
 * stmmac_start_all_queues - Start all queues
 * @priv: driver private structure
 */
static void stmmac_start_all_queues(struct stmmac_priv *priv)
{
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < tx_queues_cnt; queue++)
		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
}

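/**
 * stmmac_service_event_schedule - schedule the deferred service task
 * @priv: driver private structure
 * Description: queue the service work unless the interface is going down
 * or the work has already been scheduled.
 */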
static void stmmac_service_event_schedule(struct stmmac_priv *priv)
{
	if (!test_bit(STMMAC_DOWN, &priv->state) &&
	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
		queue_work(priv->wq, &priv->service_task);
}

static void stmmac_global_err(struct stmmac_priv *priv)
{
	netif_carrier_off(priv->dev);
	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
	stmmac_service_event_schedule(priv);
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Otherwise the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* Platform provided default clk_csr would be assumed valid
	 * for all other cases except for the below mentioned ones.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we cannot estimate the proper divider as the frequency of
	 * clk_csr_i is not known. So we do not change the default
	 * divider.
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}

	if (priv->plat->has_sun8i) {
		if (clk_rate > 160000000)
			priv->clk_csr = 0x03;
		else if (clk_rate > 80000000)
			priv->clk_csr = 0x02;
		else if (clk_rate > 40000000)
			priv->clk_csr = 0x01;
		else
			priv->clk_csr = 0;
	}

	if (priv->plat->has_xgmac) {
		if (clk_rate > 400000000)
			priv->clk_csr = 0x5;
		else if (clk_rate > 350000000)
			priv->clk_csr = 0x4;
		else if (clk_rate > 300000000)
			priv->clk_csr = 0x3;
		else if (clk_rate > 250000000)
			priv->clk_csr = 0x2;
		else if (clk_rate > 150000000)
			priv->clk_csr = 0x1;
		else
			priv->clk_csr = 0x0;
	}
}

static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

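/**
 * stmmac_tx_avail - Get number of free TX descriptors
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: compute how many descriptors are still available in the
 * ring, taking the dirty/cur wrap-around into account.
 */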
static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	u32 avail;

	if (tx_q->dirty_tx > tx_q->cur_tx)
		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
	else
		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;

	return avail;
}

/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	u32 dirty;

	if (rx_q->dirty_rx <= rx_q->cur_rx)
		dirty = rx_q->cur_rx - rx_q->dirty_rx;
	else
		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;

	return dirty;
}

/**
 * stmmac_enable_eee_mode - check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function verifies and enters in LPI mode in case of
 * EEE.
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* check if all TX queues have the work finished */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		if (tx_q->dirty_tx != tx_q->cur_tx)
			return; /* still unfinished work */
	}

	/* Check and enter in LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		stmmac_set_eee_mode(priv, priv->hw,
				priv->plat->en_tx_lpi_clockgating);
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function disables EEE and exits from the LPI state,
 * if it is active. This is called by the xmit path.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	stmmac_reset_eee_mode(priv, priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @arg : data hook
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then the MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);

	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts
 *  the related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	int tx_lpi_timer = priv->tx_lpi_timer;

	/* Using PCS we cannot deal with the phy registers at this stage
	 * so we do not support extra features like EEE.
	 */
	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
	    (priv->hw->pcs == STMMAC_PCS_RTBI))
		return false;

	/* Check if MAC core supports the EEE feature. */
	if (!priv->dma_cap.eee)
		return false;

	mutex_lock(&priv->lock);

	/* Check if it needs to be deactivated */
	if (!priv->eee_active) {
		if (priv->eee_enabled) {
			netdev_dbg(priv->dev, "disable EEE\n");
			del_timer_sync(&priv->eee_ctrl_timer);
			stmmac_set_eee_timer(priv, priv->hw, 0, tx_lpi_timer);
		}
		mutex_unlock(&priv->lock);
		return false;
	}

	if (priv->eee_active && !priv->eee_enabled) {
		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
				     tx_lpi_timer);
	}

	mutex_unlock(&priv->lock);
	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	return true;
}

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read the timestamp from the descriptor and pass it to
 * the stack, and also perform some sanity checks.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	bool found = false;
	u64 ns = 0;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status */
	if (stmmac_get_tx_timestamp_status(priv, p)) {
		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
		found = true;
	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
		found = true;
	}

	if (found) {
		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read the received packet's timestamp from the descriptor
 * and pass it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	struct dma_desc *desc = p;
	u64 ns = 0;

	if (!priv->hwts_rx_en)
		return;
	/* For GMAC4, the valid timestamp is from CTX next desc. */
	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
		desc = np;

	/* Check if timestamp is available */
	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else  {
		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
	}
}

/**
 *  stmmac_hwtstamp_set - control hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing(TX)
 *  and incoming(RX) packets time stamping based on user input.
 *  Return Value:
 *  0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	struct timespec64 now;
	u64 temp = 0;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;
	u32 sec_inc = 0;
	u32 value = 0;
	bool xmac;

	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* 'xmac' hardware can support Sync, Pdelay_Req and
			 * Pdelay_resp by setting bit14 and bits17/16 to 01
			 * This leaves Delay_Req timestamps out.
			 * Enable all events *and* general purpose message
			 * timestamping
			 */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
	else {
		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
			 tstamp_all | ptp_v2 | ptp_over_ethernet |
			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
			 ts_master_en | snap_type_sel);
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);

		/* program Sub Second Increment reg */
		stmmac_config_sub_second_increment(priv,
				priv->ptpaddr, priv->plat->clk_ptp_rate,
				xmac, &sec_inc);
		temp = div_u64(1000000000ULL, sec_inc);

		/* Store sub second increment and flags for later use */
		priv->sub_second_inc = sec_inc;
		priv->systime_flags = value;

		/* calculate default added value:
		 * formula is :
		 * addend = (2^32)/freq_div_ratio;
		 * where, freq_div_ratio = 1e9ns/sec_inc
		 */
711
		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
712
		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
713 714

		/* initialize system time */
A
Arnd Bergmann 已提交
715 716 717
		ktime_get_real_ts64(&now);

		/* lower 32 bits of tv_sec are safe until y2106 */
718 719
		stmmac_init_systime(priv, priv->ptpaddr,
				(u32)now.tv_sec, now.tv_nsec);
720 721
	}

722 723
	memcpy(&priv->tstamp_config, &config, sizeof(config));

724
	return copy_to_user(ifr->ifr_data, &config,
725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746
			    sizeof(config)) ? -EFAULT : 0;
}

/**
 *  stmmac_hwtstamp_get - read hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function obtain the current hardware timestamping settings
    as requested.
 */
static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config *config = &priv->tstamp_config;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	return copy_to_user(ifr->ifr_data, config,
			    sizeof(*config)) ? -EFAULT : 0;
747 748
}

749
/**
750
 * stmmac_init_ptp - init PTP
751
 * @priv: driver private structure
752
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
753
 * This is done by looking at the HW cap. register.
754
 * This function also registers the ptp driver.
755
 */
756
static int stmmac_init_ptp(struct stmmac_priv *priv)
757
{
758 759
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

760 761 762
	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

763
	priv->adv_ts = 0;
764 765
	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
	if (xmac && priv->dma_cap.atime_stamp)
766 767 768
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
769 770
		priv->adv_ts = 1;

771 772
	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
773

774 775 776
	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");
777 778 779

	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;
780

781 782 783
	stmmac_ptp_register(priv);

	return 0;
784 785 786 787
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
788 789
	if (priv->plat->clk_ptp_ref)
		clk_disable_unprepare(priv->plat->clk_ptp_ref);
790
	stmmac_ptp_unregister(priv);
791 792
}

793 794 795 796 797 798 799 800 801
/**
 *  stmmac_mac_flow_ctrl - Configure flow control in all queues
 *  @priv: driver private structure
 *  Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;

802 803
	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
			priv->pause, tx_cnt);
804 805
}

806 807 808 809 810
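/**
 * stmmac_validate - phylink validate callback
 * @config: phylink configuration
 * @supported: bitmap of supported link modes
 * @state: current/requested link state
 * Description: restrict the ethtool link mode bitmaps to what the MAC can
 * actually do (speed cap, XGMAC-only modes, no half-duplex with more than
 * one TX queue).
 */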
static void stmmac_validate(struct phylink_config *config,
			    unsigned long *supported,
			    struct phylink_link_state *state)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	int tx_cnt = priv->plat->tx_queues_to_use;
	int max_speed = priv->plat->max_speed;

	phylink_set(mac_supported, 10baseT_Half);
	phylink_set(mac_supported, 10baseT_Full);
	phylink_set(mac_supported, 100baseT_Half);
	phylink_set(mac_supported, 100baseT_Full);
	phylink_set(mac_supported, 1000baseT_Half);
	phylink_set(mac_supported, 1000baseT_Full);
	phylink_set(mac_supported, 1000baseKX_Full);

	phylink_set(mac_supported, Autoneg);
	phylink_set(mac_supported, Pause);
	phylink_set(mac_supported, Asym_Pause);
	phylink_set_port_modes(mac_supported);

	/* Cut down 1G if asked to */
	if ((max_speed > 0) && (max_speed < 1000)) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
	} else if (priv->plat->has_xgmac) {
		phylink_set(mac_supported, 2500baseT_Full);
		phylink_set(mac_supported, 5000baseT_Full);
		phylink_set(mac_supported, 10000baseSR_Full);
		phylink_set(mac_supported, 10000baseLR_Full);
		phylink_set(mac_supported, 10000baseER_Full);
		phylink_set(mac_supported, 10000baseLRM_Full);
		phylink_set(mac_supported, 10000baseT_Full);
		phylink_set(mac_supported, 10000baseKX4_Full);
		phylink_set(mac_supported, 10000baseKR_Full);
	}

	/* Half-Duplex can only work with single queue */
	if (tx_cnt > 1) {
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 1000baseT_Half);
	}

	bitmap_and(supported, supported, mac_supported,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_andnot(supported, supported, mask,
		      __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mac_supported,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_andnot(state->advertising, state->advertising, mask,
		      __ETHTOOL_LINK_MODE_MASK_NBITS);
}

static int stmmac_mac_link_state(struct phylink_config *config,
				 struct phylink_link_state *state)
{
	return -EOPNOTSUPP;
}

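/**
 * stmmac_mac_config - phylink mac_config callback
 * @config: phylink configuration
 * @mode: link autoneg mode
 * @state: requested link state
 * Description: program the MAC_CTRL_REG speed and duplex bits for the
 * requested link state, call any platform fix_mac_speed() hook and set up
 * flow control.
 */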
static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	u32 ctrl;

	ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
	ctrl &= ~priv->hw->link.speed_mask;

	if (state->interface == PHY_INTERFACE_MODE_USXGMII) {
		switch (state->speed) {
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_5000:
			ctrl |= priv->hw->link.xgmii.speed5000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.xgmii.speed2500;
			break;
		default:
			return;
		}
	} else {
		switch (state->speed) {
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		case SPEED_100:
			ctrl |= priv->hw->link.speed100;
			break;
		case SPEED_10:
			ctrl |= priv->hw->link.speed10;
			break;
		default:
			return;
		}
	}

	priv->speed = state->speed;

	if (priv->plat->fix_mac_speed)
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, state->speed);

	if (!state->duplex)
		ctrl &= ~priv->hw->link.duplex;
	else
		ctrl |= priv->hw->link.duplex;

	/* Flow Control operation */
	if (state->pause)
		stmmac_mac_flow_ctrl(priv, state->duplex);

	writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
}

static void stmmac_mac_an_restart(struct phylink_config *config)
{
	/* Not Supported */
}

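/**
 * stmmac_mac_link_down - phylink mac_link_down callback
 * @config: phylink configuration
 * @mode: link autoneg mode
 * @interface: PHY interface mode
 * Description: disable the MAC and tear down EEE/LPI signalling.
 */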
static void stmmac_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	stmmac_mac_set(priv, priv->ioaddr, false);
	priv->eee_active = false;
	stmmac_eee_init(priv);
	stmmac_set_eee_pls(priv, priv->hw, false);
}

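/**
 * stmmac_mac_link_up - phylink mac_link_up callback
 * @config: phylink configuration
 * @mode: link autoneg mode
 * @interface: PHY interface mode
 * @phy: attached PHY device, if any
 * Description: enable the MAC and, when the hardware supports it, try to
 * negotiate and enable EEE with the PHY.
 */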
static void stmmac_mac_link_up(struct phylink_config *config,
			       unsigned int mode, phy_interface_t interface,
			       struct phy_device *phy)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	stmmac_mac_set(priv, priv->ioaddr, true);
	if (phy && priv->dma_cap.eee) {
		priv->eee_active = phy_init_eee(phy, 1) >= 0;
		priv->eee_enabled = stmmac_eee_init(priv);
		stmmac_set_eee_pls(priv, priv->hw, true);
	}
}

static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
	.validate = stmmac_validate,
	.mac_link_state = stmmac_mac_link_state,
	.mac_config = stmmac_mac_config,
	.mac_an_restart = stmmac_mac_an_restart,
	.mac_link_down = stmmac_mac_link_down,
	.mac_link_up = stmmac_mac_link_up,
};

/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS.
 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
	int interface = priv->plat->interface;

	if (priv->dma_cap.pcs) {
		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_RGMII;
		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_SGMII;
		}
	}
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 *  Return value:
 *  0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct device_node *node;
	int ret;

	node = priv->plat->phylink_node;

	if (node)
		ret = phylink_of_phy_connect(priv->phylink, node, 0);

	/* Some DT bindings do not set-up the PHY handle. Let's try to
	 * manually parse it
	 */
	if (!node || ret) {
		int addr = priv->plat->phy_addr;
		struct phy_device *phydev;

		phydev = mdiobus_get_phy(priv->mii, addr);
		if (!phydev) {
			netdev_err(priv->dev, "no phy at addr %d\n", addr);
			return -ENODEV;
		}

		ret = phylink_connect_phy(priv->phylink, phydev);
	}

	return ret;
}

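/**
 * stmmac_phy_setup - create the phylink instance
 * @priv: driver private structure
 * Description: allocate and register a phylink instance for the netdev,
 * using the fwnode and interface mode provided by the platform data.
 */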
static int stmmac_phy_setup(struct stmmac_priv *priv)
{
	struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
	int mode = priv->plat->interface;
	struct phylink *phylink;

	priv->phylink_config.dev = &priv->dev->dev;
	priv->phylink_config.type = PHYLINK_NETDEV;

	phylink = phylink_create(&priv->phylink_config, fwnode,
				 mode, &stmmac_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	priv->phylink = phylink;
	return 0;
}

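/**
 * stmmac_display_rx_rings - dump the RX descriptor rings
 * @priv: driver private structure
 * Description: debug helper that prints the RX descriptor ring of every
 * queue (extended or basic descriptors).
 */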
static void stmmac_display_rx_rings(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	void *head_rx;
	u32 queue;

	/* Display RX rings */
	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		pr_info("\tRX Queue %u rings\n", queue);

		if (priv->extend_desc)
			head_rx = (void *)rx_q->dma_erx;
		else
			head_rx = (void *)rx_q->dma_rx;

		/* Display RX ring */
		stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
	}
}

static void stmmac_display_tx_rings(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	void *head_tx;
	u32 queue;

	/* Display TX rings */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		pr_info("\tTX Queue %d rings\n", queue);

		if (priv->extend_desc)
			head_tx = (void *)tx_q->dma_etx;
		else
			head_tx = (void *)tx_q->dma_tx;

		stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
	}
}

static void stmmac_display_rings(struct stmmac_priv *priv)
{
	/* Display RX ring */
	stmmac_display_rx_rings(priv);

	/* Display TX ring */
	stmmac_display_tx_rings(priv);
}

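/**
 * stmmac_set_bfsize - pick the DMA buffer size for a given MTU
 * @mtu: requested MTU
 * @bufsize: current buffer size
 * Description: choose a buffer size bucket (default, 2 KiB, 4 KiB or
 * 8 KiB) large enough for the requested MTU.
 */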
static int stmmac_set_bfsize(int mtu, int bufsize)
{
	int ret = bufsize;

	if (mtu >= BUF_SIZE_4KiB)
		ret = BUF_SIZE_8KiB;
	else if (mtu >= BUF_SIZE_2KiB)
		ret = BUF_SIZE_4KiB;
	else if (mtu > DEFAULT_BUFSIZE)
		ret = BUF_SIZE_2KiB;
	else
		ret = DEFAULT_BUFSIZE;

	return ret;
}

/**
 * stmmac_clear_rx_descriptors - clear RX descriptors
 * @priv: driver private structure
 * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors,
 * whether basic or extended descriptors are used.
 */
static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int i;

	/* Clear the RX descriptors */
	for (i = 0; i < DMA_RX_SIZE; i++)
		if (priv->extend_desc)
			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
					priv->use_riwt, priv->mode,
					(i == DMA_RX_SIZE - 1),
					priv->dma_buf_sz);
		else
			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
					priv->use_riwt, priv->mode,
					(i == DMA_RX_SIZE - 1),
					priv->dma_buf_sz);
}

/**
 * stmmac_clear_tx_descriptors - clear tx descriptors
 * @priv: driver private structure
 * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors,
 * whether basic or extended descriptors are used.
 */
static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	int i;

	/* Clear the TX descriptors */
	for (i = 0; i < DMA_TX_SIZE; i++)
		if (priv->extend_desc)
			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
					priv->mode, (i == DMA_TX_SIZE - 1));
		else
			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
					priv->mode, (i == DMA_TX_SIZE - 1));
}

/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the TX and RX descriptors,
 * whether basic or extended descriptors are used.
 */
static void stmmac_clear_descriptors(struct stmmac_priv *priv)
{
	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* Clear the RX descriptors */
	for (queue = 0; queue < rx_queue_cnt; queue++)
		stmmac_clear_rx_descriptors(priv, queue);

	/* Clear the TX descriptors */
	for (queue = 0; queue < tx_queue_cnt; queue++)
		stmmac_clear_tx_descriptors(priv, queue);
}

/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag
 * @queue: RX queue index
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
				  int i, gfp_t flags, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

	buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
	if (!buf->page)
		return -ENOMEM;

	if (priv->sph) {
		buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
		if (!buf->sec_page)
			return -ENOMEM;

		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
	} else {
		buf->sec_page = NULL;
	}

	buf->addr = page_pool_get_dma_addr(buf->page);
	stmmac_set_desc_addr(priv, p, buf->addr);
	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
		stmmac_init_desc3(priv, p);

	return 0;
}

/**
 * stmmac_free_rx_buffer - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 * @i: buffer index.
 */
static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

	if (buf->page)
		page_pool_put_page(rx_q->page_pool, buf->page, false);
	buf->page = NULL;

	if (buf->sec_page)
		page_pool_put_page(rx_q->page_pool, buf->sec_page, false);
	buf->sec_page = NULL;
}

/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 * @i: buffer index.
 */
static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

	if (tx_q->tx_skbuff_dma[i].buf) {
		if (tx_q->tx_skbuff_dma[i].map_as_page)
			dma_unmap_page(priv->device,
				       tx_q->tx_skbuff_dma[i].buf,
				       tx_q->tx_skbuff_dma[i].len,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device,
					 tx_q->tx_skbuff_dma[i].buf,
					 tx_q->tx_skbuff_dma[i].len,
					 DMA_TO_DEVICE);
	}

	if (tx_q->tx_skbuff[i]) {
		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
		tx_q->tx_skbuff[i] = NULL;
		tx_q->tx_skbuff_dma[i].buf = 0;
		tx_q->tx_skbuff_dma[i].map_as_page = false;
	}
}

/**
 * init_dma_rx_desc_rings - init the RX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	int ret = -ENOMEM;
	int bfsize = 0;
	int queue;
	int i;

	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
	if (bfsize < 0)
		bfsize = 0;

	if (bfsize < BUF_SIZE_16KiB)
		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);

	priv->dma_buf_sz = bfsize;

	/* RX INITIALIZATION */
	netif_dbg(priv, probe, priv->dev,
		  "SKB addresses:\nskb\t\tskb data\tdma data\n");

	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		netif_dbg(priv, probe, priv->dev,
			  "(%s) dma_rx_phy=0x%08x\n", __func__,
			  (u32)rx_q->dma_rx_phy);

		stmmac_clear_rx_descriptors(priv, queue);

		for (i = 0; i < DMA_RX_SIZE; i++) {
			struct dma_desc *p;

			if (priv->extend_desc)
				p = &((rx_q->dma_erx + i)->basic);
			else
				p = rx_q->dma_rx + i;

			ret = stmmac_init_rx_buffers(priv, p, i, flags,
						     queue);
			if (ret)
				goto err_init_rx_buffers;
		}

		rx_q->cur_rx = 0;
		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);

		/* Setup the chained descriptor addresses */
		if (priv->mode == STMMAC_CHAIN_MODE) {
			if (priv->extend_desc)
				stmmac_mode_init(priv, rx_q->dma_erx,
						rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
			else
				stmmac_mode_init(priv, rx_q->dma_rx,
						rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
		}
	}

	buf_sz = bfsize;

	return 0;

err_init_rx_buffers:
	while (queue >= 0) {
		while (--i >= 0)
			stmmac_free_rx_buffer(priv, queue, i);

		if (queue == 0)
			break;

		i = DMA_RX_SIZE;
		queue--;
	}

	return ret;
}

/**
 * init_dma_tx_desc_rings - init the TX descriptor rings
 * @dev: net device structure.
 * Description: this function initializes the DMA TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_tx_desc_rings(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;
	int i;

	for (queue = 0; queue < tx_queue_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		netif_dbg(priv, probe, priv->dev,
			  "(%s) dma_tx_phy=0x%08x\n", __func__,
			 (u32)tx_q->dma_tx_phy);

		/* Setup the chained descriptor addresses */
		if (priv->mode == STMMAC_CHAIN_MODE) {
			if (priv->extend_desc)
				stmmac_mode_init(priv, tx_q->dma_etx,
						tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
			else
				stmmac_mode_init(priv, tx_q->dma_tx,
						tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
		}

		for (i = 0; i < DMA_TX_SIZE; i++) {
			struct dma_desc *p;
			if (priv->extend_desc)
				p = &((tx_q->dma_etx + i)->basic);
			else
				p = tx_q->dma_tx + i;

			stmmac_clear_desc(priv, p);

			tx_q->tx_skbuff_dma[i].buf = 0;
			tx_q->tx_skbuff_dma[i].map_as_page = false;
			tx_q->tx_skbuff_dma[i].len = 0;
			tx_q->tx_skbuff_dma[i].last_segment = false;
			tx_q->tx_skbuff[i] = NULL;
		}

		tx_q->dirty_tx = 0;
		tx_q->cur_tx = 0;
		tx_q->mss = 0;

		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	return 0;
}

/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	ret = init_dma_rx_desc_rings(dev, flags);
	if (ret)
		return ret;

	ret = init_dma_tx_desc_rings(dev);

	stmmac_clear_descriptors(priv);

	if (netif_msg_hw(priv))
		stmmac_display_rings(priv);

	return ret;
}

/**
 * dma_free_rx_skbufs - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 */
static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
{
	int i;

	for (i = 0; i < DMA_RX_SIZE; i++)
		stmmac_free_rx_buffer(priv, queue, i);
}

/**
 * dma_free_tx_skbufs - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 */
static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
{
	int i;

	for (i = 0; i < DMA_TX_SIZE; i++)
		stmmac_free_tx_buffer(priv, queue, i);
}

/**
 * free_dma_rx_desc_resources - free RX dma desc resources
 * @priv: private structure
 */
static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
{
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 queue;

	/* Free RX queue resources */
	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		/* Release the DMA RX socket buffers */
		dma_free_rx_skbufs(priv, queue);

		/* Free DMA regions of consistent memory previously allocated */
		if (!priv->extend_desc)
			dma_free_coherent(priv->device,
					  DMA_RX_SIZE * sizeof(struct dma_desc),
					  rx_q->dma_rx, rx_q->dma_rx_phy);
		else
			dma_free_coherent(priv->device, DMA_RX_SIZE *
					  sizeof(struct dma_extended_desc),
					  rx_q->dma_erx, rx_q->dma_rx_phy);

		kfree(rx_q->buf_pool);
		if (rx_q->page_pool) {
			page_pool_request_shutdown(rx_q->page_pool);
			page_pool_destroy(rx_q->page_pool);
		}
	}
}

/**
 * free_dma_tx_desc_resources - free TX dma desc resources
 * @priv: private structure
 */
static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
{
	u32 tx_count = priv->plat->tx_queues_to_use;
	u32 queue;

	/* Free TX queue resources */
	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		/* Release the DMA TX socket buffers */
		dma_free_tx_skbufs(priv, queue);

		/* Free DMA regions of consistent memory previously allocated */
		if (!priv->extend_desc)
			dma_free_coherent(priv->device,
					  DMA_TX_SIZE * sizeof(struct dma_desc),
					  tx_q->dma_tx, tx_q->dma_tx_phy);
		else
			dma_free_coherent(priv->device, DMA_TX_SIZE *
					  sizeof(struct dma_extended_desc),
					  tx_q->dma_etx, tx_q->dma_tx_phy);

		kfree(tx_q->tx_skbuff_dma);
		kfree(tx_q->tx_skbuff);
	}
}

/**
 * alloc_dma_rx_desc_resources - alloc RX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffer in order to
 * allow the zero-copy mechanism.
 */
static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
{
	u32 rx_count = priv->plat->rx_queues_to_use;
	int ret = -ENOMEM;
	u32 queue;

	/* RX queues buffers and DMA */
	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
		struct page_pool_params pp_params = { 0 };

		rx_q->queue_index = queue;
		rx_q->priv_data = priv;

		pp_params.flags = PP_FLAG_DMA_MAP;
		pp_params.pool_size = DMA_RX_SIZE;
		pp_params.order = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
		pp_params.nid = dev_to_node(priv->device);
		pp_params.dev = priv->device;
		pp_params.dma_dir = DMA_FROM_DEVICE;

		rx_q->page_pool = page_pool_create(&pp_params);
		if (IS_ERR(rx_q->page_pool)) {
			ret = PTR_ERR(rx_q->page_pool);
			rx_q->page_pool = NULL;
			goto err_dma;
		}

		rx_q->buf_pool = kcalloc(DMA_RX_SIZE, sizeof(*rx_q->buf_pool),
					 GFP_KERNEL);
		if (!rx_q->buf_pool)
			goto err_dma;

		if (priv->extend_desc) {
			rx_q->dma_erx = dma_alloc_coherent(priv->device,
							   DMA_RX_SIZE * sizeof(struct dma_extended_desc),
							   &rx_q->dma_rx_phy,
							   GFP_KERNEL);
			if (!rx_q->dma_erx)
				goto err_dma;

		} else {
			rx_q->dma_rx = dma_alloc_coherent(priv->device,
							  DMA_RX_SIZE * sizeof(struct dma_desc),
							  &rx_q->dma_rx_phy,
							  GFP_KERNEL);
			if (!rx_q->dma_rx)
				goto err_dma;
		}
	}

	return 0;

err_dma:
	free_dma_rx_desc_resources(priv);

	return ret;
}

/**
 * alloc_dma_tx_desc_resources - alloc TX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffer in order to
 * allow the zero-copy mechanism.
 */
static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
{
	u32 tx_count = priv->plat->tx_queues_to_use;
	int ret = -ENOMEM;
	u32 queue;

	/* TX queues buffers and DMA */
	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		tx_q->queue_index = queue;
		tx_q->priv_data = priv;

		tx_q->tx_skbuff_dma = kcalloc(DMA_TX_SIZE,
					      sizeof(*tx_q->tx_skbuff_dma),
					      GFP_KERNEL);
		if (!tx_q->tx_skbuff_dma)
			goto err_dma;

		tx_q->tx_skbuff = kcalloc(DMA_TX_SIZE,
					  sizeof(struct sk_buff *),
					  GFP_KERNEL);
		if (!tx_q->tx_skbuff)
			goto err_dma;

		if (priv->extend_desc) {
			tx_q->dma_etx = dma_alloc_coherent(priv->device,
							   DMA_TX_SIZE * sizeof(struct dma_extended_desc),
							   &tx_q->dma_tx_phy,
							   GFP_KERNEL);
			if (!tx_q->dma_etx)
				goto err_dma;
		} else {
			tx_q->dma_tx = dma_alloc_coherent(priv->device,
							  DMA_TX_SIZE * sizeof(struct dma_desc),
							  &tx_q->dma_tx_phy,
							  GFP_KERNEL);
			if (!tx_q->dma_tx)
				goto err_dma;
		}
	}

	return 0;

err_dma:
	free_dma_tx_desc_resources(priv);

	return ret;
}

/**
 * alloc_dma_desc_resources - alloc TX/RX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffer in order to
 * allow the zero-copy mechanism.
 */
static int alloc_dma_desc_resources(struct stmmac_priv *priv)
{
	/* RX Allocation */
	int ret = alloc_dma_rx_desc_resources(priv);

	if (ret)
		return ret;

	ret = alloc_dma_tx_desc_resources(priv);

	return ret;
}

/**
 * free_dma_desc_resources - free dma desc resources
 * @priv: private structure
 */
static void free_dma_desc_resources(struct stmmac_priv *priv)
{
	/* Release the DMA RX socket buffers */
	free_dma_rx_desc_resources(priv);

	/* Release the DMA TX socket buffers */
	free_dma_tx_desc_resources(priv);
}

J
jpinto 已提交
1694 1695 1696 1697 1698 1699 1700
/**
 *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
 *  @priv: driver private structure
 *  Description: It is used for enabling the rx queues in the MAC
 */
static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
{
1701 1702 1703
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	int queue;
	u8 mode;
J
jpinto 已提交
1704

1705 1706
	for (queue = 0; queue < rx_queues_count; queue++) {
		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1707
		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1708
	}
J
jpinto 已提交
1709 1710
}

1711 1712 1713 1714 1715 1716 1717 1718 1719 1720
/**
 * stmmac_start_rx_dma - start RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This starts a RX DMA channel
 */
static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1721
	stmmac_start_rx(priv, priv->ioaddr, chan);
1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733
}

/**
 * stmmac_start_tx_dma - start TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This starts a TX DMA channel
 */
static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1734
	stmmac_start_tx(priv, priv->ioaddr, chan);
1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746
}

/**
 * stmmac_stop_rx_dma - stop RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This stops a RX DMA channel
 */
static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1747
	stmmac_stop_rx(priv, priv->ioaddr, chan);
1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759
}

/**
 * stmmac_stop_tx_dma - stop TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This stops a TX DMA channel
 */
static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
	stmmac_stop_tx(priv, priv->ioaddr, chan);
}

/**
 * stmmac_start_all_dma - start all RX and TX DMA channels
 * @priv: driver private structure
 * Description:
 * This starts all the RX and TX DMA channels
 */
static void stmmac_start_all_dma(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan = 0;

	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_start_rx_dma(priv, chan);

	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_start_tx_dma(priv, chan);
}

/**
 * stmmac_stop_all_dma - stop all RX and TX DMA channels
 * @priv: driver private structure
 * Description:
 * This stops the RX and TX DMA channels
 */
static void stmmac_stop_all_dma(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan = 0;

	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_stop_rx_dma(priv, chan);

	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_stop_tx_dma(priv, chan);
}

/**
 *  stmmac_dma_operation_mode - HW DMA operation mode
 *  @priv: driver private structure
 *  Description: it is used for configuring the DMA operation mode register in
 *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
 */
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	int rxfifosz = priv->plat->rx_fifo_size;
	int txfifosz = priv->plat->tx_fifo_size;
	u32 txmode = 0;
	u32 rxmode = 0;
	u32 chan = 0;
	u8 qmode = 0;

	if (rxfifosz == 0)
		rxfifosz = priv->dma_cap.rx_fifo_size;
	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	/* Adjust for real per queue fifo size */
	rxfifosz /= rx_channels_count;
	txfifosz /= tx_channels_count;

	if (priv->plat->force_thresh_dma_mode) {
		txmode = tc;
		rxmode = tc;
	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
		/*
		 * In case of GMAC, SF mode can be enabled
		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE is actually supported
		 * 2) There is no bugged Jumbo frame support
		 *    that needs to not insert csum in the TDES.
		 */
		txmode = SF_DMA_MODE;
		rxmode = SF_DMA_MODE;
		priv->xstats.threshold = SF_DMA_MODE;
	} else {
		txmode = tc;
		rxmode = SF_DMA_MODE;
	}

	/* configure all channels */
	for (chan = 0; chan < rx_channels_count; chan++) {
		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;

		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
				rxfifosz, qmode);
		stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
				chan);
	}

	for (chan = 0; chan < tx_channels_count; chan++) {
		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;

		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
				txfifosz, qmode);
	}
}
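
/* Worked example (illustrative): with a platform RX FIFO of 8192 bytes and
 * four RX queues in use, each queue is programmed with a 2048 byte share,
 * since rxfifosz is divided by rx_channels_count above. In the default
 * branch TX uses the threshold value 'tc' while RX stays in
 * Store-And-Forward mode.
 */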

/**
 * stmmac_tx_clean - to manage the transmission completion
 * @priv: driver private structure
 * @budget: napi budget limiting this function's packet handling
 * @queue: TX queue index
 * Description: it reclaims the transmit resources after transmission completes.
 */
static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	unsigned int bytes_compl = 0, pkts_compl = 0;
	unsigned int entry, count = 0;

	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));

	priv->xstats.tx_clean++;

	entry = tx_q->dirty_tx;
	while ((entry != tx_q->cur_tx) && (count < budget)) {
		struct sk_buff *skb = tx_q->tx_skbuff[entry];
		struct dma_desc *p;
		int status;

		if (priv->extend_desc)
			p = (struct dma_desc *)(tx_q->dma_etx + entry);
		else
			p = tx_q->dma_tx + entry;

		status = stmmac_tx_status(priv, &priv->dev->stats,
				&priv->xstats, p, priv->ioaddr);
		/* Check if the descriptor is owned by the DMA */
		if (unlikely(status & tx_dma_own))
			break;

		count++;

		/* Make sure descriptor fields are read after reading
		 * the own bit.
		 */
		dma_rmb();

		/* Just consider the last segment and ...*/
		if (likely(!(status & tx_not_ls))) {
			/* ... verify the status error condition */
			if (unlikely(status & tx_err)) {
				priv->dev->stats.tx_errors++;
			} else {
				priv->dev->stats.tx_packets++;
				priv->xstats.tx_pkt_n++;
			}
			stmmac_get_tx_hwtstamp(priv, p, skb);
		}

		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
			if (tx_q->tx_skbuff_dma[entry].map_as_page)
				dma_unmap_page(priv->device,
					       tx_q->tx_skbuff_dma[entry].buf,
					       tx_q->tx_skbuff_dma[entry].len,
					       DMA_TO_DEVICE);
			else
				dma_unmap_single(priv->device,
						 tx_q->tx_skbuff_dma[entry].buf,
						 tx_q->tx_skbuff_dma[entry].len,
						 DMA_TO_DEVICE);
			tx_q->tx_skbuff_dma[entry].buf = 0;
			tx_q->tx_skbuff_dma[entry].len = 0;
			tx_q->tx_skbuff_dma[entry].map_as_page = false;
		}

		stmmac_clean_desc3(priv, tx_q, p);

		tx_q->tx_skbuff_dma[entry].last_segment = false;
		tx_q->tx_skbuff_dma[entry].is_jumbo = false;

		if (likely(skb != NULL)) {
			pkts_compl++;
			bytes_compl += skb->len;
			dev_consume_skb_any(skb);
			tx_q->tx_skbuff[entry] = NULL;
		}

		stmmac_release_tx_desc(priv, p, priv->mode);

		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
	}
	tx_q->dirty_tx = entry;

	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
				  pkts_compl, bytes_compl);

	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
								queue))) &&
	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {

		netif_dbg(priv, tx_done, priv->dev,
			  "%s: restart transmit\n", __func__);
		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
		stmmac_enable_eee_mode(priv);
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
	}

	/* We still have pending packets, let's call for a new scheduling */
	if (tx_q->dirty_tx != tx_q->cur_tx)
		mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));

	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));

	return count;
}
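
/* Note: a queue stopped by the xmit path is only woken again once at least
 * STMMAC_TX_THRESH (DMA_TX_SIZE / 4) descriptors are free, so the queue is
 * not restarted for every single reclaimed descriptor.
 */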

/**
 * stmmac_tx_err - to manage the tx error
 * @priv: driver private structure
 * @chan: channel index
 * Description: it cleans the descriptors and restarts the transmission
 * in case of transmission errors.
 */
static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
	int i;

	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));

	stmmac_stop_tx_dma(priv, chan);
	dma_free_tx_skbufs(priv, chan);
	for (i = 0; i < DMA_TX_SIZE; i++)
		if (priv->extend_desc)
			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
					priv->mode, (i == DMA_TX_SIZE - 1));
		else
			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
					priv->mode, (i == DMA_TX_SIZE - 1));
	tx_q->dirty_tx = 0;
	tx_q->cur_tx = 0;
	tx_q->mss = 0;
	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
	stmmac_start_tx_dma(priv, chan);

	priv->dev->stats.tx_errors++;
	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
}

/**
 *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
 *  @priv: driver private structure
 *  @txmode: TX operating mode
 *  @rxmode: RX operating mode
 *  @chan: channel index
 *  Description: it is used for configuring of the DMA operation mode in
 *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
 *  mode.
 */
static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
					  u32 rxmode, u32 chan)
{
	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	int rxfifosz = priv->plat->rx_fifo_size;
	int txfifosz = priv->plat->tx_fifo_size;

	if (rxfifosz == 0)
		rxfifosz = priv->dma_cap.rx_fifo_size;
	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	/* Adjust for real per queue fifo size */
	rxfifosz /= rx_channels_count;
	txfifosz /= tx_channels_count;

	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
}

static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
{
	int ret;

	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
	if (ret && (ret != -EINVAL)) {
		stmmac_global_err(priv);
		return true;
	}

	return false;
}

static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
{
	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
						 &priv->xstats, chan);
	struct stmmac_channel *ch = &priv->channel[chan];

	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
		if (napi_schedule_prep(&ch->rx_napi)) {
			stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
			__napi_schedule_irqoff(&ch->rx_napi);
			status |= handle_tx;
		}
	}

	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use))
		napi_schedule_irqoff(&ch->tx_napi);

	return status;
}
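
/* Note: when the RX NAPI is scheduled above, handle_tx is forced into the
 * returned status as well, so the TX NAPI of the same channel is kicked and
 * completed descriptors are reclaimed together with the RX work.
 */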

/**
 * stmmac_dma_interrupt - DMA ISR
 * @priv: driver private structure
 * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine and schedules the poll method in case
 * some work can be done.
 */
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
	u32 tx_channel_count = priv->plat->tx_queues_to_use;
	u32 rx_channel_count = priv->plat->rx_queues_to_use;
	u32 channels_to_check = tx_channel_count > rx_channel_count ?
				tx_channel_count : rx_channel_count;
	u32 chan;
	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];

	/* Make sure we never check beyond our status buffer. */
	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
		channels_to_check = ARRAY_SIZE(status);

	for (chan = 0; chan < channels_to_check; chan++)
		status[chan] = stmmac_napi_check(priv, chan);

	for (chan = 0; chan < tx_channel_count; chan++) {
		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
			/* Try to bump up the dma threshold on this failure */
			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
			    (tc <= 256)) {
				tc += 64;
				if (priv->plat->force_thresh_dma_mode)
					stmmac_set_dma_operation_mode(priv,
								      tc,
								      tc,
								      chan);
				else
					stmmac_set_dma_operation_mode(priv,
								    tc,
								    SF_DMA_MODE,
								    chan);
				priv->xstats.threshold = tc;
			}
		} else if (unlikely(status[chan] == tx_hard_error)) {
			stmmac_tx_err(priv, chan);
		}
	}
}

/**
 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
 * @priv: driver private structure
 * Description: this masks the MMC irq; in fact, the counters are managed in SW.
 */
static void stmmac_mmc_setup(struct stmmac_priv *priv)
{
	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;

	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);

	if (priv->dma_cap.rmon) {
		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
	} else
		netdev_info(priv->dev, "No MAC Management Counters available\n");
}

/**
 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
 * @priv: driver private structure
 * Description:
 *  new GMAC chip generations have a new register to indicate the
 *  presence of the optional feature/functions.
 *  This can also be used to override the value passed through the
 *  platform and necessary for old MAC10/100 and GMAC chips.
 */
static int stmmac_get_hw_features(struct stmmac_priv *priv)
{
	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
}

/**
 * stmmac_check_ether_addr - check if the MAC addr is valid
 * @priv: driver private structure
 * Description:
 * it verifies that the MAC address is valid; in case of failure it
 * generates a random MAC address
 */
static void stmmac_check_ether_addr(struct stmmac_priv *priv)
{
	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
		if (!is_valid_ether_addr(priv->dev->dev_addr))
			eth_hw_addr_random(priv->dev);
		dev_info(priv->device, "device MAC address %pM\n",
			 priv->dev->dev_addr);
	}
}

/**
 * stmmac_init_dma_engine - DMA init.
 * @priv: driver private structure
 * Description:
 * It inits the DMA invoking the specific MAC/GMAC callback.
 * Some DMA parameters can be passed from the platform;
 * in case these are not passed, a default is kept for the MAC or GMAC.
 */
static int stmmac_init_dma_engine(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
	struct stmmac_rx_queue *rx_q;
	struct stmmac_tx_queue *tx_q;
	u32 chan = 0;
	int atds = 0;
	int ret = 0;

	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
		dev_err(priv->device, "Invalid DMA configuration\n");
		return -EINVAL;
	}

	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
		atds = 1;

	ret = stmmac_reset(priv, priv->ioaddr);
	if (ret) {
		dev_err(priv->device, "Failed to reset the dma\n");
		return ret;
	}

	/* DMA Configuration */
	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);

	if (priv->plat->axi)
		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);

	/* DMA CSR Channel configuration */
	for (chan = 0; chan < dma_csr_ch; chan++)
		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);

	/* DMA RX Channel Configuration */
	for (chan = 0; chan < rx_channels_count; chan++) {
		rx_q = &priv->rx_queue[chan];

		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    rx_q->dma_rx_phy, chan);

		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
			    (DMA_RX_SIZE * sizeof(struct dma_desc));
		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
				       rx_q->rx_tail_addr, chan);
	}

	/* DMA TX Channel Configuration */
	for (chan = 0; chan < tx_channels_count; chan++) {
		tx_q = &priv->tx_queue[chan];

		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    tx_q->dma_tx_phy, chan);

		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
				       tx_q->tx_tail_addr, chan);
	}

	return ret;
}

static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

	mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
}
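
/* Note: the mitigation timer is (re)armed for every queued frame that does
 * not set the interrupt-on-completion bit; if no IC-tagged frame follows,
 * stmmac_tx_timer() fires and schedules the TX NAPI so completions are
 * still reclaimed promptly.
 */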

/**
 * stmmac_tx_timer - mitigation sw timer for tx.
 * @t: pointer to the per-queue txtimer (embedded in struct stmmac_tx_queue)
 * Description:
 * This is the timer handler that schedules the TX NAPI, which in turn runs
 * stmmac_tx_clean.
 */
static void stmmac_tx_timer(struct timer_list *t)
{
	struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
	struct stmmac_priv *priv = tx_q->priv_data;
	struct stmmac_channel *ch;

	ch = &priv->channel[tx_q->queue_index];

	/*
	 * If NAPI is already running we can miss some events. Let's rearm
	 * the timer and try again.
	 */
	if (likely(napi_schedule_prep(&ch->tx_napi)))
		__napi_schedule(&ch->tx_napi);
	else
		mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
}

/**
 * stmmac_init_coalesce - init mitigation options.
 * @priv: driver private structure
 * Description:
 * This inits the coalesce parameters: i.e. timer rate,
 * timer handler and default threshold used for enabling the
 * interrupt on completion bit.
 */
static void stmmac_init_coalesce(struct stmmac_priv *priv)
{
	u32 tx_channel_count = priv->plat->tx_queues_to_use;
	u32 chan;

	priv->tx_coal_frames = STMMAC_TX_FRAMES;
	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
	priv->rx_coal_frames = STMMAC_RX_FRAMES;

	for (chan = 0; chan < tx_channel_count; chan++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];

		timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
	}
}

static void stmmac_set_rings_length(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan;

	/* set TX ring length */
	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_set_tx_ring_len(priv, priv->ioaddr,
				(DMA_TX_SIZE - 1), chan);

	/* set RX ring length */
	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_set_rx_ring_len(priv, priv->ioaddr,
				(DMA_RX_SIZE - 1), chan);
}
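
/* Note: the ring-length registers are written with DMA_TX_SIZE - 1 and
 * DMA_RX_SIZE - 1, i.e. the index of the last descriptor rather than the
 * raw descriptor count.
 */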

/**
 *  stmmac_set_tx_queue_weight - Set TX queue weight
 *  @priv: driver private structure
 *  Description: It is used for setting TX queues weight
 */
static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 weight;
	u32 queue;

	for (queue = 0; queue < tx_queues_count; queue++) {
		weight = priv->plat->tx_queues_cfg[queue].weight;
		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
	}
}

/**
 *  stmmac_configure_cbs - Configure CBS in TX queue
 *  @priv: driver private structure
 *  Description: It is used for configuring CBS in AVB TX queues
 */
static void stmmac_configure_cbs(struct stmmac_priv *priv)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 mode_to_use;
	u32 queue;

	/* queue 0 is reserved for legacy traffic */
	for (queue = 1; queue < tx_queues_count; queue++) {
		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
		if (mode_to_use == MTL_QUEUE_DCB)
			continue;

		stmmac_config_cbs(priv, priv->hw,
				priv->plat->tx_queues_cfg[queue].send_slope,
				priv->plat->tx_queues_cfg[queue].idle_slope,
				priv->plat->tx_queues_cfg[queue].high_credit,
				priv->plat->tx_queues_cfg[queue].low_credit,
				queue);
	}
}

/**
 *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
 *  @priv: driver private structure
 *  Description: It is used for mapping RX queues to RX dma channels
 */
static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 queue;
	u32 chan;

	for (queue = 0; queue < rx_queues_count; queue++) {
		chan = priv->plat->rx_queues_cfg[queue].chan;
		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
	}
}

/**
 *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
 *  @priv: driver private structure
 *  Description: It is used for configuring the RX Queue Priority
 */
static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 queue;
	u32 prio;

	for (queue = 0; queue < rx_queues_count; queue++) {
		if (!priv->plat->rx_queues_cfg[queue].use_prio)
			continue;

		prio = priv->plat->rx_queues_cfg[queue].prio;
		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
	}
}

/**
 *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
 *  @priv: driver private structure
 *  Description: It is used for configuring the TX Queue Priority
 */
static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 queue;
	u32 prio;

	for (queue = 0; queue < tx_queues_count; queue++) {
		if (!priv->plat->tx_queues_cfg[queue].use_prio)
			continue;

		prio = priv->plat->tx_queues_cfg[queue].prio;
		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
	}
}

/**
 *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
 *  @priv: driver private structure
 *  Description: It is used for configuring the RX queue routing
 */
static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 queue;
	u8 packet;

	for (queue = 0; queue < rx_queues_count; queue++) {
		/* no specific packet type routing specified for the queue */
		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
			continue;

		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
	}
}

static void stmmac_mac_config_rss(struct stmmac_priv *priv)
{
	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
		priv->rss.enable = false;
		return;
	}

	if (priv->dev->features & NETIF_F_RXHASH)
		priv->rss.enable = true;
	else
		priv->rss.enable = false;

	stmmac_rss_configure(priv, priv->hw, &priv->rss,
			     priv->plat->rx_queues_to_use);
}
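
/* Note: RSS is only turned on when the hardware advertises it
 * (dma_cap.rssen) and the platform allows it (plat->rss_en); the function
 * returns early otherwise. With NETIF_F_RXHASH cleared it still programs
 * the MAC, but with RSS disabled.
 */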

/**
 *  stmmac_mtl_configuration - Configure MTL
 *  @priv: driver private structure
 *  Description: It is used for configuring MTL
 */
static void stmmac_mtl_configuration(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 tx_queues_count = priv->plat->tx_queues_to_use;

	if (tx_queues_count > 1)
		stmmac_set_tx_queue_weight(priv);

	/* Configure MTL RX algorithms */
	if (rx_queues_count > 1)
		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
				priv->plat->rx_sched_algorithm);

	/* Configure MTL TX algorithms */
	if (tx_queues_count > 1)
		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
				priv->plat->tx_sched_algorithm);

	/* Configure CBS in AVB TX queues */
	if (tx_queues_count > 1)
		stmmac_configure_cbs(priv);

	/* Map RX MTL to DMA channels */
	stmmac_rx_queue_dma_chan_map(priv);

	/* Enable MAC RX Queues */
	stmmac_mac_enable_rx_queues(priv);

	/* Set RX priorities */
	if (rx_queues_count > 1)
		stmmac_mac_config_rx_queues_prio(priv);

	/* Set TX priorities */
	if (tx_queues_count > 1)
		stmmac_mac_config_tx_queues_prio(priv);

	/* Set RX routing */
	if (rx_queues_count > 1)
		stmmac_mac_config_rx_queues_routing(priv);

	/* Receive Side Scaling */
	if (rx_queues_count > 1)
		stmmac_mac_config_rss(priv);
}

static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
{
	if (priv->dma_cap.asp) {
		netdev_info(priv->dev, "Enabling Safety Features\n");
		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
	} else {
		netdev_info(priv->dev, "No Safety Features support found\n");
	}
}

/**
 * stmmac_hw_setup - setup mac in a usable state.
 *  @dev : pointer to the device structure.
 *  @init_ptp: initialize PTP if set
 *  Description:
 *  this is the main function to setup the HW in a usable state because the
 *  dma engine is reset, the core registers are configured (e.g. AXI,
 *  Checksum features, timers). The DMA is ready to start receiving and
 *  transmitting.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 chan;
	int ret;

	/* DMA initialization and SW reset */
	ret = stmmac_init_dma_engine(priv);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
			   __func__);
		return ret;
	}

	/* Copy the MAC addr into the HW  */
	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);

	/* PS and related bits will be programmed according to the speed */
	if (priv->hw->pcs) {
		int speed = priv->plat->mac_port_sel_speed;

		if ((speed == SPEED_10) || (speed == SPEED_100) ||
		    (speed == SPEED_1000)) {
			priv->hw->ps = speed;
		} else {
			dev_warn(priv->device, "invalid port speed\n");
			priv->hw->ps = 0;
		}
	}

	/* Initialize the MAC Core */
	stmmac_core_init(priv, priv->hw, dev);

	/* Initialize MTL*/
	stmmac_mtl_configuration(priv);

	/* Initialize Safety Features */
	stmmac_safety_feat_configuration(priv);

	ret = stmmac_rx_ipc(priv, priv->hw);
	if (!ret) {
		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
		priv->hw->rx_csum = 0;
	}

	/* Enable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, true);

	/* Set the HW DMA mode and the COE */
	stmmac_dma_operation_mode(priv);

	stmmac_mmc_setup(priv);

	if (init_ptp) {
		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
		if (ret < 0)
			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);

		ret = stmmac_init_ptp(priv);
		if (ret == -EOPNOTSUPP)
			netdev_warn(priv->dev, "PTP not supported by HW\n");
		else if (ret)
			netdev_warn(priv->dev, "PTP init failed\n");
	}

	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;

	if (priv->use_riwt) {
		ret = stmmac_rx_watchdog(priv, priv->ioaddr, MIN_DMA_RIWT, rx_cnt);
		if (!ret)
			priv->rx_riwt = MIN_DMA_RIWT;
	}

	if (priv->hw->pcs)
		stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);

	/* set TX and RX rings length */
	stmmac_set_rings_length(priv);

	/* Enable TSO */
	if (priv->tso) {
		for (chan = 0; chan < tx_cnt; chan++)
			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
	}

	/* Enable Split Header */
	if (priv->sph && priv->hw->rx_csum) {
		for (chan = 0; chan < rx_cnt; chan++)
			stmmac_enable_sph(priv, priv->ioaddr, 1, chan);
	}

	/* VLAN Tag Insertion */
	if (priv->dma_cap.vlins)
		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);

	/* Start the ball rolling... */
	stmmac_start_all_dma(priv);

	return 0;
}

static void stmmac_hw_teardown(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	clk_disable_unprepare(priv->plat->clk_ptp_ref);
}

/**
 *  stmmac_open - open entry point of the driver
 *  @dev : pointer to the device structure.
 *  Description:
 *  This function is the open entry point of the driver.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int stmmac_open(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 chan;
	int ret;

	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI) {
		ret = stmmac_init_phy(dev);
		if (ret) {
			netdev_err(priv->dev,
				   "%s: Cannot attach to PHY (error: %d)\n",
				   __func__, ret);
			return ret;
		}
	}

	/* Extra statistics */
	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
	priv->xstats.threshold = tc;

	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
	priv->rx_copybreak = STMMAC_RX_COPYBREAK;

	ret = alloc_dma_desc_resources(priv);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
			   __func__);
		goto dma_desc_error;
	}

	ret = init_dma_desc_rings(dev, GFP_KERNEL);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
			   __func__);
		goto init_error;
	}

	ret = stmmac_hw_setup(dev, true);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
		goto init_error;
	}

	stmmac_init_coalesce(priv);

	phylink_start(priv->phylink);

	/* Request the IRQ lines */
	ret = request_irq(dev->irq, stmmac_interrupt,
			  IRQF_SHARED, dev->name, dev);
	if (unlikely(ret < 0)) {
		netdev_err(priv->dev,
			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
			   __func__, dev->irq, ret);
		goto irq_error;
	}

	/* Request the Wake IRQ in case of another line is used for WoL */
	if (priv->wol_irq != dev->irq) {
		ret = request_irq(priv->wol_irq, stmmac_interrupt,
				  IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
				   __func__, priv->wol_irq, ret);
			goto wolirq_error;
		}
	}

	/* Request the LPI IRQ in case a separate line is used */
	if (priv->lpi_irq > 0) {
		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
				  dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
				   __func__, priv->lpi_irq, ret);
			goto lpiirq_error;
		}
	}

	stmmac_enable_all_queues(priv);
	stmmac_start_all_queues(priv);

	return 0;

lpiirq_error:
	if (priv->wol_irq != dev->irq)
		free_irq(priv->wol_irq, dev);
wolirq_error:
	free_irq(dev->irq, dev);
irq_error:
	phylink_stop(priv->phylink);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		del_timer_sync(&priv->tx_queue[chan].txtimer);

	stmmac_hw_teardown(dev);
init_error:
	free_dma_desc_resources(priv);
dma_desc_error:
	phylink_disconnect_phy(priv->phylink);
	return ret;
}

/**
 *  stmmac_release - close entry point of the driver
 *  @dev : device pointer.
 *  Description:
 *  This is the stop entry point of the driver.
 */
static int stmmac_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 chan;

	if (priv->eee_enabled)
		del_timer_sync(&priv->eee_ctrl_timer);

	/* Stop and disconnect the PHY */
	phylink_stop(priv->phylink);
	phylink_disconnect_phy(priv->phylink);

	stmmac_stop_all_queues(priv);

	stmmac_disable_all_queues(priv);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		del_timer_sync(&priv->tx_queue[chan].txtimer);

	/* Free the IRQ lines */
	free_irq(dev->irq, dev);
	if (priv->wol_irq != dev->irq)
		free_irq(priv->wol_irq, dev);
	if (priv->lpi_irq > 0)
		free_irq(priv->lpi_irq, dev);

	/* Stop TX/RX DMA and clear the descriptors */
	stmmac_stop_all_dma(priv);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv);

	/* Disable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, false);

	netif_carrier_off(dev);

	stmmac_release_ptp(priv);

	return 0;
}

static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
			       struct stmmac_tx_queue *tx_q)
{
	u16 tag = 0x0, inner_tag = 0x0;
	u32 inner_type = 0x0;
	struct dma_desc *p;

	if (!priv->dma_cap.vlins)
		return false;
	if (!skb_vlan_tag_present(skb))
		return false;
	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
		inner_tag = skb_vlan_tag_get(skb);
		inner_type = STMMAC_VLAN_INSERT;
	}

	tag = skb_vlan_tag_get(skb);

	p = tx_q->dma_tx + tx_q->cur_tx;
	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
		return false;

	stmmac_set_tx_owner(priv, p);
	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
	return true;
}
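
/* Note: when HW VLAN insertion is used, the context descriptor written here
 * consumes one ring entry of its own; cur_tx is advanced so the data
 * descriptors of the frame are queued behind it.
 */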

/**
 *  stmmac_tso_allocator - Allocate TSO descriptors and fill them with buffers
 *  @priv: driver private structure
 *  @des: buffer start address
 *  @total_len: total length to fill in descriptors
 *  @last_segment: condition for the last descriptor
 *  @queue: TX queue index
 *  Description:
 *  This function fills the descriptors and requests new descriptors
 *  according to the buffer length to fill
 */
static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
				 int total_len, bool last_segment, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	struct dma_desc *desc;
	u32 buff_size;
	int tmp_len;

	tmp_len = total_len;

	while (tmp_len > 0) {
		dma_addr_t curr_addr;

		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
		desc = tx_q->dma_tx + tx_q->cur_tx;

		curr_addr = des + (total_len - tmp_len);
		if (priv->dma_cap.addr64 <= 32)
			desc->des0 = cpu_to_le32(curr_addr);
		else
			stmmac_set_desc_addr(priv, desc, curr_addr);

		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
			    TSO_MAX_BUFF_SIZE : tmp_len;

		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
				0, 1,
				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
				0, 0);

		tmp_len -= TSO_MAX_BUFF_SIZE;
	}
}
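
/* Worked example (illustrative): with TSO_MAX_BUFF_SIZE = SZ_16K - 1 (16383
 * bytes), a remaining payload of 40000 bytes is spread over three
 * descriptors by the loop above: 16383 + 16383 + 7234 bytes.
 */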

/**
 *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
 *  @skb : the socket buffer
 *  @dev : device pointer
 *  Description: this is the transmit function that is called on TSO frames
 *  (support available on GMAC4 and newer chips).
 *  Diagram below show the ring programming in case of TSO frames:
 *
 *  First Descriptor
 *   --------
 *   | DES0 |---> buffer1 = L2/L3/L4 header
 *   | DES1 |---> TCP Payload (can continue on next descr...)
 *   | DES2 |---> buffer 1 and 2 len
 *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
 *   --------
 *	|
 *     ...
 *	|
 *   --------
 *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
 *   | DES1 | --|
 *   | DES2 | --> buffer 1 and 2 len
 *   | DES3 |
 *   --------
 *
 * mss is fixed when TSO is enabled, so the TDES3 ctx field only needs to be
 * set up again when the MSS value changes.
 */
static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dma_desc *desc, *first, *mss_desc = NULL;
	struct stmmac_priv *priv = netdev_priv(dev);
	int nfrags = skb_shinfo(skb)->nr_frags;
	u32 queue = skb_get_queue_mapping(skb);
	struct stmmac_tx_queue *tx_q;
	unsigned int first_entry;
	int tmp_pay_len = 0;
	u32 pay_len, mss;
	u8 proto_hdr_len;
	dma_addr_t des;
	bool has_vlan;
	int i;

	tx_q = &priv->tx_queue[queue];

	/* Compute header lengths */
	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	/* Desc availability based on threshold should be enough safe */
	if (unlikely(stmmac_tx_avail(priv, queue) <
		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
								queue));
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx Ring full when queue awake\n",
				   __func__);
		}
		return NETDEV_TX_BUSY;
	}

	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */

	mss = skb_shinfo(skb)->gso_size;

	/* set new MSS value if needed */
	if (mss != tx_q->mss) {
		mss_desc = tx_q->dma_tx + tx_q->cur_tx;
		stmmac_set_mss(priv, mss_desc, mss);
		tx_q->mss = mss;
		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
	}

	if (netif_msg_tx_queued(priv)) {
		pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
			skb->data_len);
	}

	/* Check if VLAN can be inserted by HW */
	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);

	first_entry = tx_q->cur_tx;
	WARN_ON(tx_q->tx_skbuff[first_entry]);

	desc = tx_q->dma_tx + first_entry;
	first = desc;

	if (has_vlan)
		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);

	/* first descriptor: fill Headers on Buf1 */
	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
			     DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, des))
		goto dma_map_err;

	tx_q->tx_skbuff_dma[first_entry].buf = des;
	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);

	if (priv->dma_cap.addr64 <= 32) {
		first->des0 = cpu_to_le32(des);

		/* Fill start of payload in buff2 of first descriptor */
		if (pay_len)
			first->des1 = cpu_to_le32(des + proto_hdr_len);

		/* If needed take extra descriptors to fill the remaining payload */
		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
	} else {
		stmmac_set_desc_addr(priv, first, des);
		tmp_pay_len = pay_len;
	}

	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);

	/* Prepare fragments */
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		des = skb_frag_dma_map(priv->device, frag, 0,
				       skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err;

		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
				     (i == nfrags - 1), queue);

		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
	}

	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;

	/* Only the last descriptor gets to point to the skb. */
	tx_q->tx_skbuff[tx_q->cur_tx] = skb;

	/* We've used all descriptors we need for this skb, however,
	 * advance cur_tx so that it references a fresh descriptor.
	 * ndo_start_xmit will fill this descriptor the next time it's
	 * called and stmmac_tx_clean may clean up to this descriptor.
	 */
	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);

	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	dev->stats.tx_bytes += skb->len;
	priv->xstats.tx_tso_frames++;
	priv->xstats.tx_tso_nfrags += nfrags;

	/* Manage tx mitigation */
	tx_q->tx_count_frames += nfrags + 1;
	if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
	    !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
	    (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    priv->hwts_tx_en)) {
		stmmac_tx_timer_arm(priv, queue);
	} else {
		tx_q->tx_count_frames = 0;
		stmmac_set_tx_ic(priv, desc);
		priv->xstats.tx_set_ic_bit++;
	}

	if (priv->sarc_type)
		stmmac_set_desc_sarc(priv, first, priv->sarc_type);

	skb_tx_timestamp(skb);

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		     priv->hwts_tx_en)) {
		/* declare that device is doing timestamping */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		stmmac_enable_tx_timestamp(priv, first);
	}

	/* Complete the first descriptor before granting the DMA */
	stmmac_prepare_tso_tx_desc(priv, first, 1,
			proto_hdr_len,
			pay_len,
			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));

	/* If context desc is used to change MSS */
	if (mss_desc) {
		/* Make sure that first descriptor has been completely
		 * written, including its own bit. This is because MSS is
		 * actually before first descriptor, so we need to make
		 * sure that MSS's own bit is the last thing written.
		 */
		dma_wmb();
		stmmac_set_tx_owner(priv, mss_desc);
	}

	/* The own bit must be the latest setting done when prepare the
	 * descriptor and then barrier is needed to make sure that
	 * all is coherent before granting the DMA engine.
	 */
	wmb();

	if (netif_msg_pktdata(priv)) {
		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
			tx_q->cur_tx, first, nfrags);

		stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);

		pr_info(">>> frame to be transmitted: ");
		print_pkt(skb->data, skb_headlen(skb));
	}

	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);

	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);

	return NETDEV_TX_OK;

dma_map_err:
	dev_err(priv->device, "Tx dma map failed\n");
	dev_kfree_skb(skb);
	priv->dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

/**
 *  stmmac_xmit - Tx entry point of the driver
 *  @skb : the socket buffer
 *  @dev : device pointer
 *  Description : this is the tx entry point of the driver.
 *  It programs the chain or the ring and supports oversized frames
 *  and SG feature.
 */
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int nopaged_len = skb_headlen(skb);
	int i, csum_insertion = 0, is_jumbo = 0;
	u32 queue = skb_get_queue_mapping(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	struct dma_desc *desc, *first;
	struct stmmac_tx_queue *tx_q;
	unsigned int first_entry;
	unsigned int enh_desc;
	dma_addr_t des;
	bool has_vlan;
	int entry;

	tx_q = &priv->tx_queue[queue];

	if (priv->tx_path_in_lpi_mode)
		stmmac_disable_eee_mode(priv);

	/* Manage oversized TCP frames for GMAC4 device */
	if (skb_is_gso(skb) && priv->tso) {
		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
			return stmmac_tso_xmit(skb, dev);
	}

	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
								queue));
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx Ring full when queue awake\n",
				   __func__);
		}
		return NETDEV_TX_BUSY;
	}

	/* Check if VLAN can be inserted by HW */
	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);

	entry = tx_q->cur_tx;
	first_entry = entry;
	WARN_ON(tx_q->tx_skbuff[first_entry]);

	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);

	if (likely(priv->extend_desc))
		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
	else
		desc = tx_q->dma_tx + entry;

	first = desc;

	if (has_vlan)
		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);

	enh_desc = priv->plat->enh_desc;
	/* To program the descriptors according to the size of the frame */
	if (enh_desc)
		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);

	if (unlikely(is_jumbo)) {
		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
		if (unlikely(entry < 0) && (entry != -EINVAL))
			goto dma_map_err;
	}

	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);
		bool last_segment = (i == (nfrags - 1));

		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
		WARN_ON(tx_q->tx_skbuff[entry]);

		if (likely(priv->extend_desc))
			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
		else
			desc = tx_q->dma_tx + entry;

		des = skb_frag_dma_map(priv->device, frag, 0, len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err; /* should reuse desc w/o issues */

		tx_q->tx_skbuff_dma[entry].buf = des;

		stmmac_set_desc_addr(priv, desc, des);

		tx_q->tx_skbuff_dma[entry].map_as_page = true;
		tx_q->tx_skbuff_dma[entry].len = len;
		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;

		/* Prepare the descriptor and set the own bit too */
		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
				priv->mode, 1, last_segment, skb->len);
	}

	/* Only the last descriptor gets to point to the skb. */
	tx_q->tx_skbuff[entry] = skb;

	/* We've used all descriptors we need for this skb, however,
	 * advance cur_tx so that it references a fresh descriptor.
	 * ndo_start_xmit will fill this descriptor the next time it's
	 * called and stmmac_tx_clean may clean up to this descriptor.
	 */
	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
	tx_q->cur_tx = entry;

	if (netif_msg_pktdata(priv)) {
		void *tx_head;

		netdev_dbg(priv->dev,
			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
			   entry, first, nfrags);

		if (priv->extend_desc)
			tx_head = (void *)tx_q->dma_etx;
		else
			tx_head = (void *)tx_q->dma_tx;

		stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);

		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
		print_pkt(skb->data, skb->len);
	}

	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	dev->stats.tx_bytes += skb->len;

	/* According to the coalesce parameter the IC bit for the latest
	 * segment is reset and the timer re-started to clean the tx status.
	 * This approach takes care about the fragments: desc is the first
	 * element in case of no SG.
	 */
	tx_q->tx_count_frames += nfrags + 1;
	if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
	    !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
	    (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    priv->hwts_tx_en)) {
		stmmac_tx_timer_arm(priv, queue);
	} else {
		tx_q->tx_count_frames = 0;
		stmmac_set_tx_ic(priv, desc);
		priv->xstats.tx_set_ic_bit++;
	}

	if (priv->sarc_type)
		stmmac_set_desc_sarc(priv, first, priv->sarc_type);

	skb_tx_timestamp(skb);

	/* Ready to fill the first descriptor and set the OWN bit w/o any
	 * problems because all the descriptors are actually ready to be
	 * passed to the DMA engine.
	 */
	if (likely(!is_jumbo)) {
		bool last_segment = (nfrags == 0);

		des = dma_map_single(priv->device, skb->data,
				     nopaged_len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err;

		tx_q->tx_skbuff_dma[first_entry].buf = des;

		stmmac_set_desc_addr(priv, first, des);

		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;

		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
			     priv->hwts_tx_en)) {
			/* declare that device is doing timestamping */
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			stmmac_enable_tx_timestamp(priv, first);
		}

		/* Prepare the first descriptor setting the OWN bit too */
		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
				csum_insertion, priv->mode, 1, last_segment,
				skb->len);
	} else {
		stmmac_set_tx_owner(priv, first);
	}

	/* The own bit must be the latest setting done when prepare the
	 * descriptor and then barrier is needed to make sure that
	 * all is coherent before granting the DMA engine.
	 */
	wmb();

	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);

	stmmac_enable_dma_transmission(priv, priv->ioaddr);

	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);

	return NETDEV_TX_OK;

dma_map_err:
	netdev_err(priv->dev, "Tx DMA map failed\n");
	dev_kfree_skb(skb);
	priv->dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
{
	struct vlan_ethhdr *veth;
	__be16 vlan_proto;
	u16 vlanid;

	veth = (struct vlan_ethhdr *)skb->data;
	vlan_proto = veth->h_vlan_proto;

	if ((vlan_proto == htons(ETH_P_8021Q) &&
	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
	    (vlan_proto == htons(ETH_P_8021AD) &&
	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
		/* pop the vlan tag */
		vlanid = ntohs(veth->h_vlan_TCI);
		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
	}
}


static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
{
	if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
		return 0;

	return 1;
}

/**
 * stmmac_rx_refill - refill used skb preallocated buffers
 * @priv: driver private structure
 * @queue: RX queue index
 * Description : this is to reallocate the skb for the reception process
 * that is based on zero-copy.
 */
static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int len, dirty = stmmac_rx_dirty(priv, queue);
	unsigned int entry = rx_q->dirty_rx;

	len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;

	while (dirty-- > 0) {
		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
		struct dma_desc *p;
		bool use_rx_wd;

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			p = rx_q->dma_rx + entry;

		if (!buf->page) {
			buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
			if (!buf->page)
				break;
		}

		if (priv->sph && !buf->sec_page) {
			buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
			if (!buf->sec_page)
				break;

			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);

			dma_sync_single_for_device(priv->device, buf->sec_addr,
						   len, DMA_FROM_DEVICE);
		}

		buf->addr = page_pool_get_dma_addr(buf->page);

		/* Sync whole allocation to device. This will invalidate old
		 * data.
		 */
		dma_sync_single_for_device(priv->device, buf->addr, len,
					   DMA_FROM_DEVICE);

		stmmac_set_desc_addr(priv, p, buf->addr);
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
		stmmac_refill_desc3(priv, rx_q, p);

		rx_q->rx_count_frames++;
		rx_q->rx_count_frames %= priv->rx_coal_frames;
		use_rx_wd = priv->use_riwt && rx_q->rx_count_frames;

		dma_wmb();
		stmmac_set_rx_owner(priv, p, use_rx_wd);

		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
	}
	rx_q->dirty_rx = entry;
	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
			    (rx_q->dirty_rx * sizeof(struct dma_desc));
	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
}

3428
/**
3429
 * stmmac_rx - manage the receive process
3430
 * @priv: driver private structure
3431 3432
 * @limit: napi bugget
 * @queue: RX queue index.
3433 3434 3435
 * Description :  this the function called by the napi poll method.
 * It gets all the frames inside the ring.
 */
3436
static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3437
{
3438
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3439
	struct stmmac_channel *ch = &priv->channel[queue];
3440 3441
	unsigned int count = 0, error = 0, len = 0;
	int status = 0, coe = priv->hw->rx_csum;
3442
	unsigned int next_entry = rx_q->cur_rx;
3443
	struct sk_buff *skb = NULL;
3444

3445
	if (netif_msg_rx_status(priv)) {
3446 3447
		void *rx_head;

3448
		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3449
		if (priv->extend_desc)
3450
			rx_head = (void *)rx_q->dma_erx;
3451
		else
3452
			rx_head = (void *)rx_q->dma_rx;
3453

3454
		stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
3455
	}
3456
	while (count < limit) {
3457
		unsigned int hlen = 0, prev_len = 0;
3458
		enum pkt_hash_types hash_type;
3459 3460
		struct stmmac_rx_buffer *buf;
		struct dma_desc *np, *p;
3461
		unsigned int sec_len;
3462 3463
		int entry;
		u32 hash;
3464

3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479
		if (!count && rx_q->state_saved) {
			skb = rx_q->state.skb;
			error = rx_q->state.error;
			len = rx_q->state.len;
		} else {
			rx_q->state_saved = false;
			skb = NULL;
			error = 0;
			len = 0;
		}

		if (count >= limit)
			break;

read_again:
3480
		sec_len = 0;
3481
		entry = next_entry;
3482
		buf = &rx_q->buf_pool[entry];
3483

3484
		if (priv->extend_desc)
3485
			p = (struct dma_desc *)(rx_q->dma_erx + entry);
3486
		else
3487
			p = rx_q->dma_rx + entry;
3488

3489
		/* read the status of the incoming frame */
3490 3491
		status = stmmac_rx_status(priv, &priv->dev->stats,
				&priv->xstats, p);
3492 3493
		/* check if managed by the DMA otherwise go ahead */
		if (unlikely(status & dma_own))
3494 3495 3496 3497
			break;

		count++;

3498 3499
		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
		next_entry = rx_q->cur_rx;
3500

3501
		if (priv->extend_desc)
3502
			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3503
		else
3504
			np = rx_q->dma_rx + next_entry;
3505 3506

		prefetch(np);
3507
		prefetch(page_address(buf->page));
3508

3509 3510 3511
		if (priv->extend_desc)
			stmmac_rx_extended_status(priv, &priv->dev->stats,
					&priv->xstats, rx_q->dma_erx + entry);
3512
		if (unlikely(status == discard_frame)) {
3513 3514
			page_pool_recycle_direct(rx_q->page_pool, buf->page);
			buf->page = NULL;
3515
			error = 1;
3516 3517
			if (!priv->hwts_rx_en)
				priv->dev->stats.rx_errors++;
3518 3519 3520 3521 3522
		}

		if (unlikely(error && (status & rx_not_ls)))
			goto read_again;
		if (unlikely(error)) {
3523
			dev_kfree_skb(skb);
3524 3525 3526 3527 3528 3529 3530
			continue;
		}

		/* Buffer is good. Go on. */

		if (likely(status & rx_not_ls)) {
			len += priv->dma_buf_sz;
3531
		} else {
3532 3533
			prev_len = len;
			len = stmmac_get_rx_frame_len(priv, p, coe);
3534

3535
			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
G
Giuseppe CAVALLARO 已提交
3536
			 * Type frames (LLC/LLC-SNAP)
3537 3538 3539 3540
			 *
			 * llc_snap is never checked in GMAC >= 4, so this ACS
			 * feature is always disabled and packets need to be
			 * stripped manually.
G
Giuseppe CAVALLARO 已提交
3541
			 */
3542 3543
			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
			    unlikely(status != llc_snap))
3544 3545
				len -= ETH_FCS_LEN;
		}
3546

3547
		if (!skb) {
3548 3549 3550 3551 3552 3553 3554 3555 3556
			int ret = stmmac_get_rx_header_len(priv, p, &hlen);

			if (priv->sph && !ret && (hlen > 0)) {
				sec_len = len;
				if (!(status & rx_not_ls))
					sec_len = sec_len - hlen;
				len = hlen;

				prefetch(page_address(buf->sec_page));
3557
				priv->xstats.rx_split_hdr_pkt_n++;
3558 3559
			}

3560 3561
			skb = napi_alloc_skb(&ch->rx_napi, len);
			if (!skb) {
3562 3563
				priv->dev->stats.rx_dropped++;
				continue;
3564 3565
			}

3566 3567
			dma_sync_single_for_cpu(priv->device, buf->addr, len,
						DMA_FROM_DEVICE);
3568
			skb_copy_to_linear_data(skb, page_address(buf->page),
3569 3570
						len);
			skb_put(skb, len);
3571

3572 3573 3574 3575 3576
			/* Data payload copied into SKB, page ready for recycle */
			page_pool_recycle_direct(rx_q->page_pool, buf->page);
			buf->page = NULL;
		} else {
			unsigned int buf_len = len - prev_len;
3577

3578 3579
			if (likely(status & rx_not_ls))
				buf_len = priv->dma_buf_sz;
3580

3581 3582 3583 3584 3585
			dma_sync_single_for_cpu(priv->device, buf->addr,
						buf_len, DMA_FROM_DEVICE);
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					buf->page, 0, buf_len,
					priv->dma_buf_sz);
3586

3587 3588 3589 3590
			/* Data payload appended into SKB */
			page_pool_release_page(rx_q->page_pool, buf->page);
			buf->page = NULL;
		}
3591

3592 3593 3594 3595 3596 3597 3598 3599 3600 3601 3602 3603 3604 3605
		if (sec_len > 0) {
			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
						sec_len, DMA_FROM_DEVICE);
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					buf->sec_page, 0, sec_len,
					priv->dma_buf_sz);

			len += sec_len;

			/* Data payload appended into SKB */
			page_pool_release_page(rx_q->page_pool, buf->sec_page);
			buf->sec_page = NULL;
		}

3606 3607
		if (likely(status & rx_not_ls))
			goto read_again;
3608

3609
		/* Got entire packet into SKB. Finish it. */
3610

3611 3612 3613
		stmmac_get_rx_hwtstamp(priv, p, np, skb);
		stmmac_rx_vlan(priv->dev, skb);
		skb->protocol = eth_type_trans(skb, priv->dev);
3614

3615 3616 3617 3618
		if (unlikely(!coe))
			skb_checksum_none_assert(skb);
		else
			skb->ip_summed = CHECKSUM_UNNECESSARY;
3619

3620 3621 3622 3623 3624 3625 3626 3627 3628 3629 3630 3631 3632 3633 3634
		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
			skb_set_hash(skb, hash, hash_type);

		skb_record_rx_queue(skb, queue);
		napi_gro_receive(&ch->rx_napi, skb);

		priv->dev->stats.rx_packets++;
		priv->dev->stats.rx_bytes += len;
	}

	if (status & rx_not_ls) {
		rx_q->state_saved = true;
		rx_q->state.skb = skb;
		rx_q->state.error = error;
		rx_q->state.len = len;
3635 3636
	}

3637
	stmmac_rx_refill(priv, queue);
3638 3639 3640 3641 3642 3643

	priv->xstats.rx_pkt_n += count;

	return count;
}

3644
static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
3645
{
3646
	struct stmmac_channel *ch =
3647
		container_of(napi, struct stmmac_channel, rx_napi);
3648 3649
	struct stmmac_priv *priv = ch->priv_data;
	u32 chan = ch->index;
3650
	int work_done;
3651

3652
	priv->xstats.napi_poll++;
3653

3654 3655 3656 3657 3658
	work_done = stmmac_rx(priv, budget, chan);
	if (work_done < budget && napi_complete_done(napi, work_done))
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
	return work_done;
}
3659

3660 3661 3662 3663 3664 3665 3666 3667
static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
{
	struct stmmac_channel *ch =
		container_of(napi, struct stmmac_channel, tx_napi);
	struct stmmac_priv *priv = ch->priv_data;
	struct stmmac_tx_queue *tx_q;
	u32 chan = ch->index;
	int work_done;
3668

3669 3670 3671 3672
	priv->xstats.napi_poll++;

	work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan);
	work_done = min(work_done, budget);
3673

3674 3675
	if (work_done < budget)
		napi_complete_done(napi, work_done);
3676 3677 3678 3679 3680 3681 3682

	/* Force transmission restart */
	tx_q = &priv->tx_queue[chan];
	if (tx_q->cur_tx != tx_q->dirty_tx) {
		stmmac_enable_dma_transmission(priv, priv->ioaddr);
		stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr,
				       chan);
3683
	}
3684

3685 3686 3687 3688 3689 3690 3691
	return work_done;
}

/**
 *  stmmac_tx_timeout
 *  @dev : Pointer to net device structure
 *  Description: this function is called when a packet transmission fails to
3692
 *   complete within a reasonable time. The driver will mark the error in the
3693 3694 3695 3696 3697 3698 3699
 *   netdev structure and arrange for the device to be reset to a sane state
 *   in order to transmit a new packet.
 */
static void stmmac_tx_timeout(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

3700
	stmmac_global_err(priv);
3701 3702 3703
}

/**
3704
 *  stmmac_set_rx_mode - entry point for multicast addressing
3705 3706 3707 3708 3709 3710 3711
 *  @dev : pointer to the device structure
 *  Description:
 *  This function is a driver entry point which gets called by the kernel
 *  whenever multicast addresses must be enabled/disabled.
 *  Return value:
 *  void.
 */
3712
static void stmmac_set_rx_mode(struct net_device *dev)
3713 3714 3715
{
	struct stmmac_priv *priv = netdev_priv(dev);

3716
	stmmac_set_filter(priv, priv->hw, dev);
3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 3727 3728 3729 3730 3731
}

/**
 *  stmmac_change_mtu - entry point to change MTU size for the device.
 *  @dev : device pointer.
 *  @new_mtu : the new MTU size for the device.
 *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
 *  to drive packet transmission. Ethernet has an MTU of 1500 octets
 *  (ETH_DATA_LEN). This value can be changed with ifconfig.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
3732 3733
	struct stmmac_priv *priv = netdev_priv(dev);

3734
	if (netif_running(dev)) {
3735
		netdev_err(priv->dev, "must be stopped to change its MTU\n");
3736 3737 3738
		return -EBUSY;
	}

3739
	dev->mtu = new_mtu;
A
Alexandre TORGUE 已提交
3740

3741 3742 3743 3744 3745
	netdev_update_features(dev);

	return 0;
}

3746
static netdev_features_t stmmac_fix_features(struct net_device *dev,
G
Giuseppe CAVALLARO 已提交
3747
					     netdev_features_t features)
3748 3749 3750
{
	struct stmmac_priv *priv = netdev_priv(dev);

3751
	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3752
		features &= ~NETIF_F_RXCSUM;
3753

3754
	if (!priv->plat->tx_coe)
3755
		features &= ~NETIF_F_CSUM_MASK;
3756

3757 3758 3759
	/* Some GMAC devices have a bugged Jumbo frame support that
	 * needs to have the Tx COE disabled for oversized frames
	 * (due to limited buffer sizes). In this case we disable
3760
	 * the TX csum insertion in the TDES and not use SF.
G
Giuseppe CAVALLARO 已提交
3761
	 */
3762
	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3763
		features &= ~NETIF_F_CSUM_MASK;
3764

A
Alexandre TORGUE 已提交
3765 3766 3767 3768 3769 3770 3771 3772
	/* Disable tso if asked by ethtool */
	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
		if (features & NETIF_F_TSO)
			priv->tso = true;
		else
			priv->tso = false;
	}

3773
	return features;
3774 3775
}

3776 3777 3778 3779
static int stmmac_set_features(struct net_device *netdev,
			       netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(netdev);
3780 3781
	bool sph_en;
	u32 chan;
3782 3783 3784 3785 3786 3787 3788 3789 3790

	/* Keep the COE Type in case of csum is supporting */
	if (features & NETIF_F_RXCSUM)
		priv->hw->rx_csum = priv->plat->rx_coe;
	else
		priv->hw->rx_csum = 0;
	/* No check needed because rx_coe has been set before and it will be
	 * fixed in case of issue.
	 */
3791
	stmmac_rx_ipc(priv, priv->hw);
3792

3793 3794 3795 3796
	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
	for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);

3797 3798 3799
	return 0;
}

3800 3801 3802 3803 3804
/**
 *  stmmac_interrupt - main ISR
 *  @irq: interrupt number.
 *  @dev_id: to pass the net device pointer.
 *  Description: this is the main driver interrupt service routine.
3805 3806 3807 3808 3809
 *  It can call:
 *  o DMA service routine (to manage incoming frame reception and transmission
 *    status)
 *  o Core interrupts to manage: remote wake-up, management counter, LPI
 *    interrupts.
3810
 */
3811 3812 3813 3814
static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct stmmac_priv *priv = netdev_priv(dev);
3815 3816 3817 3818
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queues_count;
	u32 queue;
3819
	bool xmac;
3820

3821
	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
3822
	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3823

3824 3825 3826
	if (priv->irq_wake)
		pm_wakeup_event(priv->device, 0);

3827
	if (unlikely(!dev)) {
3828
		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
3829 3830 3831
		return IRQ_NONE;
	}

3832 3833 3834
	/* Check if adapter is up */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return IRQ_HANDLED;
3835 3836 3837
	/* Check if a fatal error happened */
	if (stmmac_safety_feat_interrupt(priv))
		return IRQ_HANDLED;
3838

3839
	/* To handle GMAC own interrupts */
3840
	if ((priv->plat->has_gmac) || xmac) {
3841
		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
3842
		int mtl_status;
3843

3844 3845
		if (unlikely(status)) {
			/* For LPI we need to save the tx status */
3846
			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3847
				priv->tx_path_in_lpi_mode = true;
3848
			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3849
				priv->tx_path_in_lpi_mode = false;
3850 3851
		}

3852 3853
		for (queue = 0; queue < queues_count; queue++) {
			struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3854

3855 3856 3857 3858
			mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
								queue);
			if (mtl_status != -EINVAL)
				status |= mtl_status;
3859

3860 3861 3862 3863
			if (status & CORE_IRQ_MTL_RX_OVERFLOW)
				stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
						       rx_q->rx_tail_addr,
						       queue);
3864
		}
3865 3866

		/* PCS link status */
3867
		if (priv->hw->pcs) {
3868 3869 3870 3871 3872
			if (priv->xstats.pcs_link)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);
		}
3873
	}
3874

3875
	/* To handle DMA interrupts */
3876
	stmmac_dma_interrupt(priv);
3877 3878 3879 3880 3881 3882

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by NETCONSOLE and other diagnostic tools
G
Giuseppe CAVALLARO 已提交
3883 3884
 * to allow network I/O with interrupts disabled.
 */
3885 3886 3887 3888 3889 3890 3891 3892 3893 3894 3895 3896 3897 3898 3899
static void stmmac_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	stmmac_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

/**
 *  stmmac_ioctl - Entry point for the Ioctl
 *  @dev: Device pointer.
 *  @rq: An IOCTL specefic structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  @cmd: IOCTL command
 *  Description:
3900
 *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3901 3902 3903
 */
static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
3904
	struct stmmac_priv *priv = netdev_priv (dev);
3905
	int ret = -EOPNOTSUPP;
3906 3907 3908 3909

	if (!netif_running(dev))
		return -EINVAL;

3910 3911 3912 3913
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
3914
		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
3915 3916
		break;
	case SIOCSHWTSTAMP:
3917 3918 3919 3920
		ret = stmmac_hwtstamp_set(dev, rq);
		break;
	case SIOCGHWTSTAMP:
		ret = stmmac_hwtstamp_get(dev, rq);
3921 3922 3923 3924
		break;
	default:
		break;
	}
3925

3926 3927 3928
	return ret;
}

3929 3930 3931 3932 3933 3934
static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				    void *cb_priv)
{
	struct stmmac_priv *priv = cb_priv;
	int ret = -EOPNOTSUPP;

3935 3936 3937
	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
		return ret;

3938 3939 3940 3941
	stmmac_disable_all_queues(priv);

	switch (type) {
	case TC_SETUP_CLSU32:
3942 3943 3944 3945
		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
		break;
	case TC_SETUP_CLSFLOWER:
		ret = stmmac_tc_setup_cls(priv, priv, type_data);
3946 3947 3948 3949 3950 3951 3952 3953 3954
		break;
	default:
		break;
	}

	stmmac_enable_all_queues(priv);
	return ret;
}

3955 3956
static LIST_HEAD(stmmac_block_cb_list);

3957 3958 3959 3960 3961 3962 3963
static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
			   void *type_data)
{
	struct stmmac_priv *priv = netdev_priv(ndev);

	switch (type) {
	case TC_SETUP_BLOCK:
3964 3965
		return flow_block_cb_setup_simple(type_data,
						  &stmmac_block_cb_list,
3966 3967
						  stmmac_setup_tc_block_cb,
						  priv, priv, true);
3968 3969
	case TC_SETUP_QDISC_CBS:
		return stmmac_tc_setup_cbs(priv, priv, type_data);
3970 3971 3972 3973 3974
	default:
		return -EOPNOTSUPP;
	}
}

3975 3976 3977 3978 3979 3980 3981 3982 3983 3984 3985 3986 3987 3988 3989 3990
static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
			       struct net_device *sb_dev)
{
	if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		/*
		 * There is no way to determine the number of TSO
		 * capable Queues. Let's use always the Queue 0
		 * because if TSO is supported then at least this
		 * one will be capable.
		 */
		return 0;
	}

	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
}

3991 3992 3993 3994 3995 3996 3997 3998 3999
static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret = 0;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;

4000
	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
4001 4002 4003 4004

	return ret;
}

4005
#ifdef CONFIG_DEBUG_FS
4006 4007
static struct dentry *stmmac_fs_dir;

4008
static void sysfs_display_ring(void *head, int size, int extend_desc,
G
Giuseppe CAVALLARO 已提交
4009
			       struct seq_file *seq)
4010 4011
{
	int i;
G
Giuseppe CAVALLARO 已提交
4012 4013
	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
	struct dma_desc *p = (struct dma_desc *)head;
4014

4015 4016 4017
	for (i = 0; i < size; i++) {
		if (extend_desc) {
			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
G
Giuseppe CAVALLARO 已提交
4018
				   i, (unsigned int)virt_to_phys(ep),
4019 4020 4021 4022
				   le32_to_cpu(ep->basic.des0),
				   le32_to_cpu(ep->basic.des1),
				   le32_to_cpu(ep->basic.des2),
				   le32_to_cpu(ep->basic.des3));
4023 4024 4025
			ep++;
		} else {
			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
4026
				   i, (unsigned int)virt_to_phys(p),
4027 4028
				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
4029 4030
			p++;
		}
4031 4032
		seq_printf(seq, "\n");
	}
4033
}
4034

4035
static int stmmac_rings_status_show(struct seq_file *seq, void *v)
4036 4037 4038
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);
4039
	u32 rx_count = priv->plat->rx_queues_to_use;
4040
	u32 tx_count = priv->plat->tx_queues_to_use;
4041 4042
	u32 queue;

4043 4044 4045
	if ((dev->flags & IFF_UP) == 0)
		return 0;

4046 4047 4048 4049 4050 4051 4052 4053 4054 4055 4056 4057 4058 4059 4060
	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		seq_printf(seq, "RX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_erx,
					   DMA_RX_SIZE, 1, seq);
		} else {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_rx,
					   DMA_RX_SIZE, 0, seq);
		}
	}
4061

4062 4063 4064 4065 4066 4067 4068 4069 4070 4071 4072 4073 4074 4075
	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		seq_printf(seq, "TX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_etx,
					   DMA_TX_SIZE, 1, seq);
		} else {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_tx,
					   DMA_TX_SIZE, 0, seq);
		}
4076 4077 4078 4079
	}

	return 0;
}
4080
DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
4081

4082
static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
4083 4084 4085 4086
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);

4087
	if (!priv->hw_cap_support) {
4088 4089 4090 4091 4092 4093 4094 4095
		seq_printf(seq, "DMA HW features not supported\n");
		return 0;
	}

	seq_printf(seq, "==============================\n");
	seq_printf(seq, "\tDMA HW features\n");
	seq_printf(seq, "==============================\n");

4096
	seq_printf(seq, "\t10/100 Mbps: %s\n",
4097
		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
4098
	seq_printf(seq, "\t1000 Mbps: %s\n",
4099
		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
4100
	seq_printf(seq, "\tHalf duplex: %s\n",
4101 4102 4103 4104 4105
		   (priv->dma_cap.half_duplex) ? "Y" : "N");
	seq_printf(seq, "\tHash Filter: %s\n",
		   (priv->dma_cap.hash_filter) ? "Y" : "N");
	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
		   (priv->dma_cap.multi_addr) ? "Y" : "N");
4106
	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
4107 4108 4109 4110 4111 4112 4113 4114 4115 4116 4117
		   (priv->dma_cap.pcs) ? "Y" : "N");
	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
	seq_printf(seq, "\tPMT Remote wake up: %s\n",
		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
	seq_printf(seq, "\tPMT Magic Frame: %s\n",
		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
	seq_printf(seq, "\tRMON module: %s\n",
		   (priv->dma_cap.rmon) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
		   (priv->dma_cap.time_stamp) ? "Y" : "N");
4118
	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
4119
		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
4120
	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
4121 4122 4123 4124
		   (priv->dma_cap.eee) ? "Y" : "N");
	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
		   (priv->dma_cap.tx_coe) ? "Y" : "N");
A
Alexandre TORGUE 已提交
4125 4126 4127 4128 4129 4130 4131 4132 4133
	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
			   (priv->dma_cap.rx_coe) ? "Y" : "N");
	} else {
		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
	}
4134 4135 4136 4137 4138 4139 4140 4141 4142 4143 4144
	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
		   priv->dma_cap.number_rx_channel);
	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
		   priv->dma_cap.number_tx_channel);
	seq_printf(seq, "\tEnhanced descriptors: %s\n",
		   (priv->dma_cap.enh_desc) ? "Y" : "N");

	return 0;
}
4145
DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
4146

4147
static void stmmac_init_fs(struct net_device *dev)
4148
{
4149 4150 4151 4152
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Create per netdev entries */
	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
4153 4154

	/* Entry to report DMA RX/TX rings */
4155 4156
	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
			    &stmmac_rings_status_fops);
4157

4158
	/* Entry to report the DMA HW features */
4159 4160
	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
			    &stmmac_dma_cap_fops);
4161 4162
}

4163
static void stmmac_exit_fs(struct net_device *dev)
4164
{
4165 4166 4167
	struct stmmac_priv *priv = netdev_priv(dev);

	debugfs_remove_recursive(priv->dbgfs_dir);
4168
}
4169
#endif /* CONFIG_DEBUG_FS */
4170

4171 4172 4173 4174 4175 4176 4177 4178 4179 4180 4181 4182 4183 4184 4185 4186 4187 4188 4189 4190 4191 4192 4193 4194 4195 4196 4197 4198 4199 4200 4201 4202 4203 4204 4205 4206 4207 4208 4209 4210 4211 4212 4213 4214 4215 4216 4217 4218 4219 4220 4221 4222 4223 4224 4225 4226 4227 4228 4229 4230 4231 4232 4233 4234 4235 4236 4237 4238 4239 4240 4241 4242 4243
static u32 stmmac_vid_crc32_le(__le16 vid_le)
{
	unsigned char *data = (unsigned char *)&vid_le;
	unsigned char data_byte = 0;
	u32 crc = ~0x0;
	u32 temp = 0;
	int i, bits;

	bits = get_bitmask_order(VLAN_VID_MASK);
	for (i = 0; i < bits; i++) {
		if ((i % 8) == 0)
			data_byte = data[i / 8];

		temp = ((crc & 1) ^ data_byte) & 1;
		crc >>= 1;
		data_byte >>= 1;

		if (temp)
			crc ^= 0xedb88320;
	}

	return crc;
}

static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
{
	u32 crc, hash = 0;
	u16 vid;

	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
		__le16 vid_le = cpu_to_le16(vid);
		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
		hash |= (1 << crc);
	}

	return stmmac_update_vlan_hash(priv, priv->hw, hash, is_double);
}

static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	bool is_double = false;
	int ret;

	if (!priv->dma_cap.vlhash)
		return -EOPNOTSUPP;
	if (be16_to_cpu(proto) == ETH_P_8021AD)
		is_double = true;

	set_bit(vid, priv->active_vlans);
	ret = stmmac_vlan_update(priv, is_double);
	if (ret) {
		clear_bit(vid, priv->active_vlans);
		return ret;
	}

	return ret;
}

static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	bool is_double = false;

	if (!priv->dma_cap.vlhash)
		return -EOPNOTSUPP;
	if (be16_to_cpu(proto) == ETH_P_8021AD)
		is_double = true;

	clear_bit(vid, priv->active_vlans);
	return stmmac_vlan_update(priv, is_double);
}

4244 4245 4246 4247 4248
static const struct net_device_ops stmmac_netdev_ops = {
	.ndo_open = stmmac_open,
	.ndo_start_xmit = stmmac_xmit,
	.ndo_stop = stmmac_release,
	.ndo_change_mtu = stmmac_change_mtu,
4249
	.ndo_fix_features = stmmac_fix_features,
4250
	.ndo_set_features = stmmac_set_features,
4251
	.ndo_set_rx_mode = stmmac_set_rx_mode,
4252 4253
	.ndo_tx_timeout = stmmac_tx_timeout,
	.ndo_do_ioctl = stmmac_ioctl,
4254
	.ndo_setup_tc = stmmac_setup_tc,
4255
	.ndo_select_queue = stmmac_select_queue,
4256 4257 4258
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = stmmac_poll_controller,
#endif
4259
	.ndo_set_mac_address = stmmac_set_mac_address,
4260 4261
	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
4262 4263
};

4264 4265 4266 4267 4268 4269 4270 4271 4272 4273 4274 4275 4276 4277 4278 4279
static void stmmac_reset_subtask(struct stmmac_priv *priv)
{
	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
		return;
	if (test_bit(STMMAC_DOWN, &priv->state))
		return;

	netdev_err(priv->dev, "Reset adapter.\n");

	rtnl_lock();
	netif_trans_update(priv->dev);
	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
		usleep_range(1000, 2000);

	set_bit(STMMAC_DOWN, &priv->state);
	dev_close(priv->dev);
4280
	dev_open(priv->dev, NULL);
4281 4282 4283 4284 4285 4286 4287 4288 4289 4290 4291 4292 4293 4294
	clear_bit(STMMAC_DOWN, &priv->state);
	clear_bit(STMMAC_RESETING, &priv->state);
	rtnl_unlock();
}

static void stmmac_service_task(struct work_struct *work)
{
	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
			service_task);

	stmmac_reset_subtask(priv);
	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
}

4295 4296
/**
 *  stmmac_hw_init - Init the MAC device
4297
 *  @priv: driver private structure
4298 4299 4300 4301
 *  Description: this function is to configure the MAC device according to
 *  some platform parameters or the HW capability register. It prepares the
 *  driver to use either ring or chain modes and to setup either enhanced or
 *  normal descriptors.
4302 4303 4304
 */
static int stmmac_hw_init(struct stmmac_priv *priv)
{
4305
	int ret;
4306

4307 4308 4309
	/* dwmac-sun8i only work in chain mode */
	if (priv->plat->has_sun8i)
		chain_mode = 1;
4310
	priv->chain_mode = chain_mode;
4311

4312 4313 4314 4315
	/* Initialize HW Interface */
	ret = stmmac_hwif_init(priv);
	if (ret)
		return ret;
4316

4317 4318 4319
	/* Get the HW capability (new GMAC newer than 3.50a) */
	priv->hw_cap_support = stmmac_get_hw_features(priv);
	if (priv->hw_cap_support) {
4320
		dev_info(priv->device, "DMA HW capability register supported\n");
4321 4322 4323 4324 4325 4326 4327 4328

		/* We can override some gmac/dma configuration fields: e.g.
		 * enh_desc, tx_coe (e.g. that are passed through the
		 * platform) with the values from the HW capability
		 * register (if supported).
		 */
		priv->plat->enh_desc = priv->dma_cap.enh_desc;
		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4329
		priv->hw->pmt = priv->plat->pmt;
4330 4331 4332 4333 4334 4335
		if (priv->dma_cap.hash_tb_sz) {
			priv->hw->multicast_filter_bins =
					(BIT(priv->dma_cap.hash_tb_sz) << 5);
			priv->hw->mcast_bits_log2 =
					ilog2(priv->hw->multicast_filter_bins);
		}
4336

4337 4338 4339 4340 4341 4342
		/* TXCOE doesn't work in thresh DMA mode */
		if (priv->plat->force_thresh_dma_mode)
			priv->plat->tx_coe = 0;
		else
			priv->plat->tx_coe = priv->dma_cap.tx_coe;

A
Alexandre TORGUE 已提交
4343 4344
		/* In case of GMAC4 rx_coe is from HW cap register. */
		priv->plat->rx_coe = priv->dma_cap.rx_coe;
4345 4346 4347 4348 4349 4350

		if (priv->dma_cap.rx_coe_type2)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
		else if (priv->dma_cap.rx_coe_type1)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;

4351 4352 4353
	} else {
		dev_info(priv->device, "No HW DMA feature register supported\n");
	}
4354

4355 4356
	if (priv->plat->rx_coe) {
		priv->hw->rx_csum = priv->plat->rx_coe;
4357
		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
A
Alexandre TORGUE 已提交
4358
		if (priv->synopsys_id < DWMAC_CORE_4_00)
4359
			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4360
	}
4361
	if (priv->plat->tx_coe)
4362
		dev_info(priv->device, "TX Checksum insertion supported\n");
4363 4364

	if (priv->plat->pmt) {
4365
		dev_info(priv->device, "Wake-Up On Lan supported\n");
4366 4367 4368
		device_set_wakeup_capable(priv->device, 1);
	}

A
Alexandre TORGUE 已提交
4369
	if (priv->dma_cap.tsoen)
4370
		dev_info(priv->device, "TSO supported\n");
A
Alexandre TORGUE 已提交
4371

4372 4373 4374 4375 4376 4377 4378
	/* Run HW quirks, if any */
	if (priv->hwif_quirks) {
		ret = priv->hwif_quirks(priv);
		if (ret)
			return ret;
	}

4379 4380 4381 4382 4383 4384 4385 4386 4387 4388 4389 4390
	/* Rx Watchdog is available in the COREs newer than the 3.40.
	 * In some case, for example on bugged HW this feature
	 * has to be disable and this can be done by passing the
	 * riwt_off field from the platform.
	 */
	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
		priv->use_riwt = 1;
		dev_info(priv->device,
			 "Enable RX Mitigation via HW Watchdog Timer\n");
	}

4391
	return 0;
4392 4393
}

4394
/**
4395 4396
 * stmmac_dvr_probe
 * @device: device pointer
4397
 * @plat_dat: platform data pointer
4398
 * @res: stmmac resource pointer
4399 4400
 * Description: this is the main probe function used to
 * call the alloc_etherdev, allocate the priv structure.
4401
 * Return:
4402
 * returns 0 on success, otherwise errno.
4403
 */
4404 4405 4406
int stmmac_dvr_probe(struct device *device,
		     struct plat_stmmacenet_data *plat_dat,
		     struct stmmac_resources *res)
4407
{
4408 4409
	struct net_device *ndev = NULL;
	struct stmmac_priv *priv;
4410 4411
	u32 queue, rxq, maxq;
	int i, ret = 0;
4412

4413 4414
	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
4415
	if (!ndev)
4416
		return -ENOMEM;
4417 4418 4419 4420 4421 4422

	SET_NETDEV_DEV(ndev, device);

	priv = netdev_priv(ndev);
	priv->device = device;
	priv->dev = ndev;
4423

4424
	stmmac_set_ethtool_ops(ndev);
4425 4426
	priv->pause = pause;
	priv->plat = plat_dat;
4427 4428 4429 4430 4431 4432 4433
	priv->ioaddr = res->addr;
	priv->dev->base_addr = (unsigned long)res->addr;

	priv->dev->irq = res->irq;
	priv->wol_irq = res->wol_irq;
	priv->lpi_irq = res->lpi_irq;

4434
	if (!IS_ERR_OR_NULL(res->mac))
4435
		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4436

4437
	dev_set_drvdata(device, priv->dev);
4438

4439 4440
	/* Verify driver arguments */
	stmmac_verify_args();
4441

4442 4443 4444 4445
	/* Allocate workqueue */
	priv->wq = create_singlethread_workqueue("stmmac_wq");
	if (!priv->wq) {
		dev_err(priv->device, "failed to create workqueue\n");
4446
		return -ENOMEM;
4447 4448 4449 4450
	}

	INIT_WORK(&priv->service_task, stmmac_service_task);

4451
	/* Override with kernel parameters if supplied XXX CRS XXX
G
Giuseppe CAVALLARO 已提交
4452 4453
	 * this needs to have multiple instances
	 */
4454 4455 4456
	if ((phyaddr >= 0) && (phyaddr <= 31))
		priv->plat->phy_addr = phyaddr;

4457 4458
	if (priv->plat->stmmac_rst) {
		ret = reset_control_assert(priv->plat->stmmac_rst);
4459
		reset_control_deassert(priv->plat->stmmac_rst);
4460 4461 4462 4463 4464 4465
		/* Some reset controllers have only reset callback instead of
		 * assert + deassert callbacks pair.
		 */
		if (ret == -ENOTSUPP)
			reset_control_reset(priv->plat->stmmac_rst);
	}
4466

4467
	/* Init MAC and get the capabilities */
4468 4469
	ret = stmmac_hw_init(priv);
	if (ret)
4470
		goto error_hw_init;
4471

4472 4473
	stmmac_check_ether_addr(priv);

4474
	/* Configure real RX and TX queues */
4475 4476
	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4477

4478
	ndev->netdev_ops = &stmmac_netdev_ops;
4479

4480 4481
	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_RXCSUM;
A
Alexandre TORGUE 已提交
4482

4483 4484 4485 4486 4487
	ret = stmmac_tc_init(priv, priv);
	if (!ret) {
		ndev->hw_features |= NETIF_F_HW_TC;
	}

A
Alexandre TORGUE 已提交
4488
	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
N
Niklas Cassel 已提交
4489
		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
A
Alexandre TORGUE 已提交
4490
		priv->tso = true;
4491
		dev_info(priv->device, "TSO feature enabled\n");
A
Alexandre TORGUE 已提交
4492
	}
4493

4494 4495 4496 4497 4498 4499
	if (priv->dma_cap.sphen) {
		ndev->hw_features |= NETIF_F_GRO;
		priv->sph = true;
		dev_info(priv->device, "SPH feature enabled\n");
	}

4500 4501 4502 4503 4504 4505 4506 4507 4508 4509 4510 4511 4512 4513 4514 4515 4516
	if (priv->dma_cap.addr64) {
		ret = dma_set_mask_and_coherent(device,
				DMA_BIT_MASK(priv->dma_cap.addr64));
		if (!ret) {
			dev_info(priv->device, "Using %d bits DMA width\n",
				 priv->dma_cap.addr64);
		} else {
			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
			if (ret) {
				dev_err(priv->device, "Failed to set DMA Mask\n");
				goto error_hw_init;
			}

			priv->dma_cap.addr64 = 32;
		}
	}

4517 4518
	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4519 4520
#ifdef STMMAC_VLAN_TAG_USED
	/* Both mac100 and gmac support receive VLAN tag detection */
4521
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
4522 4523 4524 4525
	if (priv->dma_cap.vlhash) {
		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
	}
4526 4527 4528 4529 4530
	if (priv->dma_cap.vlins) {
		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
		if (priv->dma_cap.dvlan)
			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
	}
4531 4532 4533
#endif
	priv->msg_enable = netif_msg_init(debug, default_msg_level);

4534 4535 4536 4537 4538 4539 4540 4541 4542
	/* Initialize RSS */
	rxq = priv->plat->rx_queues_to_use;
	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);

	if (priv->dma_cap.rssen && priv->plat->rss_en)
		ndev->features |= NETIF_F_RXHASH;

4543 4544
	/* MTU range: 46 - hw-specific max */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4545
	if (priv->plat->has_xgmac)
4546
		ndev->max_mtu = XGMAC_JUMBO_LEN;
4547 4548
	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
		ndev->max_mtu = JUMBO_LEN;
4549 4550
	else
		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4551 4552 4553 4554 4555
	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
	 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
	 */
	if ((priv->plat->maxmtu < ndev->max_mtu) &&
	    (priv->plat->maxmtu >= ndev->min_mtu))
4556
		ndev->max_mtu = priv->plat->maxmtu;
4557
	else if (priv->plat->maxmtu < ndev->min_mtu)
4558 4559 4560
		dev_warn(priv->device,
			 "%s: warning: maxmtu having invalid value (%d)\n",
			 __func__, priv->plat->maxmtu);
4561

4562 4563 4564
	if (flow_ctrl)
		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */

4565 4566
	/* Setup channels NAPI */
	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4567

4568 4569 4570 4571 4572 4573
	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		ch->priv_data = priv;
		ch->index = queue;

4574 4575 4576 4577 4578
		if (queue < priv->plat->rx_queues_to_use) {
			netif_napi_add(ndev, &ch->rx_napi, stmmac_napi_poll_rx,
				       NAPI_POLL_WEIGHT);
		}
		if (queue < priv->plat->tx_queues_to_use) {
4579 4580 4581
			netif_tx_napi_add(ndev, &ch->tx_napi,
					  stmmac_napi_poll_tx,
					  NAPI_POLL_WEIGHT);
4582
		}
4583
	}
4584

4585
	mutex_init(&priv->lock);
4586

4587 4588 4589 4590 4591 4592
	/* If a specific clk_csr value is passed from the platform
	 * this means that the CSR Clock Range selection cannot be
	 * changed at run-time and it is fixed. Viceversa the driver'll try to
	 * set the MDC clock dynamically according to the csr actual
	 * clock input.
	 */
4593
	if (priv->plat->clk_csr >= 0)
4594
		priv->clk_csr = priv->plat->clk_csr;
4595 4596
	else
		stmmac_clk_csr_set(priv);
4597

4598 4599
	stmmac_check_pcs_mode(priv);

4600 4601 4602
	if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI) {
4603 4604 4605
		/* MDIO bus Registration */
		ret = stmmac_mdio_register(ndev);
		if (ret < 0) {
4606 4607 4608
			dev_err(priv->device,
				"%s: MDIO bus (id: %d) registration failed",
				__func__, priv->plat->bus_id);
4609 4610
			goto error_mdio_register;
		}
4611 4612
	}

4613 4614 4615 4616 4617 4618
	ret = stmmac_phy_setup(priv);
	if (ret) {
		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
		goto error_phy_setup;
	}

4619
	ret = register_netdev(ndev);
4620
	if (ret) {
4621 4622
		dev_err(priv->device, "%s: ERROR %i registering the device\n",
			__func__, ret);
4623 4624
		goto error_netdev_register;
	}
4625

4626
#ifdef CONFIG_DEBUG_FS
4627
	stmmac_init_fs(ndev);
4628 4629
#endif

4630
	return ret;
4631

4632
error_netdev_register:
4633 4634
	phylink_destroy(priv->phylink);
error_phy_setup:
4635 4636 4637 4638
	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
4639
error_mdio_register:
4640 4641
	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];
4642

4643 4644 4645 4646
		if (queue < priv->plat->rx_queues_to_use)
			netif_napi_del(&ch->rx_napi);
		if (queue < priv->plat->tx_queues_to_use)
			netif_napi_del(&ch->tx_napi);
4647
	}
4648
error_hw_init:
4649
	destroy_workqueue(priv->wq);
4650

4651
	return ret;
4652
}
4653
EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
4654 4655 4656

/**
 * stmmac_dvr_remove
4657
 * @dev: device pointer
4658
 * Description: this function resets the TX/RX processes, disables the MAC RX/TX
4659
 * changes the link status, releases the DMA descriptor rings.
4660
 */
4661
int stmmac_dvr_remove(struct device *dev)
4662
{
4663
	struct net_device *ndev = dev_get_drvdata(dev);
4664
	struct stmmac_priv *priv = netdev_priv(ndev);
4665

4666
	netdev_info(priv->dev, "%s: removing driver", __func__);
4667

4668 4669 4670
#ifdef CONFIG_DEBUG_FS
	stmmac_exit_fs(ndev);
#endif
4671
	stmmac_stop_all_dma(priv);
4672

4673
	stmmac_mac_set(priv, priv->ioaddr, false);
4674 4675
	netif_carrier_off(ndev);
	unregister_netdev(ndev);
4676
	phylink_destroy(priv->phylink);
4677 4678 4679 4680
	if (priv->plat->stmmac_rst)
		reset_control_assert(priv->plat->stmmac_rst);
	clk_disable_unprepare(priv->plat->pclk);
	clk_disable_unprepare(priv->plat->stmmac_clk);
4681 4682 4683
	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
4684
		stmmac_mdio_unregister(ndev);
4685
	destroy_workqueue(priv->wq);
4686
	mutex_destroy(&priv->lock);
4687 4688 4689

	return 0;
}
4690
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4691

4692 4693
/**
 * stmmac_suspend - suspend callback
4694
 * @dev: device pointer
4695 4696 4697 4698
 * Description: this is the function to suspend the device and it is called
 * by the platform driver to stop the network queue, release the resources,
 * program the PMT register (for WoL), clean and release driver resources.
 */
4699
int stmmac_suspend(struct device *dev)
4700
{
4701
	struct net_device *ndev = dev_get_drvdata(dev);
4702
	struct stmmac_priv *priv = netdev_priv(ndev);
4703

4704
	if (!ndev || !netif_running(ndev))
4705 4706
		return 0;

4707
	phylink_stop(priv->phylink);
4708

4709
	mutex_lock(&priv->lock);
4710

4711
	netif_device_detach(ndev);
4712
	stmmac_stop_all_queues(priv);
4713

4714
	stmmac_disable_all_queues(priv);
4715 4716

	/* Stop TX/RX DMA */
4717
	stmmac_stop_all_dma(priv);
4718

4719
	/* Enable Power down mode by programming the PMT regs */
4720
	if (device_may_wakeup(priv->device)) {
4721
		stmmac_pmt(priv, priv->hw, priv->wolopts);
4722 4723
		priv->irq_wake = 1;
	} else {
4724
		stmmac_mac_set(priv, priv->ioaddr, false);
4725
		pinctrl_pm_select_sleep_state(priv->device);
4726
		/* Disable clock in case of PWM is off */
4727 4728
		clk_disable(priv->plat->pclk);
		clk_disable(priv->plat->stmmac_clk);
4729
	}
4730
	mutex_unlock(&priv->lock);
4731

4732
	priv->speed = SPEED_UNKNOWN;
4733 4734
	return 0;
}
4735
EXPORT_SYMBOL_GPL(stmmac_suspend);
4736

4737 4738 4739 4740 4741 4742 4743
/**
 * stmmac_reset_queues_param - reset queue parameters
 * @dev: device pointer
 */
static void stmmac_reset_queues_param(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
4744
	u32 tx_cnt = priv->plat->tx_queues_to_use;
4745 4746 4747 4748 4749 4750 4751 4752 4753
	u32 queue;

	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		rx_q->cur_rx = 0;
		rx_q->dirty_rx = 0;
	}

4754 4755 4756 4757 4758
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		tx_q->cur_tx = 0;
		tx_q->dirty_tx = 0;
4759
		tx_q->mss = 0;
4760
	}
4761 4762
}

4763 4764
/**
 * stmmac_resume - resume callback
4765
 * @dev: device pointer
4766 4767 4768
 * Description: when resume this function is invoked to setup the DMA and CORE
 * in a usable state.
 */
4769
int stmmac_resume(struct device *dev)
4770
{
4771
	struct net_device *ndev = dev_get_drvdata(dev);
4772
	struct stmmac_priv *priv = netdev_priv(ndev);
4773

4774
	if (!netif_running(ndev))
4775 4776 4777 4778 4779 4780
		return 0;

	/* Power Down bit, into the PM register, is cleared
	 * automatically as soon as a magic packet or a Wake-up frame
	 * is received. Anyway, it's better to manually clear
	 * this bit because it can generate problems while resuming
G
Giuseppe CAVALLARO 已提交
4781 4782
	 * from another devices (e.g. serial console).
	 */
4783
	if (device_may_wakeup(priv->device)) {
4784
		mutex_lock(&priv->lock);
4785
		stmmac_pmt(priv, priv->hw, 0);
4786
		mutex_unlock(&priv->lock);
4787
		priv->irq_wake = 0;
4788
	} else {
4789
		pinctrl_pm_select_default_state(priv->device);
4790
		/* enable the clk previously disabled */
4791 4792
		clk_enable(priv->plat->stmmac_clk);
		clk_enable(priv->plat->pclk);
4793 4794 4795 4796
		/* reset the phy so that it's ready */
		if (priv->mii)
			stmmac_mdio_reset(priv->mii);
	}
4797

4798
	netif_device_attach(ndev);
4799

4800
	mutex_lock(&priv->lock);
4801

4802 4803
	stmmac_reset_queues_param(priv);

4804 4805
	stmmac_clear_descriptors(priv);

4806
	stmmac_hw_setup(ndev, false);
4807
	stmmac_init_coalesce(priv);
4808
	stmmac_set_rx_mode(ndev);
4809

4810
	stmmac_enable_all_queues(priv);
4811

4812
	stmmac_start_all_queues(priv);
4813

4814
	mutex_unlock(&priv->lock);
4815

4816
	phylink_start(priv->phylink);
4817

4818 4819
	return 0;
}
4820
EXPORT_SYMBOL_GPL(stmmac_resume);
4821

4822 4823 4824 4825 4826 4827 4828 4829
#ifndef MODULE
static int __init stmmac_cmdline_opt(char *str)
{
	char *opt;

	if (!str || !*str)
		return -EINVAL;
	while ((opt = strsep(&str, ",")) != NULL) {
4830
		if (!strncmp(opt, "debug:", 6)) {
4831
			if (kstrtoint(opt + 6, 0, &debug))
4832 4833
				goto err;
		} else if (!strncmp(opt, "phyaddr:", 8)) {
4834
			if (kstrtoint(opt + 8, 0, &phyaddr))
4835 4836
				goto err;
		} else if (!strncmp(opt, "buf_sz:", 7)) {
4837
			if (kstrtoint(opt + 7, 0, &buf_sz))
4838 4839
				goto err;
		} else if (!strncmp(opt, "tc:", 3)) {
4840
			if (kstrtoint(opt + 3, 0, &tc))
4841 4842
				goto err;
		} else if (!strncmp(opt, "watchdog:", 9)) {
4843
			if (kstrtoint(opt + 9, 0, &watchdog))
4844 4845
				goto err;
		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
4846
			if (kstrtoint(opt + 10, 0, &flow_ctrl))
4847 4848
				goto err;
		} else if (!strncmp(opt, "pause:", 6)) {
4849
			if (kstrtoint(opt + 6, 0, &pause))
4850
				goto err;
4851
		} else if (!strncmp(opt, "eee_timer:", 10)) {
4852 4853
			if (kstrtoint(opt + 10, 0, &eee_timer))
				goto err;
4854 4855 4856
		} else if (!strncmp(opt, "chain_mode:", 11)) {
			if (kstrtoint(opt + 11, 0, &chain_mode))
				goto err;
4857
		}
4858 4859
	}
	return 0;
4860 4861 4862 4863

err:
	pr_err("%s: ERROR broken module parameter conversion", __func__);
	return -EINVAL;
4864 4865 4866
}

__setup("stmmaceth=", stmmac_cmdline_opt);
G
Giuseppe CAVALLARO 已提交
4867
#endif /* MODULE */
4868

4869 4870 4871 4872
static int __init stmmac_init(void)
{
#ifdef CONFIG_DEBUG_FS
	/* Create debugfs main directory if it doesn't exist yet */
4873
	if (!stmmac_fs_dir)
4874 4875 4876 4877 4878 4879 4880 4881 4882 4883 4884 4885 4886 4887 4888 4889
		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
#endif

	return 0;
}

static void __exit stmmac_exit(void)
{
#ifdef CONFIG_DEBUG_FS
	debugfs_remove_recursive(stmmac_fs_dir);
#endif
}

module_init(stmmac_init)
module_exit(stmmac_exit)

4890 4891 4892
MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");