/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"

#define STMMAC_ALIGN(x)	L1_CACHE_ALIGN(x)
#define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, S_IRUGO);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
#define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)
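/* Restart thresholds: the TX queue is woken once at least a quarter of the
 * TX ring is free again; the RX threshold plays the same role for the
 * receive refill path.
 */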

static int flow_ctrl = FLOW_OFF;
module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define	STMMAC_RX_COPYBREAK	256
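/* Received frames up to this many bytes are copied into a freshly allocated
 * skb so that the original DMA buffer can be recycled (rx copybreak).
 */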

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
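/* Absolute expiry, in jiffies, used to (re)arm the EEE/LPI software timer */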

/* By default the driver will use the ring mode to manage tx and rx descriptors,
 * but allow user to force to use the chain instead of the ring
 */
static unsigned int chain_mode;
module_param(chain_mode, int, S_IRUGO);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_DEBUG_FS
static int stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
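/* Absolute expiry, in jiffies, used to arm the TX mitigation (coalescing) timer */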

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
 * errors.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DEFAULT_BUFSIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Otherwise the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* Platform provided default clk_csr would be assumed valid
	 * for all other cases except for the below mentioned ones.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we can not estimate the proper divider as it is not known
	 * the frequency of clk_csr_i. So we do not change the default
	 * divider.
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}
}

static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
{
	u32 avail;
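	/* Free slots between dirty_tx and cur_tx on the circular ring; one
	 * descriptor is kept unused so a full ring can be told from an empty one.
	 */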

	if (priv->dirty_tx > priv->cur_tx)
		avail = priv->dirty_tx - priv->cur_tx - 1;
	else
		avail = DMA_TX_SIZE - priv->cur_tx + priv->dirty_tx - 1;

	return avail;
}

static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv)
{
	u32 dirty;

	if (priv->dirty_rx <= priv->cur_rx)
		dirty = priv->cur_rx - priv->dirty_rx;
	else
		dirty = DMA_RX_SIZE - priv->dirty_rx + priv->cur_rx;

	return dirty;
}

/**
 * stmmac_hw_fix_mac_speed - callback for speed selection
 * @priv: driver private structure
 * Description: on some platforms (e.g. ST), some HW system configuration
 * registers have to be set according to the link speed negotiated.
 */
static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
{
	struct net_device *ndev = priv->dev;
	struct phy_device *phydev = ndev->phydev;

	if (likely(priv->plat->fix_mac_speed))
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
}

/**
 * stmmac_enable_eee_mode - check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function is to verify and enter in LPI mode in case of
 * EEE.
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	/* Check and enter in LPI mode */
	if ((priv->dirty_tx == priv->cur_tx) &&
	    (priv->tx_path_in_lpi_mode == false))
		priv->hw->mac->set_eee_mode(priv->hw,
					    priv->plat->en_tx_lpi_clockgating);
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function is to exit and disable EEE in case of
 * LPI state is true. This is called by the xmit.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	priv->hw->mac->reset_eee_mode(priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @arg : data hook
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(unsigned long arg)
{
	struct stmmac_priv *priv = (struct stmmac_priv *)arg;

	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts the related
 *  timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	struct net_device *ndev = priv->dev;
	unsigned long flags;
	bool ret = false;

	/* Using PCS we cannot deal with the phy registers at this stage
	 * so we do not support extra features like EEE.
	 */
	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
	    (priv->hw->pcs == STMMAC_PCS_RTBI))
		goto out;

	/* MAC core supports the EEE feature. */
	if (priv->dma_cap.eee) {
		int tx_lpi_timer = priv->tx_lpi_timer;

		/* Check if the PHY supports EEE */
		if (phy_init_eee(ndev->phydev, 1)) {
			/* To manage at run-time if the EEE cannot be supported
			 * anymore (for example because the lp caps have been
			 * changed).
			 * In that case the driver disables its own timers.
			 */
			spin_lock_irqsave(&priv->lock, flags);
			if (priv->eee_active) {
				netdev_dbg(priv->dev, "disable EEE\n");
				del_timer_sync(&priv->eee_ctrl_timer);
				priv->hw->mac->set_eee_timer(priv->hw, 0,
							     tx_lpi_timer);
			}
			priv->eee_active = 0;
			spin_unlock_irqrestore(&priv->lock, flags);
			goto out;
		}
		/* Activate the EEE and start timers */
		spin_lock_irqsave(&priv->lock, flags);
		if (!priv->eee_active) {
			priv->eee_active = 1;
			setup_timer(&priv->eee_ctrl_timer,
				    stmmac_eee_ctrl_timer,
				    (unsigned long)priv);
			mod_timer(&priv->eee_ctrl_timer,
				  STMMAC_LPI_T(eee_timer));

			priv->hw->mac->set_eee_timer(priv->hw,
						     STMMAC_DEFAULT_LIT_LS,
						     tx_lpi_timer);
		}
		/* Set HW EEE according to the speed */
		priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);

		ret = true;
		spin_unlock_irqrestore(&priv->lock, flags);

		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	}
out:
	return ret;
}

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the descriptor, passes it to the
 * stack and also performs some sanity checks.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	u64 ns;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status */
	if (!priv->hw->desc->get_tx_timestamp_status(p)) {
		/* get the valid tstamp */
		ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);

		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}

	return;
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read received packet's timestamp from the descriptor
 * and pass it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	u64 ns;

	if (!priv->hwts_rx_en)
		return;

	/* Check if timestamp is available */
	if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
		/* For GMAC4, the valid timestamp is from CTX next desc. */
		if (priv->plat->has_gmac4)
			ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
		else
			ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);

		netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else  {
		netdev_err(priv->dev, "cannot get RX hw timestamp\n");
	}
}

/**
 *  stmmac_hwtstamp_ioctl - control hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing(TX)
 *  and incoming(RX) packets time stamping based on user input.
 *  Return Value:
 *  0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	struct timespec64 now;
	u64 temp = 0;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;
	u32 value = 0;
	u32 sec_inc;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(struct hwtstamp_config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
	else {
		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
			 tstamp_all | ptp_v2 | ptp_over_ethernet |
			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
			 ts_master_en | snap_type_sel);
		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);

		/* program Sub Second Increment reg */
		sec_inc = priv->hw->ptp->config_sub_second_increment(
			priv->ptpaddr, priv->plat->clk_ptp_rate,
			priv->plat->has_gmac4);
		temp = div_u64(1000000000ULL, sec_inc);

		/* calculate default added value:
		 * formula is :
		 * addend = (2^32)/freq_div_ratio;
		 * where, freq_div_ratio = 1e9ns/sec_inc
		 */
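		/* i.e. addend = 2^32 * (1e9 / sec_inc) / clk_ptp_rate, so the 32-bit
		 * accumulator clocked at clk_ptp_rate overflows once every sec_inc ns.
		 */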
		temp = (u64)(temp << 32);
		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
		priv->hw->ptp->config_addend(priv->ptpaddr,
					     priv->default_addend);

		/* initialize system time */
		ktime_get_real_ts64(&now);

		/* lower 32 bits of tv_sec are safe until y2106 */
		priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
					    now.tv_nsec);
	}

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
}

/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	priv->adv_ts = 0;
	/* Check if adv_ts can be enabled for dwmac 4.x core */
	if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	priv->hw->ptp = &stmmac_ptp;
	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	stmmac_ptp_register(priv);

	return 0;
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	if (priv->plat->clk_ptp_ref)
		clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}

/**
 * stmmac_adjust_link - adjusts the link parameters
 * @dev: net device structure
 * Description: this is the helper called by the physical abstraction layer
 * drivers to communicate the phy link status. According to the speed and
 * duplex this driver can invoke registered glue-logic as well.
 * It also invokes the EEE initialization because it could happen when
 * switching between networks (that are EEE capable).
 */
static void stmmac_adjust_link(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned long flags;
	int new_state = 0;
	unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;

	if (!phydev)
		return;

	spin_lock_irqsave(&priv->lock, flags);

	if (phydev->link) {
		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				ctrl &= ~priv->hw->link.duplex;
			else
				ctrl |= priv->hw->link.duplex;
			priv->oldduplex = phydev->duplex;
		}
		/* Flow Control operation */
		if (phydev->pause)
			priv->hw->mac->flow_ctrl(priv->hw, phydev->duplex,
						 fc, pause_time);

		if (phydev->speed != priv->speed) {
			new_state = 1;
			switch (phydev->speed) {
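			/* hw->link.port selects the 10/100 (MII) interface: it is
			 * cleared for 1000 Mbps (GMII) and set otherwise, with
			 * hw->link.speed then picking 100 vs 10 Mbps.
			 */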
			case 1000:
				if (priv->plat->has_gmac ||
				    priv->plat->has_gmac4)
					ctrl &= ~priv->hw->link.port;
				break;
			case 100:
				if (priv->plat->has_gmac ||
				    priv->plat->has_gmac4) {
					ctrl |= priv->hw->link.port;
					ctrl |= priv->hw->link.speed;
				} else {
					ctrl &= ~priv->hw->link.port;
				}
				break;
			case 10:
				if (priv->plat->has_gmac ||
				    priv->plat->has_gmac4) {
					ctrl |= priv->hw->link.port;
					ctrl &= ~(priv->hw->link.speed);
				} else {
					ctrl &= ~priv->hw->link.port;
				}
				break;
			default:
				netif_warn(priv, link, priv->dev,
					   "broken speed: %d\n", phydev->speed);
				phydev->speed = SPEED_UNKNOWN;
				break;
			}
			if (phydev->speed != SPEED_UNKNOWN)
				stmmac_hw_fix_mac_speed(priv);
			priv->speed = phydev->speed;
		}

		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->speed = SPEED_UNKNOWN;
		priv->oldduplex = DUPLEX_UNKNOWN;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (phydev->is_pseudo_fixed_link)
		/* Stop PHY layer to call the hook to adjust the link in case
		 * of a switch is attached to the stmmac driver.
		 */
		phydev->irq = PHY_IGNORE_INTERRUPT;
	else
		/* At this stage, init the EEE if supported.
		 * Never called in case of fixed_link.
		 */
		priv->eee_enabled = stmmac_eee_init(priv);
}

/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS.
 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
	int interface = priv->plat->interface;

	if (priv->dma_cap.pcs) {
		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_RGMII;
		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_SGMII;
		}
	}
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 *  Return value:
 *  0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
	char bus_id[MII_BUS_ID_SIZE];
	int interface = priv->plat->interface;
	int max_speed = priv->plat->max_speed;
	priv->oldlink = 0;
	priv->speed = SPEED_UNKNOWN;
	priv->oldduplex = DUPLEX_UNKNOWN;

	if (priv->plat->phy_node) {
		phydev = of_phy_connect(dev, priv->plat->phy_node,
					&stmmac_adjust_link, 0, interface);
	} else {
		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
			 priv->plat->bus_id);

		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
			 priv->plat->phy_addr);
		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
			   phy_id_fmt);

		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
				     interface);
	}

	if (IS_ERR_OR_NULL(phydev)) {
		netdev_err(priv->dev, "Could not attach to PHY\n");
		if (!phydev)
			return -ENODEV;

		return PTR_ERR(phydev);
	}

	/* Stop Advertising 1000BASE Capability if interface is not GMII */
	if ((interface == PHY_INTERFACE_MODE_MII) ||
	    (interface == PHY_INTERFACE_MODE_RMII) ||
		(max_speed < 1000 && max_speed > 0))
		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
					 SUPPORTED_1000baseT_Full);

	/*
	 * Broken HW is sometimes missing the pull-up resistor on the
	 * MDIO line, which results in reads to non-existent devices returning
	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
	 * device as well.
	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
	 */
	if (!priv->plat->phy_node && phydev->phy_id == 0) {
		phy_disconnect(phydev);
		return -ENODEV;
	}

	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
	 * subsequent PHY polling, make sure we force a link transition if
	 * we have a UP/DOWN/UP transition
	 */
	if (phydev->is_pseudo_fixed_link)
		phydev->irq = PHY_POLL;

	phy_attached_info(phydev);
	return 0;
}

static void stmmac_display_rings(struct stmmac_priv *priv)
{
	void *head_rx, *head_tx;

	if (priv->extend_desc) {
		head_rx = (void *)priv->dma_erx;
		head_tx = (void *)priv->dma_etx;
	} else {
		head_rx = (void *)priv->dma_rx;
		head_tx = (void *)priv->dma_tx;
	}

	/* Display Rx ring */
	priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
	/* Display Tx ring */
	priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
}

static int stmmac_set_bfsize(int mtu, int bufsize)
{
	int ret = bufsize;
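	/* Pick a DMA buffer size comfortably larger than the MTU */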

	if (mtu >= BUF_SIZE_4KiB)
		ret = BUF_SIZE_8KiB;
	else if (mtu >= BUF_SIZE_2KiB)
		ret = BUF_SIZE_4KiB;
	else if (mtu > DEFAULT_BUFSIZE)
		ret = BUF_SIZE_2KiB;
	else
		ret = DEFAULT_BUFSIZE;

	return ret;
}

/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the tx and rx descriptors
 * whether basic or extended descriptors are used.
 */
static void stmmac_clear_descriptors(struct stmmac_priv *priv)
{
	int i;

	/* Clear the Rx/Tx descriptors */
	for (i = 0; i < DMA_RX_SIZE; i++)
		if (priv->extend_desc)
			priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
						     priv->use_riwt, priv->mode,
						     (i == DMA_RX_SIZE - 1));
		else
			priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
						     priv->use_riwt, priv->mode,
						     (i == DMA_RX_SIZE - 1));
	for (i = 0; i < DMA_TX_SIZE; i++)
		if (priv->extend_desc)
			priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
						     priv->mode,
						     (i == DMA_TX_SIZE - 1));
		else
			priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
						     priv->mode,
						     (i == DMA_TX_SIZE - 1));
}

/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag.
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
				  int i, gfp_t flags)
{
	struct sk_buff *skb;

	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
	if (!skb) {
		netdev_err(priv->dev,
			   "%s: Rx init fails; skb is NULL\n", __func__);
		return -ENOMEM;
	}
	priv->rx_skbuff[i] = skb;
	priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
						priv->dma_buf_sz,
						DMA_FROM_DEVICE);
	if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (priv->synopsys_id >= DWMAC_CORE_4_00)
		p->des0 = cpu_to_le32(priv->rx_skbuff_dma[i]);
	else
		p->des2 = cpu_to_le32(priv->rx_skbuff_dma[i]);

	if ((priv->hw->mode->init_desc3) &&
	    (priv->dma_buf_sz == BUF_SIZE_16KiB))
		priv->hw->mode->init_desc3(p);

	return 0;
}

static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
{
	if (priv->rx_skbuff[i]) {
		dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
				 priv->dma_buf_sz, DMA_FROM_DEVICE);
		dev_kfree_skb_any(priv->rx_skbuff[i]);
	}
	priv->rx_skbuff[i] = NULL;
}

/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
{
	int i;
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int bfsize = 0;
	int ret = -ENOMEM;

	if (priv->hw->mode->set_16kib_bfsize)
		bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);

	if (bfsize < BUF_SIZE_16KiB)
		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);

	priv->dma_buf_sz = bfsize;

	netif_dbg(priv, probe, priv->dev,
		  "(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n",
		  __func__, (u32)priv->dma_rx_phy, (u32)priv->dma_tx_phy);

	/* RX INITIALIZATION */
	netif_dbg(priv, probe, priv->dev,
		  "SKB addresses:\nskb\t\tskb data\tdma data\n");

	for (i = 0; i < DMA_RX_SIZE; i++) {
		struct dma_desc *p;
		if (priv->extend_desc)
			p = &((priv->dma_erx + i)->basic);
		else
			p = priv->dma_rx + i;

		ret = stmmac_init_rx_buffers(priv, p, i, flags);
		if (ret)
			goto err_init_rx_buffers;

		netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
			  priv->rx_skbuff[i], priv->rx_skbuff[i]->data,
			  (unsigned int)priv->rx_skbuff_dma[i]);
	}
	priv->cur_rx = 0;
	priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
	buf_sz = bfsize;

	/* Setup the chained descriptor addresses */
	if (priv->mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc) {
			priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy,
					     DMA_RX_SIZE, 1);
			priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy,
					     DMA_TX_SIZE, 1);
		} else {
			priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy,
					     DMA_RX_SIZE, 0);
			priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy,
					     DMA_TX_SIZE, 0);
		}
	}

	/* TX INITIALIZATION */
	for (i = 0; i < DMA_TX_SIZE; i++) {
		struct dma_desc *p;
		if (priv->extend_desc)
			p = &((priv->dma_etx + i)->basic);
		else
			p = priv->dma_tx + i;

		if (priv->synopsys_id >= DWMAC_CORE_4_00) {
			p->des0 = 0;
			p->des1 = 0;
			p->des2 = 0;
			p->des3 = 0;
		} else {
			p->des2 = 0;
		}

		priv->tx_skbuff_dma[i].buf = 0;
		priv->tx_skbuff_dma[i].map_as_page = false;
		priv->tx_skbuff_dma[i].len = 0;
		priv->tx_skbuff_dma[i].last_segment = false;
		priv->tx_skbuff[i] = NULL;
	}

	priv->dirty_tx = 0;
	priv->cur_tx = 0;
	netdev_reset_queue(priv->dev);

	stmmac_clear_descriptors(priv);

	if (netif_msg_hw(priv))
		stmmac_display_rings(priv);

	return 0;
err_init_rx_buffers:
	while (--i >= 0)
		stmmac_free_rx_buffers(priv, i);
	return ret;
}

static void dma_free_rx_skbufs(struct stmmac_priv *priv)
{
	int i;

	for (i = 0; i < DMA_RX_SIZE; i++)
		stmmac_free_rx_buffers(priv, i);
}

static void dma_free_tx_skbufs(struct stmmac_priv *priv)
{
	int i;

	for (i = 0; i < DMA_TX_SIZE; i++) {
		if (priv->tx_skbuff_dma[i].buf) {
			if (priv->tx_skbuff_dma[i].map_as_page)
				dma_unmap_page(priv->device,
					       priv->tx_skbuff_dma[i].buf,
					       priv->tx_skbuff_dma[i].len,
					       DMA_TO_DEVICE);
			else
				dma_unmap_single(priv->device,
						 priv->tx_skbuff_dma[i].buf,
						 priv->tx_skbuff_dma[i].len,
						 DMA_TO_DEVICE);
		}

		if (priv->tx_skbuff[i]) {
			dev_kfree_skb_any(priv->tx_skbuff[i]);
			priv->tx_skbuff[i] = NULL;
			priv->tx_skbuff_dma[i].buf = 0;
			priv->tx_skbuff_dma[i].map_as_page = false;
		}
	}
}

/**
 * alloc_dma_desc_resources - alloc TX/RX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffers in order to
 * allow the zero-copy mechanism.
 */
static int alloc_dma_desc_resources(struct stmmac_priv *priv)
{
	int ret = -ENOMEM;

	priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t),
					    GFP_KERNEL);
	if (!priv->rx_skbuff_dma)
		return -ENOMEM;

	priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *),
					GFP_KERNEL);
	if (!priv->rx_skbuff)
		goto err_rx_skbuff;

	priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
					    sizeof(*priv->tx_skbuff_dma),
					    GFP_KERNEL);
	if (!priv->tx_skbuff_dma)
		goto err_tx_skbuff_dma;

	priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *),
					GFP_KERNEL);
	if (!priv->tx_skbuff)
		goto err_tx_skbuff;

	if (priv->extend_desc) {
		priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
						    sizeof(struct
							   dma_extended_desc),
						    &priv->dma_rx_phy,
						    GFP_KERNEL);
		if (!priv->dma_erx)
			goto err_dma;

		priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
						    sizeof(struct
							   dma_extended_desc),
						    &priv->dma_tx_phy,
						    GFP_KERNEL);
		if (!priv->dma_etx) {
			dma_free_coherent(priv->device, DMA_RX_SIZE *
					  sizeof(struct dma_extended_desc),
					  priv->dma_erx, priv->dma_rx_phy);
			goto err_dma;
		}
	} else {
		priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
						   sizeof(struct dma_desc),
						   &priv->dma_rx_phy,
						   GFP_KERNEL);
		if (!priv->dma_rx)
			goto err_dma;

		priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
						   sizeof(struct dma_desc),
						   &priv->dma_tx_phy,
						   GFP_KERNEL);
		if (!priv->dma_tx) {
			dma_free_coherent(priv->device, DMA_RX_SIZE *
					  sizeof(struct dma_desc),
					  priv->dma_rx, priv->dma_rx_phy);
			goto err_dma;
		}
	}

	return 0;

err_dma:
	kfree(priv->tx_skbuff);
err_tx_skbuff:
	kfree(priv->tx_skbuff_dma);
err_tx_skbuff_dma:
	kfree(priv->rx_skbuff);
err_rx_skbuff:
	kfree(priv->rx_skbuff_dma);
	return ret;
}

static void free_dma_desc_resources(struct stmmac_priv *priv)
{
	/* Release the DMA TX/RX socket buffers */
	dma_free_rx_skbufs(priv);
	dma_free_tx_skbufs(priv);

	/* Free DMA regions of consistent memory previously allocated */
	if (!priv->extend_desc) {
		dma_free_coherent(priv->device,
				  DMA_TX_SIZE * sizeof(struct dma_desc),
				  priv->dma_tx, priv->dma_tx_phy);
		dma_free_coherent(priv->device,
				  DMA_RX_SIZE * sizeof(struct dma_desc),
				  priv->dma_rx, priv->dma_rx_phy);
	} else {
		dma_free_coherent(priv->device, DMA_TX_SIZE *
				  sizeof(struct dma_extended_desc),
				  priv->dma_etx, priv->dma_tx_phy);
		dma_free_coherent(priv->device, DMA_RX_SIZE *
				  sizeof(struct dma_extended_desc),
				  priv->dma_erx, priv->dma_rx_phy);
	}
	kfree(priv->rx_skbuff_dma);
	kfree(priv->rx_skbuff);
	kfree(priv->tx_skbuff_dma);
	kfree(priv->tx_skbuff);
}

/**
 *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
 *  @priv: driver private structure
 *  Description: It is used for enabling the rx queues in the MAC
 */
static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
{
	int rx_count = priv->dma_cap.number_rx_queues;
	int queue = 0;

	/* If GMAC does not have multiple queues, then this is not necessary*/
	if (rx_count == 1)
		return;

	/**
	 *  If the core is synthesized with multiple rx queues / multiple
	 *  dma channels, then rx queues will be disabled by default.
	 *  For now only rx queue 0 is enabled.
	 */
	priv->hw->mac->rx_queue_enable(priv->hw, queue);
}

/**
 *  stmmac_dma_operation_mode - HW DMA operation mode
 *  @priv: driver private structure
 *  Description: it is used for configuring the DMA operation mode register in
 *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
 */
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
	int rxfifosz = priv->plat->rx_fifo_size;

	if (rxfifosz == 0)
		rxfifosz = priv->dma_cap.rx_fifo_size;

	if (priv->plat->force_thresh_dma_mode)
		priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, rxfifosz);
	else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
		/*
		 * In case of GMAC, SF mode can be enabled
		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE if actually supported
		 * 2) There is no bugged Jumbo frame support
		 *    that needs to not insert csum in the TDES.
		 */
		priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE,
					rxfifosz);
		priv->xstats.threshold = SF_DMA_MODE;
	} else
		priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE,
					rxfifosz);
}

/**
 * stmmac_tx_clean - to manage the transmission completion
 * @priv: driver private structure
 * Description: it reclaims the transmit resources after transmission completes.
 */
static void stmmac_tx_clean(struct stmmac_priv *priv)
{
	unsigned int bytes_compl = 0, pkts_compl = 0;
	unsigned int entry = priv->dirty_tx;

	netif_tx_lock(priv->dev);

	priv->xstats.tx_clean++;

	while (entry != priv->cur_tx) {
		struct sk_buff *skb = priv->tx_skbuff[entry];
		struct dma_desc *p;
		int status;

		if (priv->extend_desc)
			p = (struct dma_desc *)(priv->dma_etx + entry);
		else
			p = priv->dma_tx + entry;

		status = priv->hw->desc->tx_status(&priv->dev->stats,
						      &priv->xstats, p,
						      priv->ioaddr);
		/* Check if the descriptor is owned by the DMA */
		if (unlikely(status & tx_dma_own))
			break;

		/* Just consider the last segment and ...*/
		if (likely(!(status & tx_not_ls))) {
			/* ... verify the status error condition */
			if (unlikely(status & tx_err)) {
				priv->dev->stats.tx_errors++;
			} else {
				priv->dev->stats.tx_packets++;
				priv->xstats.tx_pkt_n++;
			}
			stmmac_get_tx_hwtstamp(priv, p, skb);
		}

		if (likely(priv->tx_skbuff_dma[entry].buf)) {
			if (priv->tx_skbuff_dma[entry].map_as_page)
				dma_unmap_page(priv->device,
					       priv->tx_skbuff_dma[entry].buf,
					       priv->tx_skbuff_dma[entry].len,
					       DMA_TO_DEVICE);
			else
				dma_unmap_single(priv->device,
						 priv->tx_skbuff_dma[entry].buf,
						 priv->tx_skbuff_dma[entry].len,
						 DMA_TO_DEVICE);
			priv->tx_skbuff_dma[entry].buf = 0;
			priv->tx_skbuff_dma[entry].len = 0;
			priv->tx_skbuff_dma[entry].map_as_page = false;
		}

		if (priv->hw->mode->clean_desc3)
			priv->hw->mode->clean_desc3(priv, p);

		priv->tx_skbuff_dma[entry].last_segment = false;
		priv->tx_skbuff_dma[entry].is_jumbo = false;

		if (likely(skb != NULL)) {
			pkts_compl++;
			bytes_compl += skb->len;
			dev_consume_skb_any(skb);
			priv->tx_skbuff[entry] = NULL;
		}

		priv->hw->desc->release_tx_desc(p, priv->mode);

		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
	}
	priv->dirty_tx = entry;

	netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);

	if (unlikely(netif_queue_stopped(priv->dev) &&
	    stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) {
		netif_dbg(priv, tx_done, priv->dev,
			  "%s: restart transmit\n", __func__);
		netif_wake_queue(priv->dev);
	}

	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
		stmmac_enable_eee_mode(priv);
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
	}
	netif_tx_unlock(priv->dev);
}

static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
{
	priv->hw->dma->enable_dma_irq(priv->ioaddr);
}

static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
{
	priv->hw->dma->disable_dma_irq(priv->ioaddr);
}

/**
 * stmmac_tx_err - to manage the tx error
 * @priv: driver private structure
 * Description: it cleans the descriptors and restarts the transmission
 * in case of transmission errors.
 */
static void stmmac_tx_err(struct stmmac_priv *priv)
{
	int i;
	netif_stop_queue(priv->dev);

	priv->hw->dma->stop_tx(priv->ioaddr);
	dma_free_tx_skbufs(priv);
	for (i = 0; i < DMA_TX_SIZE; i++)
		if (priv->extend_desc)
			priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
						     priv->mode,
						     (i == DMA_TX_SIZE - 1));
		else
			priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
						     priv->mode,
						     (i == DMA_TX_SIZE - 1));
	priv->dirty_tx = 0;
	priv->cur_tx = 0;
	netdev_reset_queue(priv->dev);
	priv->hw->dma->start_tx(priv->ioaddr);

	priv->dev->stats.tx_errors++;
	netif_wake_queue(priv->dev);
}

/**
 * stmmac_dma_interrupt - DMA ISR
 * @priv: driver private structure
 * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine and schedules the NAPI poll method if there
 * is work to be done.
 */
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
	int status;
	int rxfifosz = priv->plat->rx_fifo_size;

	status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
	if (likely((status & handle_rx)) || (status & handle_tx)) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			stmmac_disable_dma_irq(priv);
			__napi_schedule(&priv->napi);
		}
	}
	if (unlikely(status & tx_hard_error_bump_tc)) {
		/* Try to bump up the dma threshold on this failure */
		if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
		    (tc <= 256)) {
			tc += 64;
			if (priv->plat->force_thresh_dma_mode)
				priv->hw->dma->dma_mode(priv->ioaddr, tc, tc,
							rxfifosz);
			else
				priv->hw->dma->dma_mode(priv->ioaddr, tc,
							SF_DMA_MODE, rxfifosz);
			priv->xstats.threshold = tc;
		}
	} else if (unlikely(status == tx_hard_error))
		stmmac_tx_err(priv);
}

/**
 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
 * @priv: driver private structure
 * Description: this masks the MMC irq, in fact, the counters are managed in SW.
 */
static void stmmac_mmc_setup(struct stmmac_priv *priv)
{
	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;

	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
		priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
	} else {
		priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
		priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
	}

	dwmac_mmc_intr_all_mask(priv->mmcaddr);

	if (priv->dma_cap.rmon) {
		dwmac_mmc_ctrl(priv->mmcaddr, mode);
		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
	} else
		netdev_info(priv->dev, "No MAC Management Counters available\n");
}

/**
 * stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors
 * @priv: driver private structure
 * Description: select the Enhanced/Alternate or Normal descriptors.
 * In case of Enhanced/Alternate, it checks if the extended descriptors are
 * supported by the HW capability register.
 */
static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
{
	if (priv->plat->enh_desc) {
		dev_info(priv->device, "Enhanced/Alternate descriptors\n");

		/* GMAC older than 3.50 has no extended descriptors */
		if (priv->synopsys_id >= DWMAC_CORE_3_50) {
			dev_info(priv->device, "Enabled extended descriptors\n");
			priv->extend_desc = 1;
		} else
			dev_warn(priv->device, "Extended descriptors not supported\n");

		priv->hw->desc = &enh_desc_ops;
	} else {
		dev_info(priv->device, "Normal descriptors\n");
		priv->hw->desc = &ndesc_ops;
	}
}

/**
 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
 * @priv: driver private structure
 * Description:
 *  new GMAC chip generations have a new register to indicate the
 *  presence of the optional feature/functions.
 *  This can be also used to override the value passed through the
 *  platform and necessary for old MAC10/100 and GMAC chips.
 */
static int stmmac_get_hw_features(struct stmmac_priv *priv)
{
	u32 ret = 0;

	if (priv->hw->dma->get_hw_feature) {
		priv->hw->dma->get_hw_feature(priv->ioaddr,
					      &priv->dma_cap);
		ret = 1;
	}

	return ret;
}

/**
 * stmmac_check_ether_addr - check if the MAC addr is valid
 * @priv: driver private structure
 * Description:
 * it is to verify if the MAC address is valid, in case of failures it
 * generates a random MAC address
 */
static void stmmac_check_ether_addr(struct stmmac_priv *priv)
{
	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
		priv->hw->mac->get_umac_addr(priv->hw,
					     priv->dev->dev_addr, 0);
		if (!is_valid_ether_addr(priv->dev->dev_addr))
			eth_hw_addr_random(priv->dev);
		netdev_info(priv->dev, "device MAC address %pM\n",
			    priv->dev->dev_addr);
	}
}

/**
 * stmmac_init_dma_engine - DMA init.
 * @priv: driver private structure
 * Description:
 * It inits the DMA invoking the specific MAC/GMAC callback.
 * Some DMA parameters can be passed from the platform;
 * in case of these are not passed a default is kept for the MAC or GMAC.
 */
static int stmmac_init_dma_engine(struct stmmac_priv *priv)
{
	int atds = 0;
	int ret = 0;

	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
		dev_err(priv->device, "Invalid DMA configuration\n");
		return -EINVAL;
	}

	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
		atds = 1;

	ret = priv->hw->dma->reset(priv->ioaddr);
	if (ret) {
		dev_err(priv->device, "Failed to reset the dma\n");
		return ret;
	}

	priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
			    priv->dma_tx_phy, priv->dma_rx_phy, atds);

	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		priv->rx_tail_addr = priv->dma_rx_phy +
			    (DMA_RX_SIZE * sizeof(struct dma_desc));
		priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, priv->rx_tail_addr,
					       STMMAC_CHAN0);

		priv->tx_tail_addr = priv->dma_tx_phy +
			    (DMA_TX_SIZE * sizeof(struct dma_desc));
		priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
					       STMMAC_CHAN0);
	}

	if (priv->plat->axi && priv->hw->dma->axi)
		priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);

	return ret;
}

/**
 * stmmac_tx_timer - mitigation sw timer for tx.
 * @data: data pointer
 * Description:
 * This is the timer handler to directly invoke the stmmac_tx_clean.
 */
static void stmmac_tx_timer(unsigned long data)
{
	struct stmmac_priv *priv = (struct stmmac_priv *)data;

	stmmac_tx_clean(priv);
}

/**
 * stmmac_init_tx_coalesce - init tx mitigation options.
 * @priv: driver private structure
 * Description:
 * This inits the transmit coalesce parameters: i.e. timer rate,
 * timer handler and default threshold used for enabling the
 * interrupt on completion bit.
 */
static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
{
	priv->tx_coal_frames = STMMAC_TX_FRAMES;
	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
	init_timer(&priv->txtimer);
	priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
	priv->txtimer.data = (unsigned long)priv;
	priv->txtimer.function = stmmac_tx_timer;
	add_timer(&priv->txtimer);
}

/**
 *  stmmac_mtl_configuration - Configure MTL
 *  @priv: driver private structure
 *  Description: It is used for configurring MTL
 */
static void stmmac_mtl_configuration(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 tx_queues_count = priv->plat->tx_queues_to_use;

	/* Configure MTL RX algorithms */
	if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
		priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
						priv->plat->rx_sched_algorithm);

	/* Configure MTL TX algorithms */
	if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
		priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
						priv->plat->tx_sched_algorithm);

	/* Enable MAC RX Queues */
	if (rx_queues_count > 1 && priv->hw->mac->rx_queue_enable)
		stmmac_mac_enable_rx_queues(priv);
}

/**
 * stmmac_hw_setup - setup mac in a usable state.
 *  @dev : pointer to the device structure.
 *  Description:
 *  this is the main function to setup the HW in a usable state: the
 *  dma engine is reset, the core registers are configured (e.g. AXI,
 *  Checksum features, timers) and the DMA is ready to start receiving
 *  and transmitting.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	/* DMA initialization and SW reset */
	ret = stmmac_init_dma_engine(priv);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
			   __func__);
		return ret;
	}

	/* Copy the MAC addr into the HW  */
	priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);

	/* PS and related bits will be programmed according to the speed */
	if (priv->hw->pcs) {
		int speed = priv->plat->mac_port_sel_speed;

		if ((speed == SPEED_10) || (speed == SPEED_100) ||
		    (speed == SPEED_1000)) {
			priv->hw->ps = speed;
		} else {
			dev_warn(priv->device, "invalid port speed\n");
			priv->hw->ps = 0;
		}
	}

	/* Initialize the MAC Core */
	priv->hw->mac->core_init(priv->hw, dev->mtu);

	/* Initialize MTL*/
	if (priv->synopsys_id >= DWMAC_CORE_4_00)
		stmmac_mtl_configuration(priv);

	ret = priv->hw->mac->rx_ipc(priv->hw);
	if (!ret) {
		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
		priv->hw->rx_csum = 0;
	}

	/* Enable the MAC Rx/Tx */
	if (priv->synopsys_id >= DWMAC_CORE_4_00)
		stmmac_dwmac4_set_mac(priv->ioaddr, true);
	else
		stmmac_set_mac(priv->ioaddr, true);

	/* Set the HW DMA mode and the COE */
	stmmac_dma_operation_mode(priv);

	stmmac_mmc_setup(priv);

	if (init_ptp) {
		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
		if (ret < 0)
			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);

		ret = stmmac_init_ptp(priv);
		if (ret == -EOPNOTSUPP)
			netdev_warn(priv->dev, "PTP not supported by HW\n");
		else if (ret)
			netdev_warn(priv->dev, "PTP init failed\n");
	}

#ifdef CONFIG_DEBUG_FS
	ret = stmmac_init_fs(dev);
	if (ret < 0)
		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
			    __func__);
#endif
	/* Start the ball rolling... */
	netdev_dbg(priv->dev, "DMA RX/TX processes started...\n");
	priv->hw->dma->start_tx(priv->ioaddr);
	priv->hw->dma->start_rx(priv->ioaddr);

	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;

	if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
		priv->rx_riwt = MAX_DMA_RIWT;
		priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
	}

	if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
		priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);

	/*  set TX ring length */
	if (priv->hw->dma->set_tx_ring_len)
		priv->hw->dma->set_tx_ring_len(priv->ioaddr,
					       (DMA_TX_SIZE - 1));
	/*  set RX ring length */
	if (priv->hw->dma->set_rx_ring_len)
		priv->hw->dma->set_rx_ring_len(priv->ioaddr,
					       (DMA_RX_SIZE - 1));
	/* Enable TSO */
	if (priv->tso)
		priv->hw->dma->enable_tso(priv->ioaddr, 1, STMMAC_CHAN0);

	return 0;
}

static void stmmac_hw_teardown(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	clk_disable_unprepare(priv->plat->clk_ptp_ref);
}

/**
 *  stmmac_open - open entry point of the driver
 *  @dev : pointer to the device structure.
 *  Description:
 *  This function is the open entry point of the driver.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int stmmac_open(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	stmmac_check_ether_addr(priv);

	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI) {
		ret = stmmac_init_phy(dev);
		if (ret) {
			netdev_err(priv->dev,
				   "%s: Cannot attach to PHY (error: %d)\n",
				   __func__, ret);
			return ret;
		}
	}

	/* Extra statistics */
	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
	priv->xstats.threshold = tc;

	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
	priv->rx_copybreak = STMMAC_RX_COPYBREAK;

	ret = alloc_dma_desc_resources(priv);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
			   __func__);
		goto dma_desc_error;
	}

	ret = init_dma_desc_rings(dev, GFP_KERNEL);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
			   __func__);
		goto init_error;
	}

	ret = stmmac_hw_setup(dev, true);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
		goto init_error;
	}

	stmmac_init_tx_coalesce(priv);

	if (dev->phydev)
		phy_start(dev->phydev);

	/* Request the IRQ lines */
	ret = request_irq(dev->irq, stmmac_interrupt,
			  IRQF_SHARED, dev->name, dev);
	if (unlikely(ret < 0)) {
		netdev_err(priv->dev,
			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
			   __func__, dev->irq, ret);
		goto irq_error;
	}

	/* Request the Wake IRQ in case another line is used for WoL */
	if (priv->wol_irq != dev->irq) {
		ret = request_irq(priv->wol_irq, stmmac_interrupt,
				  IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
				   __func__, priv->wol_irq, ret);
			goto wolirq_error;
		}
	}

	/* Request the LPI IRQ in case a separate line is used for LPI */
	if (priv->lpi_irq > 0) {
		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
				  dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
				   __func__, priv->lpi_irq, ret);
			goto lpiirq_error;
		}
	}

	napi_enable(&priv->napi);
	netif_start_queue(dev);

	return 0;

lpiirq_error:
	if (priv->wol_irq != dev->irq)
		free_irq(priv->wol_irq, dev);
wolirq_error:
	free_irq(dev->irq, dev);
irq_error:
	if (dev->phydev)
		phy_stop(dev->phydev);

	del_timer_sync(&priv->txtimer);
	stmmac_hw_teardown(dev);
init_error:
	free_dma_desc_resources(priv);
dma_desc_error:
	if (dev->phydev)
		phy_disconnect(dev->phydev);

	return ret;
}

/**
 *  stmmac_release - close entry point of the driver
 *  @dev : device pointer.
 *  Description:
 *  This is the stop entry point of the driver.
 */
static int stmmac_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (priv->eee_enabled)
		del_timer_sync(&priv->eee_ctrl_timer);

	/* Stop and disconnect the PHY */
	if (dev->phydev) {
		phy_stop(dev->phydev);
		phy_disconnect(dev->phydev);
	}

	netif_stop_queue(dev);

	napi_disable(&priv->napi);

	del_timer_sync(&priv->txtimer);

	/* Free the IRQ lines */
	free_irq(dev->irq, dev);
	if (priv->wol_irq != dev->irq)
		free_irq(priv->wol_irq, dev);
	if (priv->lpi_irq > 0)
		free_irq(priv->lpi_irq, dev);

	/* Stop TX/RX DMA and clear the descriptors */
	priv->hw->dma->stop_tx(priv->ioaddr);
	priv->hw->dma->stop_rx(priv->ioaddr);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv);

	/* Disable the MAC Rx/Tx */
	stmmac_set_mac(priv->ioaddr, false);

	netif_carrier_off(dev);

#ifdef CONFIG_DEBUG_FS
	stmmac_exit_fs(dev);
#endif

	stmmac_release_ptp(priv);

	return 0;
}

/**
 *  stmmac_tso_allocator - fill descriptors with the TSO payload
 *  @priv: driver private structure
 *  @des: buffer start address
 *  @total_len: total length to fill in descriptors
 *  @last_segment: condition for the last descriptor
 *  Description:
 *  This function fills descriptors and requests new descriptors according
 *  to the buffer length to fill.
 */
static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
				 int total_len, bool last_segment)
{
	struct dma_desc *desc;
	int tmp_len;
	u32 buff_size;

	tmp_len = total_len;

	while (tmp_len > 0) {
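		/* Each iteration consumes one descriptor from the TX ring and
		 * at most TSO_MAX_BUFF_SIZE bytes of the remaining payload.
		 */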
		priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
		desc = priv->dma_tx + priv->cur_tx;

		desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
			    TSO_MAX_BUFF_SIZE : tmp_len;

		priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
			0, 1,
			(last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
			0, 0);

		tmp_len -= TSO_MAX_BUFF_SIZE;
	}
}

/**
 *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
 *  @skb : the socket buffer
 *  @dev : device pointer
 *  Description: this is the transmit function that is called on TSO frames
 *  (support available on GMAC4 and newer chips).
 *  Diagram below show the ring programming in case of TSO frames:
 *
 *  First Descriptor
 *   --------
 *   | DES0 |---> buffer1 = L2/L3/L4 header
 *   | DES1 |---> TCP Payload (can continue on next descr...)
 *   | DES2 |---> buffer 1 and 2 len
 *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
 *   --------
 *	|
 *     ...
 *	|
 *   --------
 *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
 *   | DES1 | --|
 *   | DES2 | --> buffer 1 and 2 len
 *   | DES3 |
 *   --------
 *
 * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
 */
static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u32 pay_len, mss;
	int tmp_pay_len = 0;
	struct stmmac_priv *priv = netdev_priv(dev);
	int nfrags = skb_shinfo(skb)->nr_frags;
	unsigned int first_entry, des;
	struct dma_desc *desc, *first, *mss_desc = NULL;
	u8 proto_hdr_len;
	int i;

	/* Compute header lengths */
	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	/* Desc availability based on threshold should be enough safe */
	if (unlikely(stmmac_tx_avail(priv) <
		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx Ring full when queue awake\n",
				   __func__);
		}
		return NETDEV_TX_BUSY;
	}

	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */

	mss = skb_shinfo(skb)->gso_size;

	/* set new MSS value if needed */
	if (mss != priv->mss) {
		mss_desc = priv->dma_tx + priv->cur_tx;
		priv->hw->desc->set_mss(mss_desc, mss);
		priv->mss = mss;
		priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
	}

	if (netif_msg_tx_queued(priv)) {
		pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
			skb->data_len);
	}

	first_entry = priv->cur_tx;

	desc = priv->dma_tx + first_entry;
	first = desc;

	/* first descriptor: fill Headers on Buf1 */
	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
			     DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, des))
		goto dma_map_err;

	priv->tx_skbuff_dma[first_entry].buf = des;
	priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
	priv->tx_skbuff[first_entry] = skb;

	first->des0 = cpu_to_le32(des);

	/* Fill start of payload in buff2 of first descriptor */
	if (pay_len)
		first->des1 = cpu_to_le32(des + proto_hdr_len);

	/* If needed take extra descriptors to fill the remaining payload */
	tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;

	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0));

	/* Prepare fragments */
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		des = skb_frag_dma_map(priv->device, frag, 0,
				       skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err;

		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
				     (i == nfrags - 1));

		priv->tx_skbuff_dma[priv->cur_tx].buf = des;
		priv->tx_skbuff_dma[priv->cur_tx].len = skb_frag_size(frag);
		priv->tx_skbuff[priv->cur_tx] = NULL;
		priv->tx_skbuff_dma[priv->cur_tx].map_as_page = true;
	}

	priv->tx_skbuff_dma[priv->cur_tx].last_segment = true;

	priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);

	if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_stop_queue(dev);
	}

	dev->stats.tx_bytes += skb->len;
	priv->xstats.tx_tso_frames++;
	priv->xstats.tx_tso_nfrags += nfrags;

	/* Manage tx mitigation */
	priv->tx_count_frames += nfrags + 1;
	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
		mod_timer(&priv->txtimer,
			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
	} else {
		priv->tx_count_frames = 0;
		priv->hw->desc->set_tx_ic(desc);
		priv->xstats.tx_set_ic_bit++;
	}

	if (!priv->hwts_tx_en)
		skb_tx_timestamp(skb);

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		     priv->hwts_tx_en)) {
		/* declare that device is doing timestamping */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		priv->hw->desc->enable_tx_timestamp(first);
	}

	/* Complete the first descriptor before granting the DMA */
	priv->hw->desc->prepare_tso_tx_desc(first, 1,
			proto_hdr_len,
			pay_len,
			1, priv->tx_skbuff_dma[first_entry].last_segment,
			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));

	/* If context desc is used to change MSS */
	if (mss_desc)
		priv->hw->desc->set_tx_owner(mss_desc);

	/* The own bit must be the latest setting done when prepare the
	 * descriptor and then barrier is needed to make sure that
	 * all is coherent before granting the DMA engine.
	 */
	dma_wmb();

	if (netif_msg_pktdata(priv)) {
		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
			__func__, priv->cur_tx, priv->dirty_tx, first_entry,
			priv->cur_tx, first, nfrags);

		priv->hw->desc->display_ring((void *)priv->dma_tx, DMA_TX_SIZE,
					     0);

		pr_info(">>> frame to be transmitted: ");
		print_pkt(skb->data, skb_headlen(skb));
	}

	netdev_sent_queue(dev, skb->len);

	priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
				       STMMAC_CHAN0);

	return NETDEV_TX_OK;

dma_map_err:
	dev_err(priv->device, "Tx dma map failed\n");
	dev_kfree_skb(skb);
	priv->dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

/**
 *  stmmac_xmit - Tx entry point of the driver
 *  @skb : the socket buffer
 *  @dev : device pointer
 *  Description : this is the tx entry point of the driver.
 *  It programs the chain or the ring and supports oversized frames
 *  and SG feature.
 */
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int nopaged_len = skb_headlen(skb);
	int i, csum_insertion = 0, is_jumbo = 0;
	int nfrags = skb_shinfo(skb)->nr_frags;
	unsigned int entry, first_entry;
	struct dma_desc *desc, *first;
	unsigned int enh_desc;
	unsigned int des;

	/* Manage oversized TCP frames for GMAC4 device */
	if (skb_is_gso(skb) && priv->tso) {
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			return stmmac_tso_xmit(skb, dev);
	}

	if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx Ring full when queue awake\n",
				   __func__);
		}
		return NETDEV_TX_BUSY;
	}

	if (priv->tx_path_in_lpi_mode)
		stmmac_disable_eee_mode(priv);

	entry = priv->cur_tx;
	first_entry = entry;

	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);

	if (likely(priv->extend_desc))
		desc = (struct dma_desc *)(priv->dma_etx + entry);
	else
		desc = priv->dma_tx + entry;

	first = desc;

	priv->tx_skbuff[first_entry] = skb;

	enh_desc = priv->plat->enh_desc;
	/* To program the descriptors according to the size of the frame */
	if (enh_desc)
		is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);

	if (unlikely(is_jumbo) && likely(priv->synopsys_id <
					 DWMAC_CORE_4_00)) {
		entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
		if (unlikely(entry < 0))
			goto dma_map_err;
	}

	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);
		bool last_segment = (i == (nfrags - 1));

		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);

		if (likely(priv->extend_desc))
			desc = (struct dma_desc *)(priv->dma_etx + entry);
		else
			desc = priv->dma_tx + entry;

		des = skb_frag_dma_map(priv->device, frag, 0, len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err; /* should reuse desc w/o issues */

		priv->tx_skbuff[entry] = NULL;

		priv->tx_skbuff_dma[entry].buf = des;
		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
			desc->des0 = cpu_to_le32(des);
		else
			desc->des2 = cpu_to_le32(des);

		priv->tx_skbuff_dma[entry].map_as_page = true;
		priv->tx_skbuff_dma[entry].len = len;
		priv->tx_skbuff_dma[entry].last_segment = last_segment;

		/* Prepare the descriptor and set the own bit too */
		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
						priv->mode, 1, last_segment);
	}

	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);

	priv->cur_tx = entry;

	if (netif_msg_pktdata(priv)) {
		void *tx_head;

		netdev_dbg(priv->dev,
			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
			   __func__, priv->cur_tx, priv->dirty_tx, first_entry,
			   entry, first, nfrags);

		if (priv->extend_desc)
			tx_head = (void *)priv->dma_etx;
		else
			tx_head = (void *)priv->dma_tx;

		priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);

		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
		print_pkt(skb->data, skb->len);
	}

	if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_stop_queue(dev);
	}

	dev->stats.tx_bytes += skb->len;

	/* According to the coalesce parameter the IC bit for the latest
	 * segment is reset and the timer re-started to clean the tx status.
	 * This approach takes care about the fragments: desc is the first
	 * element in case of no SG.
	 */
	priv->tx_count_frames += nfrags + 1;
	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
		mod_timer(&priv->txtimer,
			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
	} else {
		priv->tx_count_frames = 0;
		priv->hw->desc->set_tx_ic(desc);
		priv->xstats.tx_set_ic_bit++;
	}

	if (!priv->hwts_tx_en)
		skb_tx_timestamp(skb);
	/* Ready to fill the first descriptor and set the OWN bit w/o any
	 * problems because all the descriptors are actually ready to be
	 * passed to the DMA engine.
	 */
	if (likely(!is_jumbo)) {
		bool last_segment = (nfrags == 0);

		des = dma_map_single(priv->device, skb->data,
				     nopaged_len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err;

		priv->tx_skbuff_dma[first_entry].buf = des;
		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
			first->des0 = cpu_to_le32(des);
		else
			first->des2 = cpu_to_le32(des);
		priv->tx_skbuff_dma[first_entry].len = nopaged_len;
		priv->tx_skbuff_dma[first_entry].last_segment = last_segment;

		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
			     priv->hwts_tx_en)) {
			/* declare that device is doing timestamping */
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			priv->hw->desc->enable_tx_timestamp(first);
		}

		/* Prepare the first descriptor setting the OWN bit too */
		priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
						csum_insertion, priv->mode, 1,
						last_segment);

		/* The own bit must be the latest setting done when prepare the
		 * descriptor and then barrier is needed to make sure that
		 * all is coherent before granting the DMA engine.
		 */
		dma_wmb();
	}

	netdev_sent_queue(dev, skb->len);
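
	/* Kick off transmission: cores older than 4.00 write the TX poll
	 * demand register, while GMAC4 and newer advance the channel 0
	 * ring tail pointer instead.
	 */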
	if (priv->synopsys_id < DWMAC_CORE_4_00)
		priv->hw->dma->enable_dma_transmission(priv->ioaddr);
	else
		priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
					       STMMAC_CHAN0);
	return NETDEV_TX_OK;
dma_map_err:
	netdev_err(priv->dev, "Tx DMA map failed\n");
	dev_kfree_skb(skb);
	priv->dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
{
	struct ethhdr *ehdr;
	u16 vlanid;

	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
	    NETIF_F_HW_VLAN_CTAG_RX &&
	    !__vlan_get_tag(skb, &vlanid)) {
		/* pop the vlan tag */
		ehdr = (struct ethhdr *)skb->data;
		memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
	}
}


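/* Returns 1 when the zero-copy threshold counter has built up, meaning the
 * RX path should temporarily copy frames into freshly allocated skbs
 * instead of handing off the preallocated ring buffers.
 */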
static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv)
{
	if (priv->rx_zeroc_thresh < STMMAC_RX_THRESH)
		return 0;

	return 1;
}

/**
 * stmmac_rx_refill - refill used skb preallocated buffers
 * @priv: driver private structure
 * Description : this is to reallocate the skb for the reception process
 * that is based on zero-copy.
 */
static inline void stmmac_rx_refill(struct stmmac_priv *priv)
{
	int bfsize = priv->dma_buf_sz;
	unsigned int entry = priv->dirty_rx;
	int dirty = stmmac_rx_dirty(priv);

	while (dirty-- > 0) {
		struct dma_desc *p;

		if (priv->extend_desc)
			p = (struct dma_desc *)(priv->dma_erx + entry);
		else
			p = priv->dma_rx + entry;

		if (likely(priv->rx_skbuff[entry] == NULL)) {
			struct sk_buff *skb;

			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
			if (unlikely(!skb)) {
				/* so for a while no zero-copy! */
				priv->rx_zeroc_thresh = STMMAC_RX_THRESH;
				if (unlikely(net_ratelimit()))
					dev_err(priv->device,
						"fail to alloc skb entry %d\n",
						entry);
				break;
			}

			priv->rx_skbuff[entry] = skb;
			priv->rx_skbuff_dma[entry] =
			    dma_map_single(priv->device, skb->data, bfsize,
					   DMA_FROM_DEVICE);
			if (dma_mapping_error(priv->device,
					      priv->rx_skbuff_dma[entry])) {
				netdev_err(priv->dev, "Rx DMA map failed\n");
				dev_kfree_skb(skb);
				break;
			}

			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
				p->des0 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
				p->des1 = 0;
			} else {
				p->des2 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
			}
			if (priv->hw->mode->refill_desc3)
				priv->hw->mode->refill_desc3(priv, p);

			if (priv->rx_zeroc_thresh > 0)
				priv->rx_zeroc_thresh--;

			netif_dbg(priv, rx_status, priv->dev,
				  "refill entry #%d\n", entry);
		}
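		/* Make the refilled buffer address visible to the device
		 * before ownership is handed back via the OWN bit below.
		 */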
		dma_wmb();
		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
			priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
		else
			priv->hw->desc->set_rx_owner(p);

		dma_wmb();

		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
	}
	priv->dirty_rx = entry;
}

/**
 * stmmac_rx - manage the receive process
 * @priv: driver private structure
 * @limit: napi budget.
 * Description : this is the function called by the napi poll method.
 * It gets all the frames inside the ring.
 */
static int stmmac_rx(struct stmmac_priv *priv, int limit)
{
	unsigned int entry = priv->cur_rx;
	unsigned int next_entry;
	unsigned int count = 0;
	int coe = priv->hw->rx_csum;

	if (netif_msg_rx_status(priv)) {
		void *rx_head;

		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
		if (priv->extend_desc)
			rx_head = (void *)priv->dma_erx;
		else
			rx_head = (void *)priv->dma_rx;

		priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
	}
	while (count < limit) {
		int status;
		struct dma_desc *p;
		struct dma_desc *np;

		if (priv->extend_desc)
			p = (struct dma_desc *)(priv->dma_erx + entry);
		else
			p = priv->dma_rx + entry;

		/* read the status of the incoming frame */
		status = priv->hw->desc->rx_status(&priv->dev->stats,
						   &priv->xstats, p);
		/* check if managed by the DMA otherwise go ahead */
		if (unlikely(status & dma_own))
			break;

		count++;

		priv->cur_rx = STMMAC_GET_ENTRY(priv->cur_rx, DMA_RX_SIZE);
		next_entry = priv->cur_rx;

		if (priv->extend_desc)
			np = (struct dma_desc *)(priv->dma_erx + next_entry);
		else
			np = priv->dma_rx + next_entry;

		prefetch(np);

		if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
			priv->hw->desc->rx_extended_status(&priv->dev->stats,
							   &priv->xstats,
							   priv->dma_erx +
							   entry);
		if (unlikely(status == discard_frame)) {
			priv->dev->stats.rx_errors++;
			if (priv->hwts_rx_en && !priv->extend_desc) {
				/* DESC2 & DESC3 will be overwritten by device
				 * with timestamp value, hence reinitialize
				 * them in stmmac_rx_refill() function so that
				 * device can reuse it.
				 */
				priv->rx_skbuff[entry] = NULL;
				dma_unmap_single(priv->device,
						 priv->rx_skbuff_dma[entry],
						 priv->dma_buf_sz,
						 DMA_FROM_DEVICE);
			}
		} else {
			struct sk_buff *skb;
			int frame_len;
			unsigned int des;

			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
				des = le32_to_cpu(p->des0);
			else
				des = le32_to_cpu(p->des2);

			frame_len = priv->hw->desc->get_rx_frame_len(p, coe);

			/*  If frame length is greater than skb buffer size
			 *  (preallocated during init) then the packet is
			 *  ignored
			 */
			if (frame_len > priv->dma_buf_sz) {
				netdev_err(priv->dev,
					   "len %d larger than size (%d)\n",
					   frame_len, priv->dma_buf_sz);
				priv->dev->stats.rx_length_errors++;
				break;
			}

			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
			 * Type frames (LLC/LLC-SNAP)
			 */
			if (unlikely(status != llc_snap))
				frame_len -= ETH_FCS_LEN;

			if (netif_msg_rx_status(priv)) {
				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
					   p, entry, des);
				if (frame_len > ETH_FRAME_LEN)
					netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
						   frame_len, status);
			}

			/* The zero-copy is always used for all the sizes
			 * in case of GMAC4 because it needs
			 * to refill the used descriptors, always.
			 */
			if (unlikely(!priv->plat->has_gmac4 &&
				     ((frame_len < priv->rx_copybreak) ||
				     stmmac_rx_threshold_count(priv)))) {
				skb = netdev_alloc_skb_ip_align(priv->dev,
								frame_len);
				if (unlikely(!skb)) {
					if (net_ratelimit())
						dev_warn(priv->device,
							 "packet dropped\n");
					priv->dev->stats.rx_dropped++;
					break;
				}

				dma_sync_single_for_cpu(priv->device,
							priv->rx_skbuff_dma
							[entry], frame_len,
							DMA_FROM_DEVICE);
				skb_copy_to_linear_data(skb,
							priv->
							rx_skbuff[entry]->data,
							frame_len);

				skb_put(skb, frame_len);
				dma_sync_single_for_device(priv->device,
							   priv->rx_skbuff_dma
							   [entry], frame_len,
							   DMA_FROM_DEVICE);
			} else {
				skb = priv->rx_skbuff[entry];
				if (unlikely(!skb)) {
					netdev_err(priv->dev,
						   "%s: Inconsistent Rx chain\n",
						   priv->dev->name);
					priv->dev->stats.rx_dropped++;
					break;
				}
				prefetch(skb->data - NET_IP_ALIGN);
				priv->rx_skbuff[entry] = NULL;
				priv->rx_zeroc_thresh++;

				skb_put(skb, frame_len);
				dma_unmap_single(priv->device,
						 priv->rx_skbuff_dma[entry],
						 priv->dma_buf_sz,
						 DMA_FROM_DEVICE);
			}

			if (netif_msg_pktdata(priv)) {
				netdev_dbg(priv->dev, "frame received (%dbytes)",
					   frame_len);
				print_pkt(skb->data, frame_len);
			}

			stmmac_get_rx_hwtstamp(priv, p, np, skb);

			stmmac_rx_vlan(priv->dev, skb);

			skb->protocol = eth_type_trans(skb, priv->dev);

			if (unlikely(!coe))
				skb_checksum_none_assert(skb);
			else
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			napi_gro_receive(&priv->napi, skb);

			priv->dev->stats.rx_packets++;
			priv->dev->stats.rx_bytes += frame_len;
		}
		entry = next_entry;
	}

	stmmac_rx_refill(priv);

	priv->xstats.rx_pkt_n += count;

	return count;
}

/**
 *  stmmac_poll - stmmac poll method (NAPI)
 *  @napi : pointer to the napi structure.
 *  @budget : maximum number of packets that the current CPU can receive from
 *	      all interfaces.
 *  Description :
 *  To look at the incoming frames and clear the tx resources.
 */
static int stmmac_poll(struct napi_struct *napi, int budget)
{
	struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
	int work_done = 0;

	priv->xstats.napi_poll++;
	stmmac_tx_clean(priv);

	work_done = stmmac_rx(priv, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		stmmac_enable_dma_irq(priv);
	}
	return work_done;
}

/**
 *  stmmac_tx_timeout
 *  @dev : Pointer to net device structure
 *  Description: this function is called when a packet transmission fails to
 *   complete within a reasonable time. The driver will mark the error in the
 *   netdev structure and arrange for the device to be reset to a sane state
 *   in order to transmit a new packet.
 */
static void stmmac_tx_timeout(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Clear Tx resources and restart transmitting again */
	stmmac_tx_err(priv);
}

/**
2739
 *  stmmac_set_rx_mode - entry point for multicast addressing
2740 2741 2742 2743 2744 2745 2746
 *  @dev : pointer to the device structure
 *  Description:
 *  This function is a driver entry point which gets called by the kernel
 *  whenever multicast addresses must be enabled/disabled.
 *  Return value:
 *  void.
 */
static void stmmac_set_rx_mode(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	priv->hw->mac->set_filter(priv->hw, dev);
}

/**
 *  stmmac_change_mtu - entry point to change MTU size for the device.
 *  @dev : device pointer.
 *  @new_mtu : the new MTU size for the device.
 *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
 *  to drive packet transmission. Ethernet has an MTU of 1500 octets
 *  (ETH_DATA_LEN). This value can be changed with ifconfig.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (netif_running(dev)) {
		netdev_err(priv->dev, "must be stopped to change its MTU\n");
		return -EBUSY;
	}

	dev->mtu = new_mtu;

	netdev_update_features(dev);

	return 0;
}

static netdev_features_t stmmac_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
		features &= ~NETIF_F_RXCSUM;

	if (!priv->plat->tx_coe)
		features &= ~NETIF_F_CSUM_MASK;

	/* Some GMAC devices have a bugged Jumbo frame support that
	 * needs to have the Tx COE disabled for oversized frames
	 * (due to limited buffer sizes). In this case we disable
	 * the TX csum insertion in the TDES and not use SF.
	 */
	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
		features &= ~NETIF_F_CSUM_MASK;

	/* Disable tso if asked by ethtool */
	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
		if (features & NETIF_F_TSO)
			priv->tso = true;
		else
			priv->tso = false;
	}

	return features;
}

static int stmmac_set_features(struct net_device *netdev,
			       netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(netdev);

	/* Keep the COE Type in case of csum is supporting */
	if (features & NETIF_F_RXCSUM)
		priv->hw->rx_csum = priv->plat->rx_coe;
	else
		priv->hw->rx_csum = 0;
	/* No check needed because rx_coe has been set before and it will be
	 * fixed in case of issue.
	 */
	priv->hw->mac->rx_ipc(priv->hw);

	return 0;
}

/**
 *  stmmac_interrupt - main ISR
 *  @irq: interrupt number.
 *  @dev_id: to pass the net device pointer.
 *  Description: this is the main driver interrupt service routine.
2834 2835 2836 2837 2838
 *  It can call:
 *  o DMA service routine (to manage incoming frame reception and transmission
 *    status)
 *  o Core interrupts to manage: remote wake-up, management counter, LPI
 *    interrupts.
2839
 */
2840 2841 2842 2843 2844
static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct stmmac_priv *priv = netdev_priv(dev);

	if (priv->irq_wake)
		pm_wakeup_event(priv->device, 0);

	if (unlikely(!dev)) {
		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
		return IRQ_NONE;
	}

	/* To handle GMAC own interrupts */
	if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
		int status = priv->hw->mac->host_irq_status(priv->hw,
							    &priv->xstats);
		if (unlikely(status)) {
			/* For LPI we need to save the tx status */
			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
				priv->tx_path_in_lpi_mode = true;
			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
				priv->tx_path_in_lpi_mode = false;
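			/* On an MTL RX FIFO overflow, rewriting the RX tail
			 * pointer nudges the DMA to resume fetching RX
			 * descriptors on channel 0.
			 */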
			if (status & CORE_IRQ_MTL_RX_OVERFLOW && priv->hw->dma->set_rx_tail_ptr)
				priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
							priv->rx_tail_addr,
							STMMAC_CHAN0);
		}

		/* PCS link status */
		if (priv->hw->pcs) {
			if (priv->xstats.pcs_link)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);
		}
	}

	/* To handle DMA interrupts */
	stmmac_dma_interrupt(priv);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by NETCONSOLE and other diagnostic tools
 * to allow network I/O with interrupts disabled.
 */
static void stmmac_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	stmmac_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

/**
 *  stmmac_ioctl - Entry point for the Ioctl
 *  @dev: Device pointer.
 *  @rq: An IOCTL specefic structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  @cmd: IOCTL command
 *  Description:
 *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
 */
static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	int ret = -EOPNOTSUPP;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (!dev->phydev)
			return -EINVAL;
		ret = phy_mii_ioctl(dev->phydev, rq, cmd);
		break;
	case SIOCSHWTSTAMP:
		ret = stmmac_hwtstamp_ioctl(dev, rq);
		break;
	default:
		break;
	}
	return ret;
}

#ifdef CONFIG_DEBUG_FS
static struct dentry *stmmac_fs_dir;

static void sysfs_display_ring(void *head, int size, int extend_desc,
			       struct seq_file *seq)
{
	int i;
	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
	struct dma_desc *p = (struct dma_desc *)head;

	for (i = 0; i < size; i++) {
		if (extend_desc) {
			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, (unsigned int)virt_to_phys(ep),
				   le32_to_cpu(ep->basic.des0),
				   le32_to_cpu(ep->basic.des1),
				   le32_to_cpu(ep->basic.des2),
				   le32_to_cpu(ep->basic.des3));
			ep++;
		} else {
			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, (unsigned int)virt_to_phys(p),
				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
			p++;
		}
		seq_printf(seq, "\n");
	}
}

static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);

	if (priv->extend_desc) {
		seq_printf(seq, "Extended RX descriptor ring:\n");
		sysfs_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1, seq);
		seq_printf(seq, "Extended TX descriptor ring:\n");
		sysfs_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1, seq);
	} else {
		seq_printf(seq, "RX descriptor ring:\n");
		sysfs_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0, seq);
		seq_printf(seq, "TX descriptor ring:\n");
		sysfs_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0, seq);
	}

	return 0;
}

static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
{
	return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
}

/* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */

static const struct file_operations stmmac_rings_status_fops = {
	.owner = THIS_MODULE,
	.open = stmmac_sysfs_ring_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);

	if (!priv->hw_cap_support) {
		seq_printf(seq, "DMA HW features not supported\n");
		return 0;
	}

	seq_printf(seq, "==============================\n");
	seq_printf(seq, "\tDMA HW features\n");
	seq_printf(seq, "==============================\n");

	seq_printf(seq, "\t10/100 Mbps: %s\n",
		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
	seq_printf(seq, "\t1000 Mbps: %s\n",
		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
	seq_printf(seq, "\tHalf duplex: %s\n",
		   (priv->dma_cap.half_duplex) ? "Y" : "N");
	seq_printf(seq, "\tHash Filter: %s\n",
		   (priv->dma_cap.hash_filter) ? "Y" : "N");
	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
		   (priv->dma_cap.multi_addr) ? "Y" : "N");
	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
		   (priv->dma_cap.pcs) ? "Y" : "N");
	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
	seq_printf(seq, "\tPMT Remote wake up: %s\n",
		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
	seq_printf(seq, "\tPMT Magic Frame: %s\n",
		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
	seq_printf(seq, "\tRMON module: %s\n",
		   (priv->dma_cap.rmon) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
		   (priv->dma_cap.time_stamp) ? "Y" : "N");
3031
	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3032
		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
3033
	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3034 3035 3036 3037
		   (priv->dma_cap.eee) ? "Y" : "N");
	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
		   (priv->dma_cap.tx_coe) ? "Y" : "N");
A
		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
			   (priv->dma_cap.rx_coe) ? "Y" : "N");
	} else {
		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
	}
	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
		   priv->dma_cap.number_rx_channel);
	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
		   priv->dma_cap.number_tx_channel);
	seq_printf(seq, "\tEnhanced descriptors: %s\n",
		   (priv->dma_cap.enh_desc) ? "Y" : "N");

	return 0;
}

static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
{
	return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
}

static const struct file_operations stmmac_dma_cap_fops = {
	.owner = THIS_MODULE,
	.open = stmmac_sysfs_dma_cap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int stmmac_init_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Create per netdev entries */
	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);

	if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
		netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");

		return -ENOMEM;
	}

	/* Entry to report DMA RX/TX rings */
	priv->dbgfs_rings_status =
		debugfs_create_file("descriptors_status", S_IRUGO,
				    priv->dbgfs_dir, dev,
				    &stmmac_rings_status_fops);

	if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
		netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
		debugfs_remove_recursive(priv->dbgfs_dir);

		return -ENOMEM;
	}

	/* Entry to report the DMA HW features */
	priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
					    priv->dbgfs_dir,
					    dev, &stmmac_dma_cap_fops);

	if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
		netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
		debugfs_remove_recursive(priv->dbgfs_dir);

		return -ENOMEM;
	}

	return 0;
}

static void stmmac_exit_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	debugfs_remove_recursive(priv->dbgfs_dir);
}
#endif /* CONFIG_DEBUG_FS */

static const struct net_device_ops stmmac_netdev_ops = {
	.ndo_open = stmmac_open,
	.ndo_start_xmit = stmmac_xmit,
	.ndo_stop = stmmac_release,
	.ndo_change_mtu = stmmac_change_mtu,
	.ndo_fix_features = stmmac_fix_features,
	.ndo_set_features = stmmac_set_features,
	.ndo_set_rx_mode = stmmac_set_rx_mode,
	.ndo_tx_timeout = stmmac_tx_timeout,
	.ndo_do_ioctl = stmmac_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = stmmac_poll_controller,
#endif
	.ndo_set_mac_address = eth_mac_addr,
};

/**
 *  stmmac_hw_init - Init the MAC device
 *  @priv: driver private structure
 *  Description: this function is to configure the MAC device according to
 *  some platform parameters or the HW capability register. It prepares the
 *  driver to use either ring or chain modes and to setup either enhanced or
 *  normal descriptors.
 */
static int stmmac_hw_init(struct stmmac_priv *priv)
{
	struct mac_device_info *mac;

	/* Identify the MAC HW device */
	if (priv->plat->has_gmac) {
		priv->dev->priv_flags |= IFF_UNICAST_FLT;
		mac = dwmac1000_setup(priv->ioaddr,
				      priv->plat->multicast_filter_bins,
				      priv->plat->unicast_filter_entries,
				      &priv->synopsys_id);
	} else if (priv->plat->has_gmac4) {
		priv->dev->priv_flags |= IFF_UNICAST_FLT;
		mac = dwmac4_setup(priv->ioaddr,
				   priv->plat->multicast_filter_bins,
				   priv->plat->unicast_filter_entries,
				   &priv->synopsys_id);
	} else {
		mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
	}
	if (!mac)
		return -ENOMEM;

	priv->hw = mac;

	/* To use the chained or ring mode */
	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		priv->hw->mode = &dwmac4_ring_mode_ops;
	} else {
		if (chain_mode) {
			priv->hw->mode = &chain_mode_ops;
			dev_info(priv->device, "Chain mode enabled\n");
			priv->mode = STMMAC_CHAIN_MODE;
		} else {
			priv->hw->mode = &ring_mode_ops;
			dev_info(priv->device, "Ring mode enabled\n");
			priv->mode = STMMAC_RING_MODE;
		}
	}

	/* Get the HW capability (new GMAC newer than 3.50a) */
	priv->hw_cap_support = stmmac_get_hw_features(priv);
	if (priv->hw_cap_support) {
		dev_info(priv->device, "DMA HW capability register supported\n");

		/* We can override some gmac/dma configuration fields: e.g.
		 * enh_desc, tx_coe (e.g. that are passed through the
		 * platform) with the values from the HW capability
		 * register (if supported).
		 */
		priv->plat->enh_desc = priv->dma_cap.enh_desc;
		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
		priv->hw->pmt = priv->plat->pmt;

		/* TXCOE doesn't work in thresh DMA mode */
		if (priv->plat->force_thresh_dma_mode)
			priv->plat->tx_coe = 0;
		else
			priv->plat->tx_coe = priv->dma_cap.tx_coe;

		/* In case of GMAC4 rx_coe is from HW cap register. */
		priv->plat->rx_coe = priv->dma_cap.rx_coe;

		if (priv->dma_cap.rx_coe_type2)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
		else if (priv->dma_cap.rx_coe_type1)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;

	} else {
		dev_info(priv->device, "No HW DMA feature register supported\n");
	}

	/* To use alternate (extended), normal or GMAC4 descriptor structures */
	if (priv->synopsys_id >= DWMAC_CORE_4_00)
		priv->hw->desc = &dwmac4_desc_ops;
	else
		stmmac_selec_desc_mode(priv);

	if (priv->plat->rx_coe) {
		priv->hw->rx_csum = priv->plat->rx_coe;
		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
		if (priv->synopsys_id < DWMAC_CORE_4_00)
			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
	}
	if (priv->plat->tx_coe)
		dev_info(priv->device, "TX Checksum insertion supported\n");

	if (priv->plat->pmt) {
		dev_info(priv->device, "Wake-Up On Lan supported\n");
		device_set_wakeup_capable(priv->device, 1);
	}

	if (priv->dma_cap.tsoen)
		dev_info(priv->device, "TSO supported\n");

	return 0;
}

/**
 * stmmac_dvr_probe
 * @device: device pointer
 * @plat_dat: platform data pointer
 * @res: stmmac resource pointer
 * Description: this is the main probe function used to
 * call the alloc_etherdev, allocate the priv structure.
 * Return:
 * returns 0 on success, otherwise errno.
 */
int stmmac_dvr_probe(struct device *device,
		     struct plat_stmmacenet_data *plat_dat,
		     struct stmmac_resources *res)
{
	int ret = 0;
	struct net_device *ndev = NULL;
	struct stmmac_priv *priv;

	ndev = alloc_etherdev(sizeof(struct stmmac_priv));
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, device);

	priv = netdev_priv(ndev);
	priv->device = device;
	priv->dev = ndev;

	stmmac_set_ethtool_ops(ndev);
	priv->pause = pause;
	priv->plat = plat_dat;
	priv->ioaddr = res->addr;
	priv->dev->base_addr = (unsigned long)res->addr;

	priv->dev->irq = res->irq;
	priv->wol_irq = res->wol_irq;
	priv->lpi_irq = res->lpi_irq;

	if (res->mac)
		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);

	dev_set_drvdata(device, priv->dev);

	/* Verify driver arguments */
	stmmac_verify_args();

	/* Override with kernel parameters if supplied XXX CRS XXX
	 * this needs to have multiple instances
	 */
	if ((phyaddr >= 0) && (phyaddr <= 31))
		priv->plat->phy_addr = phyaddr;

	if (priv->plat->stmmac_rst)
		reset_control_deassert(priv->plat->stmmac_rst);

	/* Init MAC and get the capabilities */
	ret = stmmac_hw_init(priv);
	if (ret)
		goto error_hw_init;

	ndev->netdev_ops = &stmmac_netdev_ops;
	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_RXCSUM;

	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
		ndev->hw_features |= NETIF_F_TSO;
		priv->tso = true;
	}
	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
	/* Both mac100 and gmac support receive VLAN tag detection */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
#endif
	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	/* MTU range: 46 - hw-specific max */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
		ndev->max_mtu = JUMBO_LEN;
	else
		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
	 * as well as plat->maxmtu < ndev->min_mtu which is an invalid range.
	 */
	if ((priv->plat->maxmtu < ndev->max_mtu) &&
	    (priv->plat->maxmtu >= ndev->min_mtu))
		ndev->max_mtu = priv->plat->maxmtu;
	else if (priv->plat->maxmtu < ndev->min_mtu)
		dev_warn(priv->device,
			 "%s: warning: maxmtu having invalid value (%d)\n",
			 __func__, priv->plat->maxmtu);

	if (flow_ctrl)
		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */

	/* Rx Watchdog is available in the COREs newer than the 3.40.
	 * In some cases, for example on buggy HW, this feature has to be
	 * disabled; this can be done by passing the riwt_off field from
	 * the platform.
	 */
	if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
		priv->use_riwt = 1;
		dev_info(priv->device,
			 "Enable RX Mitigation via HW Watchdog Timer\n");
	}

	netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);

	spin_lock_init(&priv->lock);

	/* If a specific clk_csr value is passed from the platform
	 * this means that the CSR Clock Range selection cannot be
	 * changed at run-time and it is fixed. Otherwise the driver
	 * will try to set the MDC clock dynamically according to the
	 * actual csr clock input.
	 */
	if (!priv->plat->clk_csr)
		stmmac_clk_csr_set(priv);
	else
		priv->clk_csr = priv->plat->clk_csr;

	stmmac_check_pcs_mode(priv);

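	/* The MDIO bus and PHY are only registered when the link is not
	 * managed by the internal PCS (RGMII/TBI/RTBI in-band modes).
	 */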
	if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI) {
		/* MDIO bus Registration */
		ret = stmmac_mdio_register(ndev);
		if (ret < 0) {
			dev_err(priv->device,
				"%s: MDIO bus (id: %d) registration failed",
				__func__, priv->plat->bus_id);
			goto error_mdio_register;
		}
	}

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->device, "%s: ERROR %i registering the device\n",
			__func__, ret);
		goto error_netdev_register;
	}

	return ret;

error_netdev_register:
	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
error_mdio_register:
	netif_napi_del(&priv->napi);
error_hw_init:
	free_netdev(ndev);

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_probe);

/**
 * stmmac_dvr_remove
 * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, changes the link status and releases the DMA descriptor rings.
 */
int stmmac_dvr_remove(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	netdev_info(priv->dev, "%s: removing driver", __func__);
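	/* Quiesce the hardware: stop both DMA channels and disable the MAC
	 * before the interface is unregistered.
	 */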
	priv->hw->dma->stop_rx(priv->ioaddr);
	priv->hw->dma->stop_tx(priv->ioaddr);
	stmmac_set_mac(priv->ioaddr, false);
	netif_carrier_off(ndev);
	unregister_netdev(ndev);
	if (priv->plat->stmmac_rst)
		reset_control_assert(priv->plat->stmmac_rst);
	clk_disable_unprepare(priv->plat->pclk);
	clk_disable_unprepare(priv->plat->stmmac_clk);
	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
	free_netdev(ndev);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);

/**
 * stmmac_suspend - suspend callback
 * @dev: device pointer
 * Description: this function suspends the device; it is called by the
 * platform driver to stop the network queue, program the PMT register
 * (for WoL) and release the driver resources.
 */
int stmmac_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	if (!ndev || !netif_running(ndev))
		return 0;

	if (ndev->phydev)
		phy_stop(ndev->phydev);

	spin_lock_irqsave(&priv->lock, flags);

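	/* Detach the interface and quiesce the stack side (queue and NAPI)
	 * before stopping the DMA engines below.
	 */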
	netif_device_detach(ndev);
	netif_stop_queue(ndev);
	napi_disable(&priv->napi);

	/* Stop TX/RX DMA */
	priv->hw->dma->stop_tx(priv->ioaddr);
	priv->hw->dma->stop_rx(priv->ioaddr);
	/* Enable Power down mode by programming the PMT regs */
	if (device_may_wakeup(priv->device)) {
		priv->hw->mac->pmt(priv->hw, priv->wolopts);
		priv->irq_wake = 1;
	} else {
		stmmac_set_mac(priv->ioaddr, false);
		pinctrl_pm_select_sleep_state(priv->device);
		/* Disable clocks in case PWM is off */
		clk_disable(priv->plat->pclk);
		clk_disable(priv->plat->stmmac_clk);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	priv->oldlink = 0;
	priv->speed = SPEED_UNKNOWN;
	priv->oldduplex = DUPLEX_UNKNOWN;
	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);
/**
 * stmmac_resume - resume callback
 * @dev: device pointer
 * Description: on resume, this function is invoked to set up the DMA and CORE
 * in a usable state.
 */
int stmmac_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	if (!netif_running(ndev))
		return 0;

	/* The Power Down bit in the PMT register is cleared automatically
	 * as soon as a magic packet or a Wake-up frame is received.
	 * Anyway, it's better to clear this bit manually because it can
	 * cause problems while resuming from another device
	 * (e.g. serial console).
	 */
	if (device_may_wakeup(priv->device)) {
		spin_lock_irqsave(&priv->lock, flags);
		priv->hw->mac->pmt(priv->hw, 0);
		spin_unlock_irqrestore(&priv->lock, flags);
		priv->irq_wake = 0;
	} else {
		pinctrl_pm_select_default_state(priv->device);
		/* enable the clk previously disabled */
		clk_enable(priv->plat->stmmac_clk);
		clk_enable(priv->plat->pclk);
		/* reset the phy so that it's ready */
		if (priv->mii)
			stmmac_mdio_reset(priv->mii);
	}
	netif_device_attach(ndev);
	spin_lock_irqsave(&priv->lock, flags);

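	/* Restart from a clean ring state: the indexes are reset here and the
	 * descriptors themselves are re-initialized just below.
	 */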
	priv->cur_rx = 0;
	priv->dirty_rx = 0;
	priv->dirty_tx = 0;
	priv->cur_tx = 0;
	/* reset private mss value to force mss context settings at
	 * next tso xmit (only used for gmac4).
	 */
	priv->mss = 0;

	stmmac_clear_descriptors(priv);

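	/* Re-program the MAC and DMA engines after the power-down */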
	stmmac_hw_setup(ndev, false);
	stmmac_init_tx_coalesce(priv);
	stmmac_set_rx_mode(ndev);

	napi_enable(&priv->napi);

	netif_start_queue(ndev);
	spin_unlock_irqrestore(&priv->lock, flags);
	if (ndev->phydev)
		phy_start(ndev->phydev);
	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_resume);
#ifndef MODULE
static int __init stmmac_cmdline_opt(char *str)
{
	char *opt;

	if (!str || !*str)
		return -EINVAL;
	while ((opt = strsep(&str, ",")) != NULL) {
		if (!strncmp(opt, "debug:", 6)) {
			if (kstrtoint(opt + 6, 0, &debug))
				goto err;
		} else if (!strncmp(opt, "phyaddr:", 8)) {
			if (kstrtoint(opt + 8, 0, &phyaddr))
				goto err;
		} else if (!strncmp(opt, "buf_sz:", 7)) {
			if (kstrtoint(opt + 7, 0, &buf_sz))
				goto err;
		} else if (!strncmp(opt, "tc:", 3)) {
			if (kstrtoint(opt + 3, 0, &tc))
				goto err;
		} else if (!strncmp(opt, "watchdog:", 9)) {
			if (kstrtoint(opt + 9, 0, &watchdog))
				goto err;
		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
			if (kstrtoint(opt + 10, 0, &flow_ctrl))
				goto err;
		} else if (!strncmp(opt, "pause:", 6)) {
			if (kstrtoint(opt + 6, 0, &pause))
				goto err;
		} else if (!strncmp(opt, "eee_timer:", 10)) {
			if (kstrtoint(opt + 10, 0, &eee_timer))
				goto err;
		} else if (!strncmp(opt, "chain_mode:", 11)) {
			if (kstrtoint(opt + 11, 0, &chain_mode))
				goto err;
		}
	}
	return 0;

err:
	pr_err("%s: ERROR broken module parameter conversion", __func__);
	return -EINVAL;
}

__setup("stmmaceth=", stmmac_cmdline_opt);
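
/* Example (values are purely illustrative) of the boot-time option string
 * parsed above when the driver is built in:
 *
 *	stmmaceth=debug:16,phyaddr:2,watchdog:4000,eee_timer:500,chain_mode:1
 */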
#endif /* MODULE */
static int __init stmmac_init(void)
{
#ifdef CONFIG_DEBUG_FS
	/* Create debugfs main directory if it doesn't exist yet */
	if (!stmmac_fs_dir) {
		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);

		if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
			pr_err("ERROR %s, debugfs create directory failed\n",
			       STMMAC_RESOURCE_NAME);

			return -ENOMEM;
		}
	}
#endif

	return 0;
}

static void __exit stmmac_exit(void)
{
#ifdef CONFIG_DEBUG_FS
	debugfs_remove_recursive(stmmac_fs_dir);
#endif
}

module_init(stmmac_init)
module_exit(stmmac_exit)

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");