/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"

#define STMMAC_ALIGN(x)	L1_CACHE_ALIGN(x)
#define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, S_IRUGO);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
#define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)

static int flow_ctrl = FLOW_OFF;
module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define	STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))

/* By default the driver will use the ring mode to manage tx and rx descriptors,
 * but allow the user to force the chain mode instead of the ring
 */
static unsigned int chain_mode;
module_param(chain_mode, int, S_IRUGO);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_DEBUG_FS
static int stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case
 * of errors.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DEFAULT_BUFSIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Otherwise the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* Platform provided default clk_csr would be assumed valid
	 * for all other cases except for the below mentioned ones.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we cannot estimate the proper divider as the frequency of
	 * clk_csr_i is not known. So we do not change the default
	 * divider.
	 */
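	/* For example, a 75 MHz clk_csr_i falls in the 60-100 MHz range
	 * below and selects STMMAC_CSR_60_100M.
	 */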
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}
}

static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
{
	u32 avail;

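	/* One descriptor slot is always kept empty so that a completely
	 * full ring (cur_tx wrapped around to just behind dirty_tx) can be
	 * told apart from an empty one; hence the "- 1" in both branches.
	 */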
	if (priv->dirty_tx > priv->cur_tx)
		avail = priv->dirty_tx - priv->cur_tx - 1;
	else
		avail = DMA_TX_SIZE - priv->cur_tx + priv->dirty_tx - 1;

	return avail;
}

static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv)
{
	u32 dirty;

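	/* Count the RX descriptors already consumed by the hardware that
	 * the CPU still has to refill with fresh buffers.
	 */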
	if (priv->dirty_rx <= priv->cur_rx)
		dirty = priv->cur_rx - priv->dirty_rx;
	else
		dirty = DMA_RX_SIZE - priv->dirty_rx + priv->cur_rx;

	return dirty;
}

/**
 * stmmac_hw_fix_mac_speed - callback for speed selection
 * @priv: driver private structure
 * Description: on some platforms (e.g. ST), some HW system configuration
 * registers have to be set according to the link speed negotiated.
 */
static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
{
	struct net_device *ndev = priv->dev;
	struct phy_device *phydev = ndev->phydev;

	if (likely(priv->plat->fix_mac_speed))
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
}

/**
 * stmmac_enable_eee_mode - check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function verifies the conditions and enters LPI mode
 * in case of EEE.
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	/* Check and enter in LPI mode */
	if ((priv->dirty_tx == priv->cur_tx) &&
	    (priv->tx_path_in_lpi_mode == false))
		priv->hw->mac->set_eee_mode(priv->hw,
					    priv->plat->en_tx_lpi_clockgating);
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function exits and disables EEE when the LPI state
 * is true. It is called by the xmit path.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	priv->hw->mac->reset_eee_mode(priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @arg : data hook
 * Description:
 *  if there is no data transfer and if we are not in LPI state, then
 *  the MAC transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(unsigned long arg)
{
	struct stmmac_priv *priv = (struct stmmac_priv *)arg;

	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts
 *  the related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	struct net_device *ndev = priv->dev;
	unsigned long flags;
	bool ret = false;

	/* Using PCS we cannot deal with the phy registers at this stage
	 * so we do not support extra features like EEE.
	 */
	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
	    (priv->hw->pcs == STMMAC_PCS_RTBI))
		goto out;

	/* MAC core supports the EEE feature. */
	if (priv->dma_cap.eee) {
		int tx_lpi_timer = priv->tx_lpi_timer;

		/* Check if the PHY supports EEE */
		if (phy_init_eee(ndev->phydev, 1)) {
			/* To manage at run-time if the EEE cannot be supported
			 * anymore (for example because the lp caps have been
			 * changed).
			 * In that case the driver disables its own timers.
			 */
			spin_lock_irqsave(&priv->lock, flags);
			if (priv->eee_active) {
				netdev_dbg(priv->dev, "disable EEE\n");
				del_timer_sync(&priv->eee_ctrl_timer);
				priv->hw->mac->set_eee_timer(priv->hw, 0,
							     tx_lpi_timer);
			}
			priv->eee_active = 0;
			spin_unlock_irqrestore(&priv->lock, flags);
			goto out;
		}
		/* Activate the EEE and start timers */
		spin_lock_irqsave(&priv->lock, flags);
		if (!priv->eee_active) {
			priv->eee_active = 1;
			setup_timer(&priv->eee_ctrl_timer,
				    stmmac_eee_ctrl_timer,
				    (unsigned long)priv);
			mod_timer(&priv->eee_ctrl_timer,
				  STMMAC_LPI_T(eee_timer));

			priv->hw->mac->set_eee_timer(priv->hw,
						     STMMAC_DEFAULT_LIT_LS,
						     tx_lpi_timer);
		}
		/* Set HW EEE according to the speed */
		priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);

		ret = true;
		spin_unlock_irqrestore(&priv->lock, flags);

		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	}
out:
	return ret;
}

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read the timestamp from the descriptor, perform some
 * sanity checks and then pass it to the stack.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	u64 ns;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status */
	if (!priv->hw->desc->get_tx_timestamp_status(p)) {
		/* get the valid tstamp */
		ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);

		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}

	return;
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read received packet's timestamp from the descriptor
 * and pass it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	u64 ns;

	if (!priv->hwts_rx_en)
		return;

	/* Check if timestamp is available */
	if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
		/* For GMAC4, the valid timestamp is from CTX next desc. */
		if (priv->plat->has_gmac4)
			ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
		else
			ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);

		netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
		netdev_err(priv->dev, "cannot get RX hw timestamp\n");
	}
}

/**
 *  stmmac_hwtstamp_ioctl - control hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing(TX)
 *  and incoming(RX) packets time stamping based on user input.
 *  Return Value:
 *  0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	struct timespec64 now;
	u64 temp = 0;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;
	u32 value = 0;
	u32 sec_inc;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(struct hwtstamp_config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
	else {
		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
			 tstamp_all | ptp_v2 | ptp_over_ethernet |
			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
			 ts_master_en | snap_type_sel);
		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);

		/* program Sub Second Increment reg */
		sec_inc = priv->hw->ptp->config_sub_second_increment(
			priv->ptpaddr, priv->plat->clk_ptp_rate,
			priv->plat->has_gmac4);
		temp = div_u64(1000000000ULL, sec_inc);

		/* calculate default added value:
		 * formula is :
		 * addend = (2^32)/freq_div_ratio;
		 * where, freq_div_ratio = 1e9ns/sec_inc
		 */
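		/* For example, a 50 MHz clk_ptp_rate with sec_inc = 40 ns
		 * gives freq_div_ratio = 25 MHz and addend = 2^31: the
		 * 32-bit accumulator then overflows every two ptp clock
		 * cycles, advancing the counter by sec_inc each time.
		 */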
		temp = (u64)(temp << 32);
		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
		priv->hw->ptp->config_addend(priv->ptpaddr,
					     priv->default_addend);

		/* initialize system time */
		ktime_get_real_ts64(&now);

		/* lower 32 bits of tv_sec are safe until y2106 */
		priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
					    now.tv_nsec);
	}

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
}

/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	priv->adv_ts = 0;
	/* Check if adv_ts can be enabled for dwmac 4.x core */
	if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	priv->hw->ptp = &stmmac_ptp;
	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	stmmac_ptp_register(priv);

	return 0;
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	if (priv->plat->clk_ptp_ref)
		clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}

/**
 *  stmmac_mac_flow_ctrl - Configure flow control in all queues
 *  @priv: driver private structure
 *  @duplex: duplex mode negotiated on the link
 *  Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;

	priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
				 priv->pause, tx_cnt);
}

/**
 * stmmac_adjust_link - adjusts the link parameters
 * @dev: net device structure
 * Description: this is the helper called by the physical abstraction layer
 * drivers to communicate the phy link status. According to the speed and
 * duplex this driver can invoke registered glue-logic as well.
 * It also invokes the eee initialization because this could happen when
 * switching between different networks (that are eee capable).
 */
static void stmmac_adjust_link(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned long flags;
	int new_state = 0;

	if (!phydev)
		return;

	spin_lock_irqsave(&priv->lock, flags);

	if (phydev->link) {
		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode.
		 */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				ctrl &= ~priv->hw->link.duplex;
			else
				ctrl |= priv->hw->link.duplex;
			priv->oldduplex = phydev->duplex;
		}
		/* Flow Control operation */
		if (phydev->pause)
			stmmac_mac_flow_ctrl(priv, phydev->duplex);

		if (phydev->speed != priv->speed) {
			new_state = 1;
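			/* For GMAC cores the port bit selects MII (10/100)
			 * vs GMII (1000) operation and the speed bit picks
			 * 100 Mbps within MII mode, as programmed below.
			 */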
			switch (phydev->speed) {
			case 1000:
				if (priv->plat->has_gmac ||
				    priv->plat->has_gmac4)
					ctrl &= ~priv->hw->link.port;
				break;
			case 100:
				if (priv->plat->has_gmac ||
				    priv->plat->has_gmac4) {
					ctrl |= priv->hw->link.port;
					ctrl |= priv->hw->link.speed;
				} else {
					ctrl &= ~priv->hw->link.port;
				}
				break;
			case 10:
				if (priv->plat->has_gmac ||
				    priv->plat->has_gmac4) {
					ctrl |= priv->hw->link.port;
					ctrl &= ~(priv->hw->link.speed);
				} else {
					ctrl &= ~priv->hw->link.port;
				}
				break;
			default:
				netif_warn(priv, link, priv->dev,
					   "broken speed: %d\n", phydev->speed);
				phydev->speed = SPEED_UNKNOWN;
				break;
			}
			if (phydev->speed != SPEED_UNKNOWN)
				stmmac_hw_fix_mac_speed(priv);
			priv->speed = phydev->speed;
		}

		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->speed = SPEED_UNKNOWN;
		priv->oldduplex = DUPLEX_UNKNOWN;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (phydev->is_pseudo_fixed_link)
		/* Stop the PHY layer from calling the link-adjust hook in case
		 * a switch is attached to the stmmac driver.
		 */
		phydev->irq = PHY_IGNORE_INTERRUPT;
	else
		/* At this stage, init the EEE if supported.
		 * Never called in case of fixed_link.
		 */
		priv->eee_enabled = stmmac_eee_init(priv);
}

/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the Physical Coding
 * Sublayer (PCS) interface, which can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
	int interface = priv->plat->interface;

	if (priv->dma_cap.pcs) {
		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_RGMII;
		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_SGMII;
		}
	}
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 *  Return value:
 *  0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
	char bus_id[MII_BUS_ID_SIZE];
	int interface = priv->plat->interface;
	int max_speed = priv->plat->max_speed;
	priv->oldlink = 0;
	priv->speed = SPEED_UNKNOWN;
	priv->oldduplex = DUPLEX_UNKNOWN;

	if (priv->plat->phy_node) {
		phydev = of_phy_connect(dev, priv->plat->phy_node,
					&stmmac_adjust_link, 0, interface);
	} else {
		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
			 priv->plat->bus_id);

		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
			 priv->plat->phy_addr);
		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
			   phy_id_fmt);

		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
				     interface);
	}

	if (IS_ERR_OR_NULL(phydev)) {
		netdev_err(priv->dev, "Could not attach to PHY\n");
		if (!phydev)
			return -ENODEV;

		return PTR_ERR(phydev);
	}

	/* Stop Advertising 1000BASE Capability if interface is not GMII */
	if ((interface == PHY_INTERFACE_MODE_MII) ||
	    (interface == PHY_INTERFACE_MODE_RMII) ||
		(max_speed < 1000 && max_speed > 0))
		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
					 SUPPORTED_1000baseT_Full);

	/*
	 * Broken HW is sometimes missing the pull-up resistor on the
	 * MDIO line, which results in reads to non-existent devices returning
	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
	 * device as well.
	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
	 */
	if (!priv->plat->phy_node && phydev->phy_id == 0) {
		phy_disconnect(phydev);
		return -ENODEV;
	}

	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
	 * subsequent PHY polling, make sure we force a link transition if
	 * we have a UP/DOWN/UP transition
	 */
	if (phydev->is_pseudo_fixed_link)
		phydev->irq = PHY_POLL;

	phy_attached_info(phydev);
	return 0;
}

static void stmmac_display_rings(struct stmmac_priv *priv)
{
	void *head_rx, *head_tx;

	if (priv->extend_desc) {
		head_rx = (void *)priv->dma_erx;
		head_tx = (void *)priv->dma_etx;
	} else {
		head_rx = (void *)priv->dma_rx;
		head_tx = (void *)priv->dma_tx;
	}

	/* Display Rx ring */
	priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
	/* Display Tx ring */
	priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
}

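/* Choose the DMA buffer size for a given MTU: for example, the default
 * 1500 byte MTU fits in DEFAULT_BUFSIZE (1536), while a 3000 byte MTU
 * needs BUF_SIZE_4KiB buffers.
 */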
static int stmmac_set_bfsize(int mtu, int bufsize)
{
	int ret = bufsize;

	if (mtu >= BUF_SIZE_4KiB)
		ret = BUF_SIZE_8KiB;
	else if (mtu >= BUF_SIZE_2KiB)
		ret = BUF_SIZE_4KiB;
	else if (mtu > DEFAULT_BUFSIZE)
		ret = BUF_SIZE_2KiB;
	else
		ret = DEFAULT_BUFSIZE;

	return ret;
}

/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the tx and rx descriptors
 * whether basic or extended descriptors are used.
 */
static void stmmac_clear_descriptors(struct stmmac_priv *priv)
{
	int i;

	/* Clear the Rx/Tx descriptors */
	for (i = 0; i < DMA_RX_SIZE; i++)
		if (priv->extend_desc)
			priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
						     priv->use_riwt, priv->mode,
						     (i == DMA_RX_SIZE - 1));
		else
			priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
						     priv->use_riwt, priv->mode,
						     (i == DMA_RX_SIZE - 1));
	for (i = 0; i < DMA_TX_SIZE; i++)
		if (priv->extend_desc)
			priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
						     priv->mode,
						     (i == DMA_TX_SIZE - 1));
		else
			priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
						     priv->mode,
						     (i == DMA_TX_SIZE - 1));
}

/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag.
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
				  int i, gfp_t flags)
{
	struct sk_buff *skb;

	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
	if (!skb) {
		netdev_err(priv->dev,
			   "%s: Rx init fails; skb is NULL\n", __func__);
		return -ENOMEM;
	}
	priv->rx_skbuff[i] = skb;
	priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
						priv->dma_buf_sz,
						DMA_FROM_DEVICE);
	if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (priv->synopsys_id >= DWMAC_CORE_4_00)
		p->des0 = cpu_to_le32(priv->rx_skbuff_dma[i]);
	else
		p->des2 = cpu_to_le32(priv->rx_skbuff_dma[i]);

	if ((priv->hw->mode->init_desc3) &&
	    (priv->dma_buf_sz == BUF_SIZE_16KiB))
		priv->hw->mode->init_desc3(p);

	return 0;
}

static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
{
	if (priv->rx_skbuff[i]) {
		dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
				 priv->dma_buf_sz, DMA_FROM_DEVICE);
		dev_kfree_skb_any(priv->rx_skbuff[i]);
	}
	priv->rx_skbuff[i] = NULL;
}

/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
{
	int i;
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int bfsize = 0;
	int ret = -ENOMEM;

	if (priv->hw->mode->set_16kib_bfsize)
		bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);

	if (bfsize < BUF_SIZE_16KiB)
		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);

	priv->dma_buf_sz = bfsize;

	netif_dbg(priv, probe, priv->dev,
		  "(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n",
		  __func__, (u32)priv->dma_rx_phy, (u32)priv->dma_tx_phy);

	/* RX INITIALIZATION */
	netif_dbg(priv, probe, priv->dev,
		  "SKB addresses:\nskb\t\tskb data\tdma data\n");

	for (i = 0; i < DMA_RX_SIZE; i++) {
		struct dma_desc *p;
		if (priv->extend_desc)
			p = &((priv->dma_erx + i)->basic);
		else
			p = priv->dma_rx + i;

		ret = stmmac_init_rx_buffers(priv, p, i, flags);
		if (ret)
			goto err_init_rx_buffers;

		netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
			  priv->rx_skbuff[i], priv->rx_skbuff[i]->data,
			  (unsigned int)priv->rx_skbuff_dma[i]);
	}
	priv->cur_rx = 0;
	priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
	buf_sz = bfsize;

	/* Setup the chained descriptor addresses */
	if (priv->mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc) {
			priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy,
					     DMA_RX_SIZE, 1);
			priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy,
					     DMA_TX_SIZE, 1);
		} else {
			priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy,
					     DMA_RX_SIZE, 0);
			priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy,
					     DMA_TX_SIZE, 0);
		}
	}

	/* TX INITIALIZATION */
	for (i = 0; i < DMA_TX_SIZE; i++) {
		struct dma_desc *p;
		if (priv->extend_desc)
			p = &((priv->dma_etx + i)->basic);
		else
			p = priv->dma_tx + i;

		if (priv->synopsys_id >= DWMAC_CORE_4_00) {
			p->des0 = 0;
			p->des1 = 0;
			p->des2 = 0;
			p->des3 = 0;
		} else {
			p->des2 = 0;
		}

		priv->tx_skbuff_dma[i].buf = 0;
		priv->tx_skbuff_dma[i].map_as_page = false;
		priv->tx_skbuff_dma[i].len = 0;
		priv->tx_skbuff_dma[i].last_segment = false;
		priv->tx_skbuff[i] = NULL;
	}

	priv->dirty_tx = 0;
	priv->cur_tx = 0;
	netdev_reset_queue(priv->dev);

	stmmac_clear_descriptors(priv);

	if (netif_msg_hw(priv))
		stmmac_display_rings(priv);

	return 0;
err_init_rx_buffers:
	while (--i >= 0)
		stmmac_free_rx_buffers(priv, i);
	return ret;
}

static void dma_free_rx_skbufs(struct stmmac_priv *priv)
{
	int i;

	for (i = 0; i < DMA_RX_SIZE; i++)
		stmmac_free_rx_buffers(priv, i);
}

static void dma_free_tx_skbufs(struct stmmac_priv *priv)
{
	int i;

	for (i = 0; i < DMA_TX_SIZE; i++) {
		if (priv->tx_skbuff_dma[i].buf) {
			if (priv->tx_skbuff_dma[i].map_as_page)
				dma_unmap_page(priv->device,
					       priv->tx_skbuff_dma[i].buf,
					       priv->tx_skbuff_dma[i].len,
					       DMA_TO_DEVICE);
			else
				dma_unmap_single(priv->device,
						 priv->tx_skbuff_dma[i].buf,
						 priv->tx_skbuff_dma[i].len,
						 DMA_TO_DEVICE);
		}

		if (priv->tx_skbuff[i]) {
			dev_kfree_skb_any(priv->tx_skbuff[i]);
			priv->tx_skbuff[i] = NULL;
			priv->tx_skbuff_dma[i].buf = 0;
			priv->tx_skbuff_dma[i].map_as_page = false;
		}
	}
}

/**
 * alloc_dma_desc_resources - alloc TX/RX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffers in order to
 * allow the zero-copy mechanism.
 */
static int alloc_dma_desc_resources(struct stmmac_priv *priv)
{
	int ret = -ENOMEM;

	priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t),
					    GFP_KERNEL);
	if (!priv->rx_skbuff_dma)
		return -ENOMEM;

	priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *),
					GFP_KERNEL);
	if (!priv->rx_skbuff)
		goto err_rx_skbuff;

	priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
					    sizeof(*priv->tx_skbuff_dma),
					    GFP_KERNEL);
	if (!priv->tx_skbuff_dma)
		goto err_tx_skbuff_dma;

	priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *),
					GFP_KERNEL);
	if (!priv->tx_skbuff)
		goto err_tx_skbuff;

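	/* The descriptor rings live in coherent DMA memory because both
	 * the CPU and the DMA engine access them concurrently; the packet
	 * buffers themselves use streaming DMA mappings instead.
	 */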
	if (priv->extend_desc) {
		priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
						    sizeof(struct
							   dma_extended_desc),
						    &priv->dma_rx_phy,
						    GFP_KERNEL);
		if (!priv->dma_erx)
			goto err_dma;

		priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
						    sizeof(struct
							   dma_extended_desc),
						    &priv->dma_tx_phy,
						    GFP_KERNEL);
		if (!priv->dma_etx) {
			dma_free_coherent(priv->device, DMA_RX_SIZE *
					  sizeof(struct dma_extended_desc),
					  priv->dma_erx, priv->dma_rx_phy);
			goto err_dma;
		}
	} else {
		priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
						   sizeof(struct dma_desc),
						   &priv->dma_rx_phy,
						   GFP_KERNEL);
		if (!priv->dma_rx)
			goto err_dma;

		priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
						   sizeof(struct dma_desc),
						   &priv->dma_tx_phy,
						   GFP_KERNEL);
		if (!priv->dma_tx) {
			dma_free_coherent(priv->device, DMA_RX_SIZE *
					  sizeof(struct dma_desc),
					  priv->dma_rx, priv->dma_rx_phy);
			goto err_dma;
		}
	}

	return 0;

err_dma:
	kfree(priv->tx_skbuff);
err_tx_skbuff:
	kfree(priv->tx_skbuff_dma);
err_tx_skbuff_dma:
	kfree(priv->rx_skbuff);
err_rx_skbuff:
	kfree(priv->rx_skbuff_dma);
	return ret;
}

static void free_dma_desc_resources(struct stmmac_priv *priv)
{
	/* Release the DMA TX/RX socket buffers */
	dma_free_rx_skbufs(priv);
	dma_free_tx_skbufs(priv);

	/* Free DMA regions of consistent memory previously allocated */
	if (!priv->extend_desc) {
		dma_free_coherent(priv->device,
				  DMA_TX_SIZE * sizeof(struct dma_desc),
				  priv->dma_tx, priv->dma_tx_phy);
		dma_free_coherent(priv->device,
				  DMA_RX_SIZE * sizeof(struct dma_desc),
				  priv->dma_rx, priv->dma_rx_phy);
	} else {
		dma_free_coherent(priv->device, DMA_TX_SIZE *
				  sizeof(struct dma_extended_desc),
				  priv->dma_etx, priv->dma_tx_phy);
		dma_free_coherent(priv->device, DMA_RX_SIZE *
				  sizeof(struct dma_extended_desc),
				  priv->dma_erx, priv->dma_rx_phy);
	}
	kfree(priv->rx_skbuff_dma);
	kfree(priv->rx_skbuff);
	kfree(priv->tx_skbuff_dma);
	kfree(priv->tx_skbuff);
}

/**
 *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
 *  @priv: driver private structure
 *  Description: It is used for enabling the rx queues in the MAC
 */
static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	int queue;
	u8 mode;

	for (queue = 0; queue < rx_queues_count; queue++) {
		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
		priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
	}
}

/**
 *  stmmac_dma_operation_mode - HW DMA operation mode
 *  @priv: driver private structure
 *  Description: it is used for configuring the DMA operation mode register in
 *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
 */
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	int rxfifosz = priv->plat->rx_fifo_size;
	u32 txmode = 0;
	u32 rxmode = 0;
	u32 chan = 0;

	if (rxfifosz == 0)
		rxfifosz = priv->dma_cap.rx_fifo_size;

	if (priv->plat->force_thresh_dma_mode) {
		txmode = tc;
		rxmode = tc;
	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
		/*
		 * In case of GMAC, SF mode can be enabled
		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE if actually supported
		 * 2) There is no bugged Jumbo frame support
		 *    that needs to not insert csum in the TDES.
		 */
		txmode = SF_DMA_MODE;
		rxmode = SF_DMA_MODE;
		priv->xstats.threshold = SF_DMA_MODE;
	} else {
		txmode = tc;
		rxmode = SF_DMA_MODE;
	}

	/* configure all channels */
	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		for (chan = 0; chan < rx_channels_count; chan++)
			priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
						   rxfifosz);

		for (chan = 0; chan < tx_channels_count; chan++)
			priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
	} else {
		priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
					rxfifosz);
	}
}

/**
 * stmmac_tx_clean - to manage the transmission completion
 * @priv: driver private structure
 * Description: it reclaims the transmit resources after transmission completes.
 */
static void stmmac_tx_clean(struct stmmac_priv *priv)
{
	unsigned int bytes_compl = 0, pkts_compl = 0;
	unsigned int entry = priv->dirty_tx;

	netif_tx_lock(priv->dev);

	priv->xstats.tx_clean++;

	while (entry != priv->cur_tx) {
		struct sk_buff *skb = priv->tx_skbuff[entry];
		struct dma_desc *p;
		int status;

		if (priv->extend_desc)
			p = (struct dma_desc *)(priv->dma_etx + entry);
		else
			p = priv->dma_tx + entry;

		status = priv->hw->desc->tx_status(&priv->dev->stats,
						      &priv->xstats, p,
						      priv->ioaddr);
		/* Check if the descriptor is owned by the DMA */
		if (unlikely(status & tx_dma_own))
			break;

		/* Just consider the last segment and ...*/
		if (likely(!(status & tx_not_ls))) {
			/* ... verify the status error condition */
			if (unlikely(status & tx_err)) {
				priv->dev->stats.tx_errors++;
			} else {
				priv->dev->stats.tx_packets++;
				priv->xstats.tx_pkt_n++;
			}
			stmmac_get_tx_hwtstamp(priv, p, skb);
		}

		if (likely(priv->tx_skbuff_dma[entry].buf)) {
			if (priv->tx_skbuff_dma[entry].map_as_page)
				dma_unmap_page(priv->device,
					       priv->tx_skbuff_dma[entry].buf,
					       priv->tx_skbuff_dma[entry].len,
					       DMA_TO_DEVICE);
			else
				dma_unmap_single(priv->device,
						 priv->tx_skbuff_dma[entry].buf,
						 priv->tx_skbuff_dma[entry].len,
						 DMA_TO_DEVICE);
			priv->tx_skbuff_dma[entry].buf = 0;
			priv->tx_skbuff_dma[entry].len = 0;
			priv->tx_skbuff_dma[entry].map_as_page = false;
		}

		if (priv->hw->mode->clean_desc3)
			priv->hw->mode->clean_desc3(priv, p);

		priv->tx_skbuff_dma[entry].last_segment = false;
		priv->tx_skbuff_dma[entry].is_jumbo = false;

		if (likely(skb != NULL)) {
			pkts_compl++;
			bytes_compl += skb->len;
			dev_consume_skb_any(skb);
			priv->tx_skbuff[entry] = NULL;
		}

		priv->hw->desc->release_tx_desc(p, priv->mode);

		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
	}
	priv->dirty_tx = entry;

	netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);

	if (unlikely(netif_queue_stopped(priv->dev) &&
	    stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) {
		netif_dbg(priv, tx_done, priv->dev,
			  "%s: restart transmit\n", __func__);
		netif_wake_queue(priv->dev);
	}

	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
		stmmac_enable_eee_mode(priv);
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
	}
	netif_tx_unlock(priv->dev);
}

static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
{
	priv->hw->dma->enable_dma_irq(priv->ioaddr);
}

static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
{
	priv->hw->dma->disable_dma_irq(priv->ioaddr);
}

/**
 * stmmac_tx_err - to manage the tx error
 * @priv: driver private structure
 * Description: it cleans the descriptors and restarts the transmission
 * in case of transmission errors.
 */
static void stmmac_tx_err(struct stmmac_priv *priv)
{
	int i;
	netif_stop_queue(priv->dev);

	priv->hw->dma->stop_tx(priv->ioaddr);
	dma_free_tx_skbufs(priv);
	for (i = 0; i < DMA_TX_SIZE; i++)
		if (priv->extend_desc)
			priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
						     priv->mode,
						     (i == DMA_TX_SIZE - 1));
		else
			priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
						     priv->mode,
						     (i == DMA_TX_SIZE - 1));
	priv->dirty_tx = 0;
	priv->cur_tx = 0;
	netdev_reset_queue(priv->dev);
	priv->hw->dma->start_tx(priv->ioaddr);

	priv->dev->stats.tx_errors++;
	netif_wake_queue(priv->dev);
}

/**
 *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
 *  @priv: driver private structure
 *  @txmode: TX operating mode
 *  @rxmode: RX operating mode
 *  @chan: channel index
 *  Description: it is used for configuring of the DMA operation mode in
 *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
 *  mode.
 */
static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
					  u32 rxmode, u32 chan)
{
	int rxfifosz = priv->plat->rx_fifo_size;

	if (rxfifosz == 0)
		rxfifosz = priv->dma_cap.rx_fifo_size;

	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
					   rxfifosz);
		priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
	} else {
		priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
					rxfifosz);
	}
}

/**
 * stmmac_dma_interrupt - DMA ISR
 * @priv: driver private structure
 * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine and schedules the poll method in case
 * some work can be done.
 */
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
	u32 chan = STMMAC_CHAN0;
	int status;

	status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
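	/* RX/TX work is deferred to NAPI: mask the DMA interrupt and let
	 * the poll routine run the clean/refill loops, re-enabling the
	 * interrupt only once the pending work is done.
	 */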
	if (likely((status & handle_rx)) || (status & handle_tx)) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			stmmac_disable_dma_irq(priv);
			__napi_schedule(&priv->napi);
		}
	}
	if (unlikely(status & tx_hard_error_bump_tc)) {
		/* Try to bump up the dma threshold on this failure */
		if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
		    (tc <= 256)) {
			tc += 64;
			if (priv->plat->force_thresh_dma_mode)
				stmmac_set_dma_operation_mode(priv,
							      tc, tc, chan);
			else
				stmmac_set_dma_operation_mode(priv, tc,
							     SF_DMA_MODE, chan);

			priv->xstats.threshold = tc;
		}
	} else if (unlikely(status == tx_hard_error))
		stmmac_tx_err(priv);
}

/**
 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
 * @priv: driver private structure
 * Description: this masks the MMC irq, in fact, the counters are managed in SW.
 */
static void stmmac_mmc_setup(struct stmmac_priv *priv)
{
	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;

	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
		priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
	} else {
		priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
		priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
	}

	dwmac_mmc_intr_all_mask(priv->mmcaddr);

	if (priv->dma_cap.rmon) {
		dwmac_mmc_ctrl(priv->mmcaddr, mode);
		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
	} else
		netdev_info(priv->dev, "No MAC Management Counters available\n");
}

/**
 * stmmac_selec_desc_mode - to select among normal/alternate/extended descriptors
 * @priv: driver private structure
 * Description: select the Enhanced/Alternate or Normal descriptors.
 * In case of Enhanced/Alternate, it checks if the extended descriptors are
 * supported by the HW capability register.
 */
static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
{
	if (priv->plat->enh_desc) {
		dev_info(priv->device, "Enhanced/Alternate descriptors\n");

		/* GMAC older than 3.50 has no extended descriptors */
		if (priv->synopsys_id >= DWMAC_CORE_3_50) {
			dev_info(priv->device, "Enabled extended descriptors\n");
			priv->extend_desc = 1;
		} else
			dev_warn(priv->device, "Extended descriptors not supported\n");

		priv->hw->desc = &enh_desc_ops;
	} else {
		dev_info(priv->device, "Normal descriptors\n");
		priv->hw->desc = &ndesc_ops;
	}
}

/**
 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
 * @priv: driver private structure
 * Description:
 *  new GMAC chip generations have a new register to indicate the
 *  presence of the optional feature/functions.
 *  This can also be used to override the value passed through the
 *  platform and necessary for old MAC10/100 and GMAC chips.
 */
static int stmmac_get_hw_features(struct stmmac_priv *priv)
{
	u32 ret = 0;

	if (priv->hw->dma->get_hw_feature) {
		priv->hw->dma->get_hw_feature(priv->ioaddr,
					      &priv->dma_cap);
		ret = 1;
	}

	return ret;
}

/**
 * stmmac_check_ether_addr - check if the MAC addr is valid
 * @priv: driver private structure
 * Description:
 * it verifies if the MAC address is valid; in case of failures it
 * generates a random MAC address
 */
static void stmmac_check_ether_addr(struct stmmac_priv *priv)
{
	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
		priv->hw->mac->get_umac_addr(priv->hw,
					     priv->dev->dev_addr, 0);
		if (!is_valid_ether_addr(priv->dev->dev_addr))
			eth_hw_addr_random(priv->dev);
		netdev_info(priv->dev, "device MAC address %pM\n",
			    priv->dev->dev_addr);
	}
}

/**
 * stmmac_init_dma_engine - DMA init.
 * @priv: driver private structure
 * Description:
 * It inits the DMA invoking the specific MAC/GMAC callback.
 * Some DMA parameters can be passed from the platform;
 * if these are not passed, a default is kept for the MAC or GMAC.
 */
static int stmmac_init_dma_engine(struct stmmac_priv *priv)
{
	int atds = 0;
	int ret = 0;

	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
		dev_err(priv->device, "Invalid DMA configuration\n");
		return -EINVAL;
	}

	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
		atds = 1;

	ret = priv->hw->dma->reset(priv->ioaddr);
	if (ret) {
		dev_err(priv->device, "Failed to reset the dma\n");
		return ret;
	}

	priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
			    priv->dma_tx_phy, priv->dma_rx_phy, atds);

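	/* DWMAC4 cores use tail pointers: point them one descriptor past
	 * the end of each ring so the DMA may process the whole ring.
	 */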
	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		priv->rx_tail_addr = priv->dma_rx_phy +
			    (DMA_RX_SIZE * sizeof(struct dma_desc));
		priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, priv->rx_tail_addr,
					       STMMAC_CHAN0);

		priv->tx_tail_addr = priv->dma_tx_phy +
			    (DMA_TX_SIZE * sizeof(struct dma_desc));
		priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
					       STMMAC_CHAN0);
	}

	if (priv->plat->axi && priv->hw->dma->axi)
1668 1669
		priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);

1670
	return ret;
1671 1672
}

/**
 * stmmac_tx_timer - mitigation sw timer for tx.
 * @data: data pointer
 * Description:
 * This is the timer handler to directly invoke the stmmac_tx_clean.
 */
static void stmmac_tx_timer(unsigned long data)
{
	struct stmmac_priv *priv = (struct stmmac_priv *)data;

	stmmac_tx_clean(priv);
}

/**
 * stmmac_init_tx_coalesce - init tx mitigation options.
 * @priv: driver private structure
 * Description:
 * This inits the transmit coalesce parameters: i.e. timer rate,
 * timer handler and default threshold used for enabling the
 * interrupt on completion bit.
 */
static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
{
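	/* Arm the SW timer used to clean the TX ring whenever too few frames
	 * are queued to trigger an interrupt-on-completion.
	 */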
	priv->tx_coal_frames = STMMAC_TX_FRAMES;
	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
	init_timer(&priv->txtimer);
	priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
	priv->txtimer.data = (unsigned long)priv;
	priv->txtimer.function = stmmac_tx_timer;
	add_timer(&priv->txtimer);
}

/**
 *  stmmac_set_tx_queue_weight - Set TX queue weights
 *  @priv: driver private structure
 *  Description: It is used for setting the TX queue weights
 */
static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 weight;
	u32 queue;

	for (queue = 0; queue < tx_queues_count; queue++) {
		weight = priv->plat->tx_queues_cfg[queue].weight;
		priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
	}
}

/**
 *  stmmac_configure_cbs - Configure CBS in TX queue
 *  @priv: driver private structure
 *  Description: It is used for configuring CBS in AVB TX queues
 */
static void stmmac_configure_cbs(struct stmmac_priv *priv)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 mode_to_use;
	u32 queue;

	for (queue = 0; queue < tx_queues_count; queue++) {
		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
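		/* CBS only applies to AVB queues; skip queues configured in
		 * the default DCB mode.
		 */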
		if (mode_to_use == MTL_QUEUE_DCB)
			continue;

		priv->hw->mac->config_cbs(priv->hw,
				priv->plat->tx_queues_cfg[queue].send_slope,
				priv->plat->tx_queues_cfg[queue].idle_slope,
				priv->plat->tx_queues_cfg[queue].high_credit,
				priv->plat->tx_queues_cfg[queue].low_credit,
				queue);
	}
}

/**
 *  stmmac_rx_queue_dma_chan_map - Map RX queues to RX DMA channels
 *  @priv: driver private structure
 *  Description: It is used for mapping RX queues to RX DMA channels
 */
static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 queue;
	u32 chan;

	for (queue = 0; queue < rx_queues_count; queue++) {
		chan = priv->plat->rx_queues_cfg[queue].chan;
		priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
	}
}

/**
 *  stmmac_mtl_configuration - Configure MTL
 *  @priv: driver private structure
 *  Description: It is used for configuring MTL
 */
static void stmmac_mtl_configuration(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 tx_queues_count = priv->plat->tx_queues_to_use;

	if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
		stmmac_set_tx_queue_weight(priv);

	/* Configure MTL RX algorithms */
	if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
		priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
						priv->plat->rx_sched_algorithm);

	/* Configure MTL TX algorithms */
	if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
		priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
						priv->plat->tx_sched_algorithm);

	/* Configure CBS in AVB TX queues */
	if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
		stmmac_configure_cbs(priv);

	/* Map RX MTL to DMA channels */
	if (rx_queues_count > 1 && priv->hw->mac->map_mtl_to_dma)
		stmmac_rx_queue_dma_chan_map(priv);

	/* Enable MAC RX Queues */
	if (rx_queues_count > 1 && priv->hw->mac->rx_queue_enable)
		stmmac_mac_enable_rx_queues(priv);

	/* Set the HW DMA mode and the COE */
	stmmac_dma_operation_mode(priv);
}

/**
 * stmmac_hw_setup - setup mac in a usable state.
 *  @dev : pointer to the device structure.
 *  @init_ptp: initialize PTP if set
 *  Description:
 *  this is the main function to setup the HW in a usable state: the DMA
 *  engine is reset, the core registers are configured (e.g. AXI,
 *  checksum features, timers) and the DMA is made ready to start
 *  receiving and transmitting.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	/* DMA initialization and SW reset */
	ret = stmmac_init_dma_engine(priv);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
			   __func__);
		return ret;
	}

	/* Copy the MAC addr into the HW */
	priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);

	/* PS and related bits will be programmed according to the speed */
	if (priv->hw->pcs) {
		int speed = priv->plat->mac_port_sel_speed;

		if ((speed == SPEED_10) || (speed == SPEED_100) ||
		    (speed == SPEED_1000)) {
			priv->hw->ps = speed;
		} else {
			dev_warn(priv->device, "invalid port speed\n");
			priv->hw->ps = 0;
		}
	}

	/* Initialize the MAC Core */
	priv->hw->mac->core_init(priv->hw, dev->mtu);

	/* Initialize MTL */
	if (priv->synopsys_id >= DWMAC_CORE_4_00)
		stmmac_mtl_configuration(priv);

	ret = priv->hw->mac->rx_ipc(priv->hw);
	if (!ret) {
		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
		priv->hw->rx_csum = 0;
	}

	/* Enable the MAC Rx/Tx */
	if (priv->synopsys_id >= DWMAC_CORE_4_00)
		stmmac_dwmac4_set_mac(priv->ioaddr, true);
	else
		stmmac_set_mac(priv->ioaddr, true);
1863 1864 1865

	stmmac_mmc_setup(priv);

	if (init_ptp) {
		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
		if (ret < 0)
			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);

		ret = stmmac_init_ptp(priv);
		if (ret == -EOPNOTSUPP)
			netdev_warn(priv->dev, "PTP not supported by HW\n");
		else if (ret)
			netdev_warn(priv->dev, "PTP init failed\n");
	}

#ifdef CONFIG_DEBUG_FS
	ret = stmmac_init_fs(dev);
	if (ret < 0)
		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
			    __func__);
#endif
	/* Start the ball rolling... */
	netdev_dbg(priv->dev, "DMA RX/TX processes started...\n");
	priv->hw->dma->start_tx(priv->ioaddr);
	priv->hw->dma->start_rx(priv->ioaddr);

	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;

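	/* When the RX interrupt watchdog (RIWT) is available, program it with
	 * the maximum count so RX interrupts are coalesced as much as
	 * possible.
	 */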
	if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
		priv->rx_riwt = MAX_DMA_RIWT;
		priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
	}

	if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
		priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);

	/*  set TX ring length */
	if (priv->hw->dma->set_tx_ring_len)
		priv->hw->dma->set_tx_ring_len(priv->ioaddr,
					       (DMA_TX_SIZE - 1));
	/*  set RX ring length */
	if (priv->hw->dma->set_rx_ring_len)
		priv->hw->dma->set_rx_ring_len(priv->ioaddr,
					       (DMA_RX_SIZE - 1));
	/* Enable TSO */
	if (priv->tso)
		priv->hw->dma->enable_tso(priv->ioaddr, 1, STMMAC_CHAN0);

	return 0;
}

static void stmmac_hw_teardown(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	clk_disable_unprepare(priv->plat->clk_ptp_ref);
}

/**
 *  stmmac_open - open entry point of the driver
 *  @dev : pointer to the device structure.
 *  Description:
 *  This function is the open entry point of the driver.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int stmmac_open(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	stmmac_check_ether_addr(priv);

	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI) {
		ret = stmmac_init_phy(dev);
		if (ret) {
			netdev_err(priv->dev,
				   "%s: Cannot attach to PHY (error: %d)\n",
				   __func__, ret);
			return ret;
		}
	}

	/* Extra statistics */
	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
	priv->xstats.threshold = tc;

	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
	priv->rx_copybreak = STMMAC_RX_COPYBREAK;

	ret = alloc_dma_desc_resources(priv);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
			   __func__);
		goto dma_desc_error;
	}

	ret = init_dma_desc_rings(dev, GFP_KERNEL);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
			   __func__);
		goto init_error;
	}

	ret = stmmac_hw_setup(dev, true);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
		goto init_error;
	}

	stmmac_init_tx_coalesce(priv);

	if (dev->phydev)
		phy_start(dev->phydev);

	/* Request the IRQ lines */
	ret = request_irq(dev->irq, stmmac_interrupt,
			  IRQF_SHARED, dev->name, dev);
	if (unlikely(ret < 0)) {
		netdev_err(priv->dev,
			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
			   __func__, dev->irq, ret);
		goto irq_error;
	}

	/* Request the Wake IRQ in case another line is used for WoL */
	if (priv->wol_irq != dev->irq) {
		ret = request_irq(priv->wol_irq, stmmac_interrupt,
				  IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
				   __func__, priv->wol_irq, ret);
			goto wolirq_error;
		}
	}

	/* Request the LPI IRQ in case a separate line is used for LPI */
	if (priv->lpi_irq > 0) {
		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
				  dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
				   __func__, priv->lpi_irq, ret);
			goto lpiirq_error;
		}
	}

	napi_enable(&priv->napi);
	netif_start_queue(dev);

	return 0;

lpiirq_error:
	if (priv->wol_irq != dev->irq)
		free_irq(priv->wol_irq, dev);
wolirq_error:
	free_irq(dev->irq, dev);
irq_error:
	if (dev->phydev)
		phy_stop(dev->phydev);
	del_timer_sync(&priv->txtimer);
	stmmac_hw_teardown(dev);
init_error:
	free_dma_desc_resources(priv);
dma_desc_error:
	if (dev->phydev)
		phy_disconnect(dev->phydev);
	return ret;
}

/**
 *  stmmac_release - close entry point of the driver
 *  @dev : device pointer.
 *  Description:
 *  This is the stop entry point of the driver.
 */
static int stmmac_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (priv->eee_enabled)
		del_timer_sync(&priv->eee_ctrl_timer);

	/* Stop and disconnect the PHY */
	if (dev->phydev) {
		phy_stop(dev->phydev);
		phy_disconnect(dev->phydev);
	}

	netif_stop_queue(dev);

	napi_disable(&priv->napi);

	del_timer_sync(&priv->txtimer);

	/* Free the IRQ lines */
	free_irq(dev->irq, dev);
	if (priv->wol_irq != dev->irq)
		free_irq(priv->wol_irq, dev);
	if (priv->lpi_irq > 0)
		free_irq(priv->lpi_irq, dev);

	/* Stop TX/RX DMA and clear the descriptors */
	priv->hw->dma->stop_tx(priv->ioaddr);
	priv->hw->dma->stop_rx(priv->ioaddr);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv);

	/* Disable the MAC Rx/Tx */
	stmmac_set_mac(priv->ioaddr, false);

	netif_carrier_off(dev);

#ifdef CONFIG_DEBUG_FS
	stmmac_exit_fs(dev);
#endif

	stmmac_release_ptp(priv);

	return 0;
}

/**
 *  stmmac_tso_allocator - allocate TSO payload descriptors
 *  @priv: driver private structure
 *  @des: buffer start address
 *  @total_len: total length to fill in descriptors
 *  @last_segment: condition for the last descriptor
 *  Description:
 *  This function fills the descriptors and requests new descriptors
 *  according to the buffer length to fill.
 */
static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
				 int total_len, bool last_segment)
{
	struct dma_desc *desc;
	int tmp_len;
	u32 buff_size;

	tmp_len = total_len;

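	/* Walk the remaining payload, consuming one descriptor per chunk of
	 * at most TSO_MAX_BUFF_SIZE bytes.
	 */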
	while (tmp_len > 0) {
		priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
		desc = priv->dma_tx + priv->cur_tx;

		desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
			    TSO_MAX_BUFF_SIZE : tmp_len;

		priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
			0, 1,
			(last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
			0, 0);

		tmp_len -= TSO_MAX_BUFF_SIZE;
	}
}

/**
 *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
 *  @skb : the socket buffer
 *  @dev : device pointer
 *  Description: this is the transmit function that is called on TSO frames
 *  (support available on GMAC4 and newer chips).
 *  The diagram below shows the ring programming in case of TSO frames:
 *
 *  First Descriptor
 *   --------
 *   | DES0 |---> buffer1 = L2/L3/L4 header
 *   | DES1 |---> TCP Payload (can continue on next descr...)
 *   | DES2 |---> buffer 1 and 2 len
 *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
 *   --------
 *	|
 *     ...
 *	|
 *   --------
 *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
 *   | DES1 | --|
 *   | DES2 | --> buffer 1 and 2 len
 *   | DES3 |
 *   --------
 *
 * The MSS is kept fixed while TSO is enabled, so the TDES3 context field
 * only needs to be programmed when the MSS value changes.
 */
static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u32 pay_len, mss;
	int tmp_pay_len = 0;
	struct stmmac_priv *priv = netdev_priv(dev);
	int nfrags = skb_shinfo(skb)->nr_frags;
	unsigned int first_entry, des;
	struct dma_desc *desc, *first, *mss_desc = NULL;
	u8 proto_hdr_len;
	int i;

	/* Compute header lengths */
	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
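	/* These headers travel in buff1 of the first descriptor (see the
	 * ring diagram above); the payload starts in buff2.
	 */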

	/* Desc availability check based on the threshold should be safe enough */
	if (unlikely(stmmac_tx_avail(priv) <
		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx Ring full when queue awake\n",
				   __func__);
		}
		return NETDEV_TX_BUSY;
	}

	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */

	mss = skb_shinfo(skb)->gso_size;

	/* set new MSS value if needed */
	if (mss != priv->mss) {
		mss_desc = priv->dma_tx + priv->cur_tx;
		priv->hw->desc->set_mss(mss_desc, mss);
		priv->mss = mss;
		priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
	}

	if (netif_msg_tx_queued(priv)) {
		pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
			skb->data_len);
	}

	first_entry = priv->cur_tx;

	desc = priv->dma_tx + first_entry;
	first = desc;

	/* first descriptor: fill Headers on Buf1 */
	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
			     DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, des))
		goto dma_map_err;

	priv->tx_skbuff_dma[first_entry].buf = des;
	priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
	priv->tx_skbuff[first_entry] = skb;

	first->des0 = cpu_to_le32(des);

	/* Fill start of payload in buff2 of first descriptor */
	if (pay_len)
		first->des1 = cpu_to_le32(des + proto_hdr_len);

	/* If needed take extra descriptors to fill the remaining payload */
	tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;

	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0));

	/* Prepare fragments */
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		des = skb_frag_dma_map(priv->device, frag, 0,
				       skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err;

		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
				     (i == nfrags - 1));

		priv->tx_skbuff_dma[priv->cur_tx].buf = des;
		priv->tx_skbuff_dma[priv->cur_tx].len = skb_frag_size(frag);
		priv->tx_skbuff[priv->cur_tx] = NULL;
		priv->tx_skbuff_dma[priv->cur_tx].map_as_page = true;
	}

	priv->tx_skbuff_dma[priv->cur_tx].last_segment = true;

	priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);

	if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitting packets\n",
			  __func__);
		netif_stop_queue(dev);
	}

	dev->stats.tx_bytes += skb->len;
	priv->xstats.tx_tso_frames++;
	priv->xstats.tx_tso_nfrags += nfrags;

	/* Manage tx mitigation */
	priv->tx_count_frames += nfrags + 1;
	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
		mod_timer(&priv->txtimer,
			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
	} else {
		priv->tx_count_frames = 0;
		priv->hw->desc->set_tx_ic(desc);
		priv->xstats.tx_set_ic_bit++;
	}

	if (!priv->hwts_tx_en)
		skb_tx_timestamp(skb);

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		     priv->hwts_tx_en)) {
		/* declare that device is doing timestamping */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		priv->hw->desc->enable_tx_timestamp(first);
	}

	/* Complete the first descriptor before granting the DMA */
	priv->hw->desc->prepare_tso_tx_desc(first, 1,
			proto_hdr_len,
			pay_len,
			1, priv->tx_skbuff_dma[first_entry].last_segment,
			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));

	/* If context desc is used to change MSS */
	if (mss_desc)
		priv->hw->desc->set_tx_owner(mss_desc);

	/* The own bit must be the latest setting done when preparing the
	 * descriptor and then a barrier is needed to make sure that
	 * all is coherent before granting the DMA engine.
	 */
	dma_wmb();

	if (netif_msg_pktdata(priv)) {
		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
			__func__, priv->cur_tx, priv->dirty_tx, first_entry,
			priv->cur_tx, first, nfrags);

		priv->hw->desc->display_ring((void *)priv->dma_tx, DMA_TX_SIZE,
					     0);

		pr_info(">>> frame to be transmitted: ");
		print_pkt(skb->data, skb_headlen(skb));
	}

	netdev_sent_queue(dev, skb->len);

	priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
				       STMMAC_CHAN0);

	return NETDEV_TX_OK;

dma_map_err:
	dev_err(priv->device, "Tx dma map failed\n");
	dev_kfree_skb(skb);
	priv->dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

/**
 *  stmmac_xmit - Tx entry point of the driver
 *  @skb : the socket buffer
 *  @dev : device pointer
 *  Description : this is the tx entry point of the driver.
 *  It programs the chain or the ring and supports oversized frames
 *  and SG feature.
 */
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int nopaged_len = skb_headlen(skb);
	int i, csum_insertion = 0, is_jumbo = 0;
	int nfrags = skb_shinfo(skb)->nr_frags;
	unsigned int entry, first_entry;
	struct dma_desc *desc, *first;
	unsigned int enh_desc;
	unsigned int des;

	/* Manage oversized TCP frames for GMAC4 device */
	if (skb_is_gso(skb) && priv->tso) {
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			return stmmac_tso_xmit(skb, dev);
	}

	if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx Ring full when queue awake\n",
				   __func__);
		}
		return NETDEV_TX_BUSY;
	}

	if (priv->tx_path_in_lpi_mode)
		stmmac_disable_eee_mode(priv);

	entry = priv->cur_tx;
	first_entry = entry;

	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);

	if (likely(priv->extend_desc))
		desc = (struct dma_desc *)(priv->dma_etx + entry);
	else
		desc = priv->dma_tx + entry;

	first = desc;

	priv->tx_skbuff[first_entry] = skb;

	enh_desc = priv->plat->enh_desc;
	/* To program the descriptors according to the size of the frame */
	if (enh_desc)
		is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);

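	/* Jumbo frames on pre-4.00 cores are handed to the ring/chain mode
	 * helper, which spreads them over multiple descriptors.
	 */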
	if (unlikely(is_jumbo) && likely(priv->synopsys_id <
					 DWMAC_CORE_4_00)) {
		entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
		if (unlikely(entry < 0))
			goto dma_map_err;
	}

	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);
		bool last_segment = (i == (nfrags - 1));

		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);

		if (likely(priv->extend_desc))
			desc = (struct dma_desc *)(priv->dma_etx + entry);
		else
			desc = priv->dma_tx + entry;

		des = skb_frag_dma_map(priv->device, frag, 0, len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err; /* should reuse desc w/o issues */

		priv->tx_skbuff[entry] = NULL;

		priv->tx_skbuff_dma[entry].buf = des;
		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
			desc->des0 = cpu_to_le32(des);
		else
			desc->des2 = cpu_to_le32(des);

		priv->tx_skbuff_dma[entry].map_as_page = true;
		priv->tx_skbuff_dma[entry].len = len;
		priv->tx_skbuff_dma[entry].last_segment = last_segment;

		/* Prepare the descriptor and set the own bit too */
		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
						priv->mode, 1, last_segment);
	}

	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);

	priv->cur_tx = entry;

	if (netif_msg_pktdata(priv)) {
		void *tx_head;

		netdev_dbg(priv->dev,
			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
			   __func__, priv->cur_tx, priv->dirty_tx, first_entry,
			   entry, first, nfrags);
		if (priv->extend_desc)
			tx_head = (void *)priv->dma_etx;
		else
			tx_head = (void *)priv->dma_tx;

		priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
		print_pkt(skb->data, skb->len);
	}
	if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitting packets\n",
			  __func__);
		netif_stop_queue(dev);
	}

	dev->stats.tx_bytes += skb->len;

	/* According to the coalesce parameter the IC bit for the latest
	 * segment is reset and the timer re-started to clean the tx status.
	 * This approach takes care of the fragments: desc is the first
	 * element in case of no SG.
	 */
	priv->tx_count_frames += nfrags + 1;
	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
		mod_timer(&priv->txtimer,
			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
	} else {
		priv->tx_count_frames = 0;
		priv->hw->desc->set_tx_ic(desc);
		priv->xstats.tx_set_ic_bit++;
	}

	if (!priv->hwts_tx_en)
		skb_tx_timestamp(skb);
	/* Ready to fill the first descriptor and set the OWN bit w/o any
	 * problems because all the descriptors are actually ready to be
	 * passed to the DMA engine.
	 */
	if (likely(!is_jumbo)) {
		bool last_segment = (nfrags == 0);

		des = dma_map_single(priv->device, skb->data,
				     nopaged_len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err;

		priv->tx_skbuff_dma[first_entry].buf = des;
		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
			first->des0 = cpu_to_le32(des);
		else
			first->des2 = cpu_to_le32(des);

		priv->tx_skbuff_dma[first_entry].len = nopaged_len;
		priv->tx_skbuff_dma[first_entry].last_segment = last_segment;

		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
			     priv->hwts_tx_en)) {
			/* declare that device is doing timestamping */
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			priv->hw->desc->enable_tx_timestamp(first);
		}

		/* Prepare the first descriptor setting the OWN bit too */
		priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
						csum_insertion, priv->mode, 1,
						last_segment);

		/* The own bit must be the latest setting done when preparing
		 * the descriptor and then a barrier is needed to make sure
		 * that all is coherent before granting the DMA engine.
		 */
		dma_wmb();
	}

	netdev_sent_queue(dev, skb->len);

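	/* Kick the DMA: pre-4.00 cores use the transmit poll demand, while
	 * GMAC4 advances the ring tail pointer instead.
	 */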
	if (priv->synopsys_id < DWMAC_CORE_4_00)
		priv->hw->dma->enable_dma_transmission(priv->ioaddr);
	else
		priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
					       STMMAC_CHAN0);
	return NETDEV_TX_OK;
dma_map_err:
	netdev_err(priv->dev, "Tx DMA map failed\n");
	dev_kfree_skb(skb);
	priv->dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

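/* Strip the 802.1Q tag, if present, from the frame data and record it in
 * the skb so the stack receives it via the VLAN hwaccel path.
 */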
static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
{
	struct ethhdr *ehdr;
	u16 vlanid;

	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
	    NETIF_F_HW_VLAN_CTAG_RX &&
	    !__vlan_get_tag(skb, &vlanid)) {
		/* pop the vlan tag */
		ehdr = (struct ethhdr *)skb->data;
		memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
	}
}


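/* Return 1 when recent skb allocation failures mean the zero-copy RX path
 * should be avoided for a while (see stmmac_rx_refill()).
 */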
static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv)
{
	if (priv->rx_zeroc_thresh < STMMAC_RX_THRESH)
		return 0;

	return 1;
}

/**
 * stmmac_rx_refill - refill used skb preallocated buffers
 * @priv: driver private structure
 * Description: this is to reallocate the skb for the reception process
 * that is based on zero-copy.
 */
static inline void stmmac_rx_refill(struct stmmac_priv *priv)
{
	int bfsize = priv->dma_buf_sz;
	unsigned int entry = priv->dirty_rx;
	int dirty = stmmac_rx_dirty(priv);
	while (dirty-- > 0) {
		struct dma_desc *p;

		if (priv->extend_desc)
			p = (struct dma_desc *)(priv->dma_erx + entry);
		else
			p = priv->dma_rx + entry;

		if (likely(priv->rx_skbuff[entry] == NULL)) {
			struct sk_buff *skb;

			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
			if (unlikely(!skb)) {
				/* so for a while no zero-copy! */
				priv->rx_zeroc_thresh = STMMAC_RX_THRESH;
				if (unlikely(net_ratelimit()))
					dev_err(priv->device,
						"fail to alloc skb entry %d\n",
						entry);
				break;
			}

			priv->rx_skbuff[entry] = skb;
			priv->rx_skbuff_dma[entry] =
			    dma_map_single(priv->device, skb->data, bfsize,
					   DMA_FROM_DEVICE);
			if (dma_mapping_error(priv->device,
					      priv->rx_skbuff_dma[entry])) {
				netdev_err(priv->dev, "Rx DMA map failed\n");
				dev_kfree_skb(skb);
				break;
			}
			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
				p->des0 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
				p->des1 = 0;
			} else {
				p->des2 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
			}
			if (priv->hw->mode->refill_desc3)
				priv->hw->mode->refill_desc3(priv, p);
			if (priv->rx_zeroc_thresh > 0)
				priv->rx_zeroc_thresh--;

			netif_dbg(priv, rx_status, priv->dev,
				  "refill entry #%d\n", entry);
		}
		dma_wmb();

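		/* Hand the descriptor back to the DMA: GMAC4 re-initializes
		 * it, while older cores only need the own bit set again.
		 */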
		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
			priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
		else
			priv->hw->desc->set_rx_owner(p);

		dma_wmb();

		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
	}
	priv->dirty_rx = entry;
}

/**
 * stmmac_rx - manage the receive process
 * @priv: driver private structure
 * @limit: napi budget.
 * Description: this is the function called by the napi poll method.
 * It gets all the frames inside the ring.
 */
static int stmmac_rx(struct stmmac_priv *priv, int limit)
{
	unsigned int entry = priv->cur_rx;
	unsigned int next_entry;
	unsigned int count = 0;
	int coe = priv->hw->rx_csum;
	if (netif_msg_rx_status(priv)) {
		void *rx_head;

		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
		if (priv->extend_desc)
			rx_head = (void *)priv->dma_erx;
		else
			rx_head = (void *)priv->dma_rx;

		priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
	}
	while (count < limit) {
		int status;
		struct dma_desc *p;
		struct dma_desc *np;

		if (priv->extend_desc)
			p = (struct dma_desc *)(priv->dma_erx + entry);
		else
			p = priv->dma_rx + entry;
		/* read the status of the incoming frame */
		status = priv->hw->desc->rx_status(&priv->dev->stats,
						   &priv->xstats, p);
		/* check if managed by the DMA otherwise go ahead */
		if (unlikely(status & dma_own))
			break;

		count++;

		priv->cur_rx = STMMAC_GET_ENTRY(priv->cur_rx, DMA_RX_SIZE);
		next_entry = priv->cur_rx;

		if (priv->extend_desc)
			np = (struct dma_desc *)(priv->dma_erx + next_entry);
		else
			np = priv->dma_rx + next_entry;

		prefetch(np);
		if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
			priv->hw->desc->rx_extended_status(&priv->dev->stats,
							   &priv->xstats,
							   priv->dma_erx +
							   entry);
		if (unlikely(status == discard_frame)) {
			priv->dev->stats.rx_errors++;
			if (priv->hwts_rx_en && !priv->extend_desc) {
				/* DESC2 & DESC3 will be overwritten by device
				 * with timestamp value, hence reinitialize
				 * them in stmmac_rx_refill() function so that
				 * device can reuse it.
				 */
				priv->rx_skbuff[entry] = NULL;
				dma_unmap_single(priv->device,
						 priv->rx_skbuff_dma[entry],
						 priv->dma_buf_sz,
						 DMA_FROM_DEVICE);
			}
		} else {
			struct sk_buff *skb;
			int frame_len;
			unsigned int des;

			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
				des = le32_to_cpu(p->des0);
			else
				des = le32_to_cpu(p->des2);
			frame_len = priv->hw->desc->get_rx_frame_len(p, coe);

			/*  If frame length is greater than skb buffer size
			 *  (preallocated during init) then the packet is
			 *  ignored
			 */
			if (frame_len > priv->dma_buf_sz) {
				netdev_err(priv->dev,
					   "len %d larger than size (%d)\n",
					   frame_len, priv->dma_buf_sz);
				priv->dev->stats.rx_length_errors++;
				break;
			}

			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
			 * Type frames (LLC/LLC-SNAP)
			 */
			if (unlikely(status != llc_snap))
				frame_len -= ETH_FCS_LEN;
			if (netif_msg_rx_status(priv)) {
				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
					   p, entry, des);
				if (frame_len > ETH_FRAME_LEN)
					netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
						   frame_len, status);
			}
			/* The zero-copy is always used for all the sizes
			 * in case of GMAC4 because it needs
			 * to refill the used descriptors, always.
			 */
			if (unlikely(!priv->plat->has_gmac4 &&
				     ((frame_len < priv->rx_copybreak) ||
				     stmmac_rx_threshold_count(priv)))) {
				skb = netdev_alloc_skb_ip_align(priv->dev,
								frame_len);
				if (unlikely(!skb)) {
					if (net_ratelimit())
						dev_warn(priv->device,
							 "packet dropped\n");
					priv->dev->stats.rx_dropped++;
					break;
				}

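				/* Copybreak path: copy the frame into a new
				 * skb and leave the DMA buffer mapped so it
				 * can be reused as-is.
				 */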
				dma_sync_single_for_cpu(priv->device,
							priv->rx_skbuff_dma
							[entry], frame_len,
							DMA_FROM_DEVICE);
				skb_copy_to_linear_data(skb,
							priv->
							rx_skbuff[entry]->data,
							frame_len);

				skb_put(skb, frame_len);
				dma_sync_single_for_device(priv->device,
							   priv->rx_skbuff_dma
							   [entry], frame_len,
							   DMA_FROM_DEVICE);
			} else {
				skb = priv->rx_skbuff[entry];
				if (unlikely(!skb)) {
					netdev_err(priv->dev,
						   "%s: Inconsistent Rx chain\n",
						   priv->dev->name);
					priv->dev->stats.rx_dropped++;
					break;
				}
				prefetch(skb->data - NET_IP_ALIGN);
				priv->rx_skbuff[entry] = NULL;
				priv->rx_zeroc_thresh++;

				skb_put(skb, frame_len);
				dma_unmap_single(priv->device,
						 priv->rx_skbuff_dma[entry],
						 priv->dma_buf_sz,
						 DMA_FROM_DEVICE);
			}

			if (netif_msg_pktdata(priv)) {
				netdev_dbg(priv->dev, "frame received (%dbytes)",
					   frame_len);
				print_pkt(skb->data, frame_len);
			}
			stmmac_get_rx_hwtstamp(priv, p, np, skb);

			stmmac_rx_vlan(priv->dev, skb);

			skb->protocol = eth_type_trans(skb, priv->dev);

			if (unlikely(!coe))
				skb_checksum_none_assert(skb);
			else
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			napi_gro_receive(&priv->napi, skb);

			priv->dev->stats.rx_packets++;
			priv->dev->stats.rx_bytes += frame_len;
		}
		entry = next_entry;
	}

	stmmac_rx_refill(priv);

	priv->xstats.rx_pkt_n += count;

	return count;
}

/**
 *  stmmac_poll - stmmac poll method (NAPI)
 *  @napi : pointer to the napi structure.
 *  @budget : maximum number of packets that the current CPU can receive from
 *	      all interfaces.
 *  Description :
 *  To look at the incoming frames and clear the tx resources.
 */
static int stmmac_poll(struct napi_struct *napi, int budget)
{
	struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
	int work_done = 0;

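	/* Reclaim completed TX descriptors first, then service the RX ring
	 * up to the NAPI budget.
	 */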
	priv->xstats.napi_poll++;
	stmmac_tx_clean(priv);
	work_done = stmmac_rx(priv, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		stmmac_enable_dma_irq(priv);
	}
	return work_done;
}

/**
 *  stmmac_tx_timeout
 *  @dev : Pointer to net device structure
 *  Description: this function is called when a packet transmission fails to
 *   complete within a reasonable time. The driver will mark the error in the
 *   netdev structure and arrange for the device to be reset to a sane state
 *   in order to transmit a new packet.
 */
static void stmmac_tx_timeout(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Clear Tx resources and restart transmitting again */
	stmmac_tx_err(priv);
}

/**
 *  stmmac_set_rx_mode - entry point for multicast addressing
 *  @dev : pointer to the device structure
 *  Description:
 *  This function is a driver entry point which gets called by the kernel
 *  whenever multicast addresses must be enabled/disabled.
 *  Return value:
 *  void.
 */
static void stmmac_set_rx_mode(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	priv->hw->mac->set_filter(priv->hw, dev);
}

/**
 *  stmmac_change_mtu - entry point to change MTU size for the device.
 *  @dev : device pointer.
 *  @new_mtu : the new MTU size for the device.
 *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
 *  to drive packet transmission. Ethernet has an MTU of 1500 octets
 *  (ETH_DATA_LEN). This value can be changed with ifconfig.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (netif_running(dev)) {
		netdev_err(priv->dev, "must be stopped to change its MTU\n");
		return -EBUSY;
	}

	dev->mtu = new_mtu;

	netdev_update_features(dev);

	return 0;
}

static netdev_features_t stmmac_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
		features &= ~NETIF_F_RXCSUM;
	if (!priv->plat->tx_coe)
		features &= ~NETIF_F_CSUM_MASK;
	/* Some GMAC devices have a bugged Jumbo frame support that
	 * needs to have the Tx COE disabled for oversized frames
	 * (due to limited buffer sizes). In this case we disable
	 * the TX csum insertion in the TDES and not use SF.
	 */
	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
		features &= ~NETIF_F_CSUM_MASK;
	/* Disable tso if asked by ethtool */
	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
		if (features & NETIF_F_TSO)
			priv->tso = true;
		else
			priv->tso = false;
	}

	return features;
}

static int stmmac_set_features(struct net_device *netdev,
			       netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(netdev);

	/* Keep the COE Type in case of csum is supporting */
	if (features & NETIF_F_RXCSUM)
		priv->hw->rx_csum = priv->plat->rx_coe;
	else
		priv->hw->rx_csum = 0;
	/* No check needed because rx_coe has been set before and it will be
	 * fixed in case of issue.
	 */
	priv->hw->mac->rx_ipc(priv->hw);

	return 0;
}

/**
 *  stmmac_interrupt - main ISR
 *  @irq: interrupt number.
 *  @dev_id: to pass the net device pointer.
 *  Description: this is the main driver interrupt service routine.
 *  It can call:
 *  o DMA service routine (to manage incoming frame reception and transmission
 *    status)
 *  o Core interrupts to manage: remote wake-up, management counter, LPI
 *    interrupts.
 */
static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct stmmac_priv *priv = netdev_priv(dev);

	if (priv->irq_wake)
		pm_wakeup_event(priv->device, 0);

	if (unlikely(!dev)) {
		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
		return IRQ_NONE;
	}

	/* To handle GMAC own interrupts */
	if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
		int status = priv->hw->mac->host_irq_status(priv->hw,
							    &priv->xstats);

		if (priv->synopsys_id >= DWMAC_CORE_4_00)
			status |= priv->hw->mac->host_mtl_irq_status(priv->hw,
								STMMAC_CHAN0);

		if (unlikely(status)) {
			/* For LPI we need to save the tx status */
			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
				priv->tx_path_in_lpi_mode = true;
			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
				priv->tx_path_in_lpi_mode = false;
			if (status & CORE_IRQ_MTL_RX_OVERFLOW && priv->hw->dma->set_rx_tail_ptr)
				priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
							priv->rx_tail_addr,
							STMMAC_CHAN0);
		}

		/* PCS link status */
		if (priv->hw->pcs) {
			if (priv->xstats.pcs_link)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);
		}
	}
	/* To handle DMA interrupts */
	stmmac_dma_interrupt(priv);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by NETCONSOLE and other diagnostic tools
 * to allow network I/O with interrupts disabled.
 */
static void stmmac_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	stmmac_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

/**
 *  stmmac_ioctl - Entry point for the Ioctl
 *  @dev: Device pointer.
 *  @rq: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  @cmd: IOCTL command
 *  Description:
 *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
 */
static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	int ret = -EOPNOTSUPP;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (!dev->phydev)
			return -EINVAL;
		ret = phy_mii_ioctl(dev->phydev, rq, cmd);
		break;
	case SIOCSHWTSTAMP:
		ret = stmmac_hwtstamp_ioctl(dev, rq);
		break;
	default:
		break;
	}
	return ret;
}

#ifdef CONFIG_DEBUG_FS
static struct dentry *stmmac_fs_dir;

static void sysfs_display_ring(void *head, int size, int extend_desc,
			       struct seq_file *seq)
{
	int i;
	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
	struct dma_desc *p = (struct dma_desc *)head;
	for (i = 0; i < size; i++) {
		if (extend_desc) {
			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, (unsigned int)virt_to_phys(ep),
				   le32_to_cpu(ep->basic.des0),
				   le32_to_cpu(ep->basic.des1),
				   le32_to_cpu(ep->basic.des2),
				   le32_to_cpu(ep->basic.des3));
			ep++;
		} else {
			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, (unsigned int)virt_to_phys(ep),
				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
			p++;
		}
		seq_printf(seq, "\n");
	}
}

static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);
	if (priv->extend_desc) {
		seq_printf(seq, "Extended RX descriptor ring:\n");
		sysfs_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1, seq);
		seq_printf(seq, "Extended TX descriptor ring:\n");
		sysfs_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1, seq);
	} else {
		seq_printf(seq, "RX descriptor ring:\n");
		sysfs_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0, seq);
		seq_printf(seq, "TX descriptor ring:\n");
		sysfs_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0, seq);
	}

	return 0;
}

static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
{
	return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
}

/* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */

static const struct file_operations stmmac_rings_status_fops = {
	.owner = THIS_MODULE,
	.open = stmmac_sysfs_ring_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);

	if (!priv->hw_cap_support) {
		seq_printf(seq, "DMA HW features not supported\n");
		return 0;
	}

	seq_printf(seq, "==============================\n");
	seq_printf(seq, "\tDMA HW features\n");
	seq_printf(seq, "==============================\n");

	seq_printf(seq, "\t10/100 Mbps: %s\n",
		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
	seq_printf(seq, "\t1000 Mbps: %s\n",
		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
	seq_printf(seq, "\tHalf duplex: %s\n",
		   (priv->dma_cap.half_duplex) ? "Y" : "N");
	seq_printf(seq, "\tHash Filter: %s\n",
		   (priv->dma_cap.hash_filter) ? "Y" : "N");
	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
		   (priv->dma_cap.multi_addr) ? "Y" : "N");
	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
		   (priv->dma_cap.pcs) ? "Y" : "N");
	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
	seq_printf(seq, "\tPMT Remote wake up: %s\n",
		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
	seq_printf(seq, "\tPMT Magic Frame: %s\n",
		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
	seq_printf(seq, "\tRMON module: %s\n",
		   (priv->dma_cap.rmon) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
		   (priv->dma_cap.time_stamp) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
		   (priv->dma_cap.eee) ? "Y" : "N");
	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
		   (priv->dma_cap.tx_coe) ? "Y" : "N");
	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
			   (priv->dma_cap.rx_coe) ? "Y" : "N");
	} else {
		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
	}
	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
		   priv->dma_cap.number_rx_channel);
	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
		   priv->dma_cap.number_tx_channel);
	seq_printf(seq, "\tEnhanced descriptors: %s\n",
		   (priv->dma_cap.enh_desc) ? "Y" : "N");

	return 0;
}

static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
{
	return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
}

static const struct file_operations stmmac_dma_cap_fops = {
	.owner = THIS_MODULE,
	.open = stmmac_sysfs_dma_cap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int stmmac_init_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Create per netdev entries */
	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
	if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
		netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");

		return -ENOMEM;
	}

	/* Entry to report DMA RX/TX rings */
	priv->dbgfs_rings_status =
		debugfs_create_file("descriptors_status", S_IRUGO,
				    priv->dbgfs_dir, dev,
				    &stmmac_rings_status_fops);
	if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
		netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
		debugfs_remove_recursive(priv->dbgfs_dir);

		return -ENOMEM;
	}

	/* Entry to report the DMA HW features */
	priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
					    priv->dbgfs_dir,
					    dev, &stmmac_dma_cap_fops);
	if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
		netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
		debugfs_remove_recursive(priv->dbgfs_dir);

		return -ENOMEM;
	}

	return 0;
}

static void stmmac_exit_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	debugfs_remove_recursive(priv->dbgfs_dir);
}
#endif /* CONFIG_DEBUG_FS */
static const struct net_device_ops stmmac_netdev_ops = {
	.ndo_open = stmmac_open,
	.ndo_start_xmit = stmmac_xmit,
	.ndo_stop = stmmac_release,
	.ndo_change_mtu = stmmac_change_mtu,
	.ndo_fix_features = stmmac_fix_features,
	.ndo_set_features = stmmac_set_features,
	.ndo_set_rx_mode = stmmac_set_rx_mode,
	.ndo_tx_timeout = stmmac_tx_timeout,
	.ndo_do_ioctl = stmmac_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = stmmac_poll_controller,
#endif
	.ndo_set_mac_address = eth_mac_addr,
};

/**
 *  stmmac_hw_init - Init the MAC device
 *  @priv: driver private structure
 *  Description: this function is to configure the MAC device according to
 *  some platform parameters or the HW capability register. It prepares the
 *  driver to use either ring or chain modes and to setup either enhanced or
 *  normal descriptors.
 */
static int stmmac_hw_init(struct stmmac_priv *priv)
{
	struct mac_device_info *mac;

	/* Identify the MAC HW device */
	if (priv->plat->has_gmac) {
		priv->dev->priv_flags |= IFF_UNICAST_FLT;
		mac = dwmac1000_setup(priv->ioaddr,
				      priv->plat->multicast_filter_bins,
				      priv->plat->unicast_filter_entries,
				      &priv->synopsys_id);
	} else if (priv->plat->has_gmac4) {
		priv->dev->priv_flags |= IFF_UNICAST_FLT;
		mac = dwmac4_setup(priv->ioaddr,
				   priv->plat->multicast_filter_bins,
				   priv->plat->unicast_filter_entries,
				   &priv->synopsys_id);
	} else {
		mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
	}
	if (!mac)
		return -ENOMEM;

	priv->hw = mac;

	/* To use the chained or ring mode */
	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		priv->hw->mode = &dwmac4_ring_mode_ops;
	} else {
		if (chain_mode) {
			priv->hw->mode = &chain_mode_ops;
			dev_info(priv->device, "Chain mode enabled\n");
			priv->mode = STMMAC_CHAIN_MODE;
		} else {
			priv->hw->mode = &ring_mode_ops;
			dev_info(priv->device, "Ring mode enabled\n");
			priv->mode = STMMAC_RING_MODE;
		}
	}

	/* Get the HW capability (new GMAC newer than 3.50a) */
	priv->hw_cap_support = stmmac_get_hw_features(priv);
	if (priv->hw_cap_support) {
		dev_info(priv->device, "DMA HW capability register supported\n");

		/* We can override some gmac/dma configuration fields: e.g.
		 * enh_desc, tx_coe (e.g. that are passed through the
		 * platform) with the values from the HW capability
		 * register (if supported).
		 */
		priv->plat->enh_desc = priv->dma_cap.enh_desc;
		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
		priv->hw->pmt = priv->plat->pmt;
		/* TXCOE doesn't work in thresh DMA mode */
		if (priv->plat->force_thresh_dma_mode)
			priv->plat->tx_coe = 0;
		else
			priv->plat->tx_coe = priv->dma_cap.tx_coe;

		/* In case of GMAC4, rx_coe comes from the HW cap register. */
		priv->plat->rx_coe = priv->dma_cap.rx_coe;

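		/* Note: Type 1 RX COE validates the IPv4 header checksum
		 * only, while Type 2 can also validate the TCP/UDP/ICMP
		 * payload checksum.
		 */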
		if (priv->dma_cap.rx_coe_type2)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
		else if (priv->dma_cap.rx_coe_type1)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;

	} else {
		dev_info(priv->device, "No HW DMA feature register supported\n");
	}

	/* To use alternate (extended), normal or GMAC4 descriptor structures */
	if (priv->synopsys_id >= DWMAC_CORE_4_00)
		priv->hw->desc = &dwmac4_desc_ops;
	else
		stmmac_selec_desc_mode(priv);
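	/* (stmmac_selec_desc_mode(), defined earlier in this file, picks
	 * the enhanced or normal descriptor callbacks based on
	 * plat->enh_desc.)
	 */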

	if (priv->plat->rx_coe) {
		priv->hw->rx_csum = priv->plat->rx_coe;
		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
		if (priv->synopsys_id < DWMAC_CORE_4_00)
			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
	}
	if (priv->plat->tx_coe)
		dev_info(priv->device, "TX Checksum insertion supported\n");

	if (priv->plat->pmt) {
		dev_info(priv->device, "Wake-Up On Lan supported\n");
		device_set_wakeup_capable(priv->device, 1);
	}

	if (priv->dma_cap.tsoen)
		dev_info(priv->device, "TSO supported\n");

	return 0;
}

/**
 * stmmac_dvr_probe
 * @device: device pointer
 * @plat_dat: platform data pointer
 * @res: stmmac resource pointer
 * Description: this is the main probe function; it allocates the
 * net_device via alloc_etherdev, sets up the private structure and
 * registers the device.
 * Return:
 * 0 on success, otherwise a negative errno.
 */
int stmmac_dvr_probe(struct device *device,
		     struct plat_stmmacenet_data *plat_dat,
		     struct stmmac_resources *res)
{
	int ret = 0;
	struct net_device *ndev = NULL;
	struct stmmac_priv *priv;

	ndev = alloc_etherdev(sizeof(struct stmmac_priv));
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, device);

	priv = netdev_priv(ndev);
	priv->device = device;
	priv->dev = ndev;

	stmmac_set_ethtool_ops(ndev);
	priv->pause = pause;
	priv->plat = plat_dat;
	priv->ioaddr = res->addr;
	priv->dev->base_addr = (unsigned long)res->addr;

	priv->dev->irq = res->irq;
	priv->wol_irq = res->wol_irq;
	priv->lpi_irq = res->lpi_irq;

	if (res->mac)
		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);

	dev_set_drvdata(device, priv->dev);

	/* Verify driver arguments */
	stmmac_verify_args();

	/* Override with kernel parameters if supplied XXX CRS XXX
	 * this needs to have multiple instances
	 */
	if ((phyaddr >= 0) && (phyaddr <= 31))
		priv->plat->phy_addr = phyaddr;

	if (priv->plat->stmmac_rst)
		reset_control_deassert(priv->plat->stmmac_rst);

	/* Init MAC and get the capabilities */
	ret = stmmac_hw_init(priv);
	if (ret)
		goto error_hw_init;

	ndev->netdev_ops = &stmmac_netdev_ops;

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_RXCSUM;

	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
		ndev->hw_features |= NETIF_F_TSO;
		priv->tso = true;
		dev_info(priv->device, "TSO feature enabled\n");
	}
	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
	/* Both mac100 and gmac support receive VLAN tag detection */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
#endif
	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	/* MTU range: 46 - hw-specific max */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
		ndev->max_mtu = JUMBO_LEN;
	else
		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
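	/* (ETH_ZLEN (60) minus ETH_HLEN (14) yields the 46-byte minimum
	 * MTU noted above.)
	 */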
	/* Do not overwrite ndev->max_mtu if plat->maxmtu is greater than
	 * ndev->max_mtu, or if plat->maxmtu is below ndev->min_mtu, which
	 * is an invalid range.
	 */
	if ((priv->plat->maxmtu < ndev->max_mtu) &&
	    (priv->plat->maxmtu >= ndev->min_mtu))
		ndev->max_mtu = priv->plat->maxmtu;
	else if (priv->plat->maxmtu < ndev->min_mtu)
		dev_warn(priv->device,
			 "%s: warning: maxmtu having invalid value (%d)\n",
			 __func__, priv->plat->maxmtu);

	if (flow_ctrl)
		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */

	/* Rx Watchdog is available in cores newer than 3.40.
	 * In some cases, for example on buggy HW, this feature
	 * has to be disabled; this can be done by passing the
	 * riwt_off field from the platform.
	 */
	if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
		priv->use_riwt = 1;
		dev_info(priv->device,
			 "Enable RX Mitigation via HW Watchdog Timer\n");
	}

	netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);

	spin_lock_init(&priv->lock);

	/* If a specific clk_csr value is passed from the platform, the
	 * CSR Clock Range selection cannot be changed at run time and is
	 * fixed. Otherwise, the driver will try to set the MDC clock
	 * dynamically according to the actual CSR clock input.
	 */
	if (!priv->plat->clk_csr)
		stmmac_clk_csr_set(priv);
	else
		priv->clk_csr = priv->plat->clk_csr;
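	/* (stmmac_clk_csr_set(), defined earlier in this file, derives the
	 * CSR range from the stmmac_clk rate; e.g. a 35-60 MHz clock would
	 * map to STMMAC_CSR_35_60M.)
	 */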

	stmmac_check_pcs_mode(priv);

	if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI) {
		/* MDIO bus Registration */
		ret = stmmac_mdio_register(ndev);
		if (ret < 0) {
			dev_err(priv->device,
				"%s: MDIO bus (id: %d) registration failed\n",
				__func__, priv->plat->bus_id);
			goto error_mdio_register;
		}
	}

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->device, "%s: ERROR %i registering the device\n",
			__func__, ret);
		goto error_netdev_register;
	}

	return ret;

error_netdev_register:
	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
error_mdio_register:
	netif_napi_del(&priv->napi);
error_hw_init:
	free_netdev(ndev);

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_probe);

/**
 * stmmac_dvr_remove
 * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, changes the link status and releases the DMA descriptor rings.
 */
int stmmac_dvr_remove(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	netdev_info(priv->dev, "%s: removing driver\n", __func__);

	priv->hw->dma->stop_rx(priv->ioaddr);
	priv->hw->dma->stop_tx(priv->ioaddr);

	stmmac_set_mac(priv->ioaddr, false);
	netif_carrier_off(ndev);
	unregister_netdev(ndev);
	if (priv->plat->stmmac_rst)
		reset_control_assert(priv->plat->stmmac_rst);
	clk_disable_unprepare(priv->plat->pclk);
	clk_disable_unprepare(priv->plat->stmmac_clk);
	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
	free_netdev(ndev);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);

/**
 * stmmac_suspend - suspend callback
 * @dev: device pointer
 * Description: this is the function to suspend the device and it is called
 * by the platform driver to stop the network queue, release the resources,
 * program the PMT register (for WoL), clean and release driver resources.
 */
int stmmac_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	if (!ndev || !netif_running(ndev))
		return 0;

	if (ndev->phydev)
		phy_stop(ndev->phydev);

	spin_lock_irqsave(&priv->lock, flags);

	netif_device_detach(ndev);
	netif_stop_queue(ndev);

	napi_disable(&priv->napi);

	/* Stop TX/RX DMA */
	priv->hw->dma->stop_tx(priv->ioaddr);
	priv->hw->dma->stop_rx(priv->ioaddr);

	/* Enable Power down mode by programming the PMT regs */
	if (device_may_wakeup(priv->device)) {
		priv->hw->mac->pmt(priv->hw, priv->wolopts);
		priv->irq_wake = 1;
	} else {
		stmmac_set_mac(priv->ioaddr, false);
		pinctrl_pm_select_sleep_state(priv->device);
		/* Disable clocks when PM wake-up is off */
		clk_disable(priv->plat->pclk);
		clk_disable(priv->plat->stmmac_clk);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	priv->oldlink = 0;
	priv->speed = SPEED_UNKNOWN;
	priv->oldduplex = DUPLEX_UNKNOWN;
	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);

/**
 * stmmac_resume - resume callback
 * @dev: device pointer
 * Description: when resuming, this function is invoked to set up the DMA
 * and CORE in a usable state.
 */
int stmmac_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	if (!netif_running(ndev))
		return 0;

	/* The Power Down bit in the PMT register is cleared automatically
	 * as soon as a magic packet or a Wake-up frame is received.
	 * Anyway, it's better to clear this bit manually because it can
	 * cause problems while resuming from other devices
	 * (e.g. serial console).
	 */
	if (device_may_wakeup(priv->device)) {
		spin_lock_irqsave(&priv->lock, flags);
		priv->hw->mac->pmt(priv->hw, 0);
		spin_unlock_irqrestore(&priv->lock, flags);
		priv->irq_wake = 0;
	} else {
		pinctrl_pm_select_default_state(priv->device);
		/* Enable the clocks previously disabled in suspend */
		clk_enable(priv->plat->stmmac_clk);
		clk_enable(priv->plat->pclk);
		/* reset the phy so that it's ready */
		if (priv->mii)
			stmmac_mdio_reset(priv->mii);
	}

	netif_device_attach(ndev);

	spin_lock_irqsave(&priv->lock, flags);

	priv->cur_rx = 0;
	priv->dirty_rx = 0;
	priv->dirty_tx = 0;
	priv->cur_tx = 0;
	/* reset private mss value to force mss context settings at
	 * next tso xmit (only used for gmac4).
	 */
	priv->mss = 0;

	stmmac_clear_descriptors(priv);

	stmmac_hw_setup(ndev, false);
	stmmac_init_tx_coalesce(priv);
	stmmac_set_rx_mode(ndev);

	napi_enable(&priv->napi);

	netif_start_queue(ndev);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (ndev->phydev)
		phy_start(ndev->phydev);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_resume);

#ifndef MODULE
static int __init stmmac_cmdline_opt(char *str)
{
	char *opt;

	if (!str || !*str)
		return -EINVAL;
	while ((opt = strsep(&str, ",")) != NULL) {
		if (!strncmp(opt, "debug:", 6)) {
			if (kstrtoint(opt + 6, 0, &debug))
				goto err;
		} else if (!strncmp(opt, "phyaddr:", 8)) {
			if (kstrtoint(opt + 8, 0, &phyaddr))
				goto err;
		} else if (!strncmp(opt, "buf_sz:", 7)) {
			if (kstrtoint(opt + 7, 0, &buf_sz))
				goto err;
		} else if (!strncmp(opt, "tc:", 3)) {
			if (kstrtoint(opt + 3, 0, &tc))
				goto err;
		} else if (!strncmp(opt, "watchdog:", 9)) {
			if (kstrtoint(opt + 9, 0, &watchdog))
				goto err;
		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
			if (kstrtoint(opt + 10, 0, &flow_ctrl))
				goto err;
		} else if (!strncmp(opt, "pause:", 6)) {
			if (kstrtoint(opt + 6, 0, &pause))
				goto err;
		} else if (!strncmp(opt, "eee_timer:", 10)) {
			if (kstrtoint(opt + 10, 0, &eee_timer))
				goto err;
		} else if (!strncmp(opt, "chain_mode:", 11)) {
			if (kstrtoint(opt + 11, 0, &chain_mode))
				goto err;
		}
	}
	return 0;

err:
	pr_err("%s: ERROR broken module parameter conversion", __func__);
	return -EINVAL;
}

__setup("stmmaceth=", stmmac_cmdline_opt);
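
/* Example (built-in driver only, as guarded above; hypothetical values):
 * booting with "stmmaceth=debug:16,phyaddr:1,watchdog:4000" raises the
 * message level, forces the PHY address and sets a 4000 ms TX timeout;
 * options are comma-separated "name:value" pairs as parsed above.
 */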
#endif /* MODULE */

static int __init stmmac_init(void)
{
#ifdef CONFIG_DEBUG_FS
	/* Create debugfs main directory if it doesn't exist yet */
	if (!stmmac_fs_dir) {
		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);

		if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
			pr_err("ERROR %s, debugfs create directory failed\n",
			       STMMAC_RESOURCE_NAME);

			return -ENOMEM;
		}
	}
#endif

	return 0;
}

static void __exit stmmac_exit(void)
{
#ifdef CONFIG_DEBUG_FS
	debugfs_remove_recursive(stmmac_fs_dir);
#endif
}

module_init(stmmac_init)
module_exit(stmmac_exit)

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");