// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  STMMAC Ethtool support

  Copyright (C) 2007-2009  STMicroelectronics Ltd


  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/mii.h>
#include <linux/phylink.h>
#include <linux/net_tstamp.h>
#include <asm/io.h>

#include "stmmac.h"
#include "dwmac_dma.h"

/* Size of the register window exported by ethtool -d */
#define REG_SPACE_SIZE	0x1060
#define MAC100_ETHTOOL_NAME	"st_mac100"
#define GMAC_ETHTOOL_NAME	"st_gmac"

/* Word index inside the reg dump where the DMA registers are copied */
#define ETHTOOL_DMA_OFFSET	55

28 29 30 31 32 33 34 35 36 37
struct stmmac_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define STMMAC_STAT(m)	\
	{ #m, FIELD_SIZEOF(struct stmmac_extra_stats, m),	\
	offsetof(struct stmmac_priv, xstats.m)}

38
static const struct stmmac_stats stmmac_gstrings_stats[] = {
39
	/* Transmit errors */
40 41 42
	STMMAC_STAT(tx_underflow),
	STMMAC_STAT(tx_carrier),
	STMMAC_STAT(tx_losscarrier),
43
	STMMAC_STAT(vlan_tag),
44 45 46 47 48 49
	STMMAC_STAT(tx_deferred),
	STMMAC_STAT(tx_vlan),
	STMMAC_STAT(tx_jabber),
	STMMAC_STAT(tx_frame_flushed),
	STMMAC_STAT(tx_payload_error),
	STMMAC_STAT(tx_ip_header_error),
50
	/* Receive errors */
51
	STMMAC_STAT(rx_desc),
52 53 54
	STMMAC_STAT(sa_filter_fail),
	STMMAC_STAT(overflow_error),
	STMMAC_STAT(ipc_csum_error),
55
	STMMAC_STAT(rx_collision),
56
	STMMAC_STAT(rx_crc_errors),
57
	STMMAC_STAT(dribbling_bit),
58
	STMMAC_STAT(rx_length),
59 60 61 62 63 64 65 66
	STMMAC_STAT(rx_mii),
	STMMAC_STAT(rx_multicast),
	STMMAC_STAT(rx_gmac_overflow),
	STMMAC_STAT(rx_watchdog),
	STMMAC_STAT(da_rx_filter_fail),
	STMMAC_STAT(sa_rx_filter_fail),
	STMMAC_STAT(rx_missed_cntr),
	STMMAC_STAT(rx_overflow_cntr),
67
	STMMAC_STAT(rx_vlan),
68
	/* Tx/Rx IRQ error info */
69 70 71 72 73 74 75 76 77
	STMMAC_STAT(tx_undeflow_irq),
	STMMAC_STAT(tx_process_stopped_irq),
	STMMAC_STAT(tx_jabber_irq),
	STMMAC_STAT(rx_overflow_irq),
	STMMAC_STAT(rx_buf_unav_irq),
	STMMAC_STAT(rx_process_stopped_irq),
	STMMAC_STAT(rx_watchdog_irq),
	STMMAC_STAT(tx_early_irq),
	STMMAC_STAT(fatal_bus_error_irq),
78 79
	/* Tx/Rx IRQ Events */
	STMMAC_STAT(rx_early_irq),
80 81 82
	STMMAC_STAT(threshold),
	STMMAC_STAT(tx_pkt_n),
	STMMAC_STAT(rx_pkt_n),
83
	STMMAC_STAT(normal_irq_n),
84 85 86 87
	STMMAC_STAT(rx_normal_irq_n),
	STMMAC_STAT(napi_poll),
	STMMAC_STAT(tx_normal_irq_n),
	STMMAC_STAT(tx_clean),
88
	STMMAC_STAT(tx_set_ic_bit),
89 90
	STMMAC_STAT(irq_receive_pmt_irq_n),
	/* MMC info */
91 92 93
	STMMAC_STAT(mmc_tx_irq_n),
	STMMAC_STAT(mmc_rx_irq_n),
	STMMAC_STAT(mmc_rx_csum_offload_irq_n),
94
	/* EEE */
95 96 97 98 99
	STMMAC_STAT(irq_tx_path_in_lpi_mode_n),
	STMMAC_STAT(irq_tx_path_exit_lpi_mode_n),
	STMMAC_STAT(irq_rx_path_in_lpi_mode_n),
	STMMAC_STAT(irq_rx_path_exit_lpi_mode_n),
	STMMAC_STAT(phy_eee_wakeup_error_n),
100 101 102 103 104 105
	/* Extended RDES status */
	STMMAC_STAT(ip_hdr_err),
	STMMAC_STAT(ip_payload_err),
	STMMAC_STAT(ip_csum_bypassed),
	STMMAC_STAT(ipv4_pkt_rcvd),
	STMMAC_STAT(ipv6_pkt_rcvd),
106 107 108 109 110 111 112 113 114 115 116
	STMMAC_STAT(no_ptp_rx_msg_type_ext),
	STMMAC_STAT(ptp_rx_msg_type_sync),
	STMMAC_STAT(ptp_rx_msg_type_follow_up),
	STMMAC_STAT(ptp_rx_msg_type_delay_req),
	STMMAC_STAT(ptp_rx_msg_type_delay_resp),
	STMMAC_STAT(ptp_rx_msg_type_pdelay_req),
	STMMAC_STAT(ptp_rx_msg_type_pdelay_resp),
	STMMAC_STAT(ptp_rx_msg_type_pdelay_follow_up),
	STMMAC_STAT(ptp_rx_msg_type_announce),
	STMMAC_STAT(ptp_rx_msg_type_management),
	STMMAC_STAT(ptp_rx_msg_pkt_reserved_type),
117 118 119 120 121 122 123 124 125
	STMMAC_STAT(ptp_frame_type),
	STMMAC_STAT(ptp_ver),
	STMMAC_STAT(timestamp_dropped),
	STMMAC_STAT(av_pkt_rcvd),
	STMMAC_STAT(av_tagged_pkt_rcvd),
	STMMAC_STAT(vlan_tag_priority_val),
	STMMAC_STAT(l3_filter_match),
	STMMAC_STAT(l4_filter_match),
	STMMAC_STAT(l3_l4_filter_no_match),
126 127 128 129
	/* PCS */
	STMMAC_STAT(irq_pcs_ane_n),
	STMMAC_STAT(irq_pcs_link_n),
	STMMAC_STAT(irq_rgmii_n),
130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154
	/* DEBUG */
	STMMAC_STAT(mtl_tx_status_fifo_full),
	STMMAC_STAT(mtl_tx_fifo_not_empty),
	STMMAC_STAT(mmtl_fifo_ctrl),
	STMMAC_STAT(mtl_tx_fifo_read_ctrl_write),
	STMMAC_STAT(mtl_tx_fifo_read_ctrl_wait),
	STMMAC_STAT(mtl_tx_fifo_read_ctrl_read),
	STMMAC_STAT(mtl_tx_fifo_read_ctrl_idle),
	STMMAC_STAT(mac_tx_in_pause),
	STMMAC_STAT(mac_tx_frame_ctrl_xfer),
	STMMAC_STAT(mac_tx_frame_ctrl_idle),
	STMMAC_STAT(mac_tx_frame_ctrl_wait),
	STMMAC_STAT(mac_tx_frame_ctrl_pause),
	STMMAC_STAT(mac_gmii_tx_proto_engine),
	STMMAC_STAT(mtl_rx_fifo_fill_level_full),
	STMMAC_STAT(mtl_rx_fifo_fill_above_thresh),
	STMMAC_STAT(mtl_rx_fifo_fill_below_thresh),
	STMMAC_STAT(mtl_rx_fifo_fill_level_empty),
	STMMAC_STAT(mtl_rx_fifo_read_ctrl_flush),
	STMMAC_STAT(mtl_rx_fifo_read_ctrl_read_data),
	STMMAC_STAT(mtl_rx_fifo_read_ctrl_status),
	STMMAC_STAT(mtl_rx_fifo_read_ctrl_idle),
	STMMAC_STAT(mtl_rx_fifo_ctrl_active),
	STMMAC_STAT(mac_rx_frame_ctrl_fifo),
	STMMAC_STAT(mac_gmii_rx_proto_engine),
A
Alexandre TORGUE 已提交
155 156 157
	/* TSO */
	STMMAC_STAT(tx_tso_frames),
	STMMAC_STAT(tx_tso_nfrags),
158 159 160
};
#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)

161 162 163 164 165
/* HW MAC Management counters (if supported) */
#define STMMAC_MMC_STAT(m)	\
	{ #m, FIELD_SIZEOF(struct stmmac_counters, m),	\
	offsetof(struct stmmac_priv, mmc.m)}

166
static const struct stmmac_stats stmmac_mmc[] = {
167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196
	STMMAC_MMC_STAT(mmc_tx_octetcount_gb),
	STMMAC_MMC_STAT(mmc_tx_framecount_gb),
	STMMAC_MMC_STAT(mmc_tx_broadcastframe_g),
	STMMAC_MMC_STAT(mmc_tx_multicastframe_g),
	STMMAC_MMC_STAT(mmc_tx_64_octets_gb),
	STMMAC_MMC_STAT(mmc_tx_65_to_127_octets_gb),
	STMMAC_MMC_STAT(mmc_tx_128_to_255_octets_gb),
	STMMAC_MMC_STAT(mmc_tx_256_to_511_octets_gb),
	STMMAC_MMC_STAT(mmc_tx_512_to_1023_octets_gb),
	STMMAC_MMC_STAT(mmc_tx_1024_to_max_octets_gb),
	STMMAC_MMC_STAT(mmc_tx_unicast_gb),
	STMMAC_MMC_STAT(mmc_tx_multicast_gb),
	STMMAC_MMC_STAT(mmc_tx_broadcast_gb),
	STMMAC_MMC_STAT(mmc_tx_underflow_error),
	STMMAC_MMC_STAT(mmc_tx_singlecol_g),
	STMMAC_MMC_STAT(mmc_tx_multicol_g),
	STMMAC_MMC_STAT(mmc_tx_deferred),
	STMMAC_MMC_STAT(mmc_tx_latecol),
	STMMAC_MMC_STAT(mmc_tx_exesscol),
	STMMAC_MMC_STAT(mmc_tx_carrier_error),
	STMMAC_MMC_STAT(mmc_tx_octetcount_g),
	STMMAC_MMC_STAT(mmc_tx_framecount_g),
	STMMAC_MMC_STAT(mmc_tx_excessdef),
	STMMAC_MMC_STAT(mmc_tx_pause_frame),
	STMMAC_MMC_STAT(mmc_tx_vlan_frame_g),
	STMMAC_MMC_STAT(mmc_rx_framecount_gb),
	STMMAC_MMC_STAT(mmc_rx_octetcount_gb),
	STMMAC_MMC_STAT(mmc_rx_octetcount_g),
	STMMAC_MMC_STAT(mmc_rx_broadcastframe_g),
	STMMAC_MMC_STAT(mmc_rx_multicastframe_g),
197
	STMMAC_MMC_STAT(mmc_rx_crc_error),
198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246
	STMMAC_MMC_STAT(mmc_rx_align_error),
	STMMAC_MMC_STAT(mmc_rx_run_error),
	STMMAC_MMC_STAT(mmc_rx_jabber_error),
	STMMAC_MMC_STAT(mmc_rx_undersize_g),
	STMMAC_MMC_STAT(mmc_rx_oversize_g),
	STMMAC_MMC_STAT(mmc_rx_64_octets_gb),
	STMMAC_MMC_STAT(mmc_rx_65_to_127_octets_gb),
	STMMAC_MMC_STAT(mmc_rx_128_to_255_octets_gb),
	STMMAC_MMC_STAT(mmc_rx_256_to_511_octets_gb),
	STMMAC_MMC_STAT(mmc_rx_512_to_1023_octets_gb),
	STMMAC_MMC_STAT(mmc_rx_1024_to_max_octets_gb),
	STMMAC_MMC_STAT(mmc_rx_unicast_g),
	STMMAC_MMC_STAT(mmc_rx_length_error),
	STMMAC_MMC_STAT(mmc_rx_autofrangetype),
	STMMAC_MMC_STAT(mmc_rx_pause_frames),
	STMMAC_MMC_STAT(mmc_rx_fifo_overflow),
	STMMAC_MMC_STAT(mmc_rx_vlan_frames_gb),
	STMMAC_MMC_STAT(mmc_rx_watchdog_error),
	STMMAC_MMC_STAT(mmc_rx_ipc_intr_mask),
	STMMAC_MMC_STAT(mmc_rx_ipc_intr),
	STMMAC_MMC_STAT(mmc_rx_ipv4_gd),
	STMMAC_MMC_STAT(mmc_rx_ipv4_hderr),
	STMMAC_MMC_STAT(mmc_rx_ipv4_nopay),
	STMMAC_MMC_STAT(mmc_rx_ipv4_frag),
	STMMAC_MMC_STAT(mmc_rx_ipv4_udsbl),
	STMMAC_MMC_STAT(mmc_rx_ipv4_gd_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv4_hderr_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv4_nopay_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv4_frag_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv4_udsbl_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv6_gd_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv6_hderr_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv6_nopay_octets),
	STMMAC_MMC_STAT(mmc_rx_ipv6_gd),
	STMMAC_MMC_STAT(mmc_rx_ipv6_hderr),
	STMMAC_MMC_STAT(mmc_rx_ipv6_nopay),
	STMMAC_MMC_STAT(mmc_rx_udp_gd),
	STMMAC_MMC_STAT(mmc_rx_udp_err),
	STMMAC_MMC_STAT(mmc_rx_tcp_gd),
	STMMAC_MMC_STAT(mmc_rx_tcp_err),
	STMMAC_MMC_STAT(mmc_rx_icmp_gd),
	STMMAC_MMC_STAT(mmc_rx_icmp_err),
	STMMAC_MMC_STAT(mmc_rx_udp_gd_octets),
	STMMAC_MMC_STAT(mmc_rx_udp_err_octets),
	STMMAC_MMC_STAT(mmc_rx_tcp_gd_octets),
	STMMAC_MMC_STAT(mmc_rx_tcp_err_octets),
	STMMAC_MMC_STAT(mmc_rx_icmp_gd_octets),
	STMMAC_MMC_STAT(mmc_rx_icmp_err_octets),
};
247
#define STMMAC_MMC_STATS_LEN ARRAY_SIZE(stmmac_mmc)
248

249 250
static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *info)
251 252 253
{
	struct stmmac_priv *priv = netdev_priv(dev);

254
	if (priv->plat->has_gmac || priv->plat->has_gmac4)
255
		strlcpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver));
256
	else
257 258
		strlcpy(info->driver, MAC100_ETHTOOL_NAME,
			sizeof(info->driver));
259

260
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
261 262
}

263 264
static int stmmac_ethtool_get_link_ksettings(struct net_device *dev,
					     struct ethtool_link_ksettings *cmd)
265 266
{
	struct stmmac_priv *priv = netdev_priv(dev);
267

268 269
	if (priv->hw->pcs & STMMAC_PCS_RGMII ||
	    priv->hw->pcs & STMMAC_PCS_SGMII) {
270
		struct rgmii_adv adv;
271
		u32 supported, advertising, lp_advertising;
272 273

		if (!priv->xstats.pcs_link) {
274 275
			cmd->base.speed = SPEED_UNKNOWN;
			cmd->base.duplex = DUPLEX_UNKNOWN;
276 277
			return 0;
		}
278
		cmd->base.duplex = priv->xstats.pcs_duplex;
279

280
		cmd->base.speed = priv->xstats.pcs_speed;
281 282

		/* Get and convert ADV/LP_ADV from the HW AN registers */
283
		if (stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv))
284 285 286 287
			return -EOPNOTSUPP;	/* should never happen indeed */

		/* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */

288 289 290 291 292 293 294
		ethtool_convert_link_mode_to_legacy_u32(
			&supported, cmd->link_modes.supported);
		ethtool_convert_link_mode_to_legacy_u32(
			&advertising, cmd->link_modes.advertising);
		ethtool_convert_link_mode_to_legacy_u32(
			&lp_advertising, cmd->link_modes.lp_advertising);

295
		if (adv.pause & STMMAC_PCS_PAUSE)
296
			advertising |= ADVERTISED_Pause;
297
		if (adv.pause & STMMAC_PCS_ASYM_PAUSE)
298
			advertising |= ADVERTISED_Asym_Pause;
299
		if (adv.lp_pause & STMMAC_PCS_PAUSE)
300
			lp_advertising |= ADVERTISED_Pause;
301
		if (adv.lp_pause & STMMAC_PCS_ASYM_PAUSE)
302
			lp_advertising |= ADVERTISED_Asym_Pause;
303 304

		/* Reg49[3] always set because ANE is always supported */
305 306 307 308
		cmd->base.autoneg = ADVERTISED_Autoneg;
		supported |= SUPPORTED_Autoneg;
		advertising |= ADVERTISED_Autoneg;
		lp_advertising |= ADVERTISED_Autoneg;
309 310

		if (adv.duplex) {
311 312 313 314 315 316
			supported |= (SUPPORTED_1000baseT_Full |
				      SUPPORTED_100baseT_Full |
				      SUPPORTED_10baseT_Full);
			advertising |= (ADVERTISED_1000baseT_Full |
					ADVERTISED_100baseT_Full |
					ADVERTISED_10baseT_Full);
317
		} else {
318 319 320 321 322 323
			supported |= (SUPPORTED_1000baseT_Half |
				      SUPPORTED_100baseT_Half |
				      SUPPORTED_10baseT_Half);
			advertising |= (ADVERTISED_1000baseT_Half |
					ADVERTISED_100baseT_Half |
					ADVERTISED_10baseT_Half);
324 325
		}
		if (adv.lp_duplex)
326 327 328
			lp_advertising |= (ADVERTISED_1000baseT_Full |
					   ADVERTISED_100baseT_Full |
					   ADVERTISED_10baseT_Full);
329
		else
330 331 332 333 334 335 336 337 338 339 340
			lp_advertising |= (ADVERTISED_1000baseT_Half |
					   ADVERTISED_100baseT_Half |
					   ADVERTISED_10baseT_Half);
		cmd->base.port = PORT_OTHER;

		ethtool_convert_legacy_u32_to_link_mode(
			cmd->link_modes.supported, supported);
		ethtool_convert_legacy_u32_to_link_mode(
			cmd->link_modes.advertising, advertising);
		ethtool_convert_legacy_u32_to_link_mode(
			cmd->link_modes.lp_advertising, lp_advertising);
341 342 343 344

		return 0;
	}

345
	return phylink_ethtool_ksettings_get(priv->phylink, cmd);
346 347
}

348 349 350
static int
stmmac_ethtool_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
351 352 353
{
	struct stmmac_priv *priv = netdev_priv(dev);

354 355
	if (priv->hw->pcs & STMMAC_PCS_RGMII ||
	    priv->hw->pcs & STMMAC_PCS_SGMII) {
356 357 358
		u32 mask = ADVERTISED_Autoneg | ADVERTISED_Pause;

		/* Only support ANE */
359
		if (cmd->base.autoneg != AUTONEG_ENABLE)
360 361
			return -EINVAL;

P
Pavel Machek 已提交
362
		mask &= (ADVERTISED_1000baseT_Half |
363 364 365 366 367 368
			ADVERTISED_1000baseT_Full |
			ADVERTISED_100baseT_Half |
			ADVERTISED_100baseT_Full |
			ADVERTISED_10baseT_Half |
			ADVERTISED_10baseT_Full);

369
		mutex_lock(&priv->lock);
370
		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
371
		mutex_unlock(&priv->lock);
372 373 374 375

		return 0;
	}

376
	return phylink_ethtool_ksettings_set(priv->phylink, cmd);
377 378
}

379
static u32 stmmac_ethtool_getmsglevel(struct net_device *dev)
380 381 382 383 384
{
	struct stmmac_priv *priv = netdev_priv(dev);
	return priv->msg_enable;
}

385
static void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level)
386 387 388 389 390 391
{
	struct stmmac_priv *priv = netdev_priv(dev);
	priv->msg_enable = level;

}

392
static int stmmac_check_if_running(struct net_device *dev)
393 394 395 396 397 398
{
	if (!netif_running(dev))
		return -EBUSY;
	return 0;
}

399
static int stmmac_ethtool_get_regs_len(struct net_device *dev)
400 401 402 403
{
	return REG_SPACE_SIZE;
}

404
static void stmmac_ethtool_gregs(struct net_device *dev,
405 406 407 408 409 410 411 412
			  struct ethtool_regs *regs, void *space)
{
	u32 *reg_space = (u32 *) space;

	struct stmmac_priv *priv = netdev_priv(dev);

	memset(reg_space, 0x0, REG_SPACE_SIZE);

413
	stmmac_dump_mac_regs(priv, priv->hw, reg_space);
414
	stmmac_dump_dma_regs(priv, priv->ioaddr, reg_space);
415 416 417
	/* Copy DMA registers to where ethtool expects them */
	memcpy(&reg_space[ETHTOOL_DMA_OFFSET], &reg_space[DMA_BUS_MODE / 4],
	       NUM_DWMAC1000_DMA_REGS * 4);
418 419
}

420 421 422 423 424 425 426
static int stmmac_nway_reset(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	return phylink_ethtool_nway_reset(priv->phylink);
}

427 428 429 430 431
static void
stmmac_get_pauseparam(struct net_device *netdev,
		      struct ethtool_pauseparam *pause)
{
	struct stmmac_priv *priv = netdev_priv(netdev);
432
	struct rgmii_adv adv_lp;
433

434
	if (priv->hw->pcs && !stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv_lp)) {
435 436 437 438
		pause->autoneg = 1;
		if (!adv_lp.pause)
			return;
	} else {
439
		phylink_ethtool_get_pauseparam(priv->phylink, pause);
440
	}
441 442 443 444 445 446 447
}

static int
stmmac_set_pauseparam(struct net_device *netdev,
		      struct ethtool_pauseparam *pause)
{
	struct stmmac_priv *priv = netdev_priv(netdev);
448
	struct rgmii_adv adv_lp;
449

450
	if (priv->hw->pcs && !stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv_lp)) {
451 452 453
		pause->autoneg = 1;
		if (!adv_lp.pause)
			return -EOPNOTSUPP;
454
		return 0;
455
	} else {
456
		return phylink_ethtool_set_pauseparam(priv->phylink, pause);
457
	}
458 459 460 461 462 463
}

static void stmmac_get_ethtool_stats(struct net_device *dev,
				 struct ethtool_stats *dummy, u64 *data)
{
	struct stmmac_priv *priv = netdev_priv(dev);
464 465
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
466
	unsigned long count;
467
	int i, j = 0, ret;
468

469
	if (priv->dma_cap.asp) {
470
		for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) {
471 472
			if (!stmmac_safety_feat_dump(priv, &priv->sstats, i,
						&count, NULL))
473 474 475 476
				data[j++] = count;
		}
	}

477
	/* Update the DMA HW counters for dwmac10/100 */
478 479 480
	ret = stmmac_dma_diagnostic_fr(priv, &dev->stats, (void *) &priv->xstats,
			priv->ioaddr);
	if (ret) {
481
		/* If supported, for new GMAC chips expose the MMC counters */
482
		if (priv->dma_cap.rmon) {
483
			stmmac_mmc_read(priv, priv->mmcaddr, &priv->mmc);
484

485 486 487
			for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) {
				char *p;
				p = (char *)priv + stmmac_mmc[i].stat_offset;
488

489 490 491 492
				data[j++] = (stmmac_mmc[i].sizeof_stat ==
					     sizeof(u64)) ? (*(u64 *)p) :
					     (*(u32 *)p);
			}
493
		}
494
		if (priv->eee_enabled) {
495
			int val = phylink_get_eee_err(priv->phylink);
496 497 498
			if (val)
				priv->xstats.phy_eee_wakeup_error_n = val;
		}
499

500 501 502 503
		if (priv->synopsys_id >= DWMAC_CORE_3_50)
			stmmac_mac_debug(priv, priv->ioaddr,
					(void *)&priv->xstats,
					rx_queues_count, tx_queues_count);
504
	}
505 506
	for (i = 0; i < STMMAC_STATS_LEN; i++) {
		char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
507 508
		data[j++] = (stmmac_gstrings_stats[i].sizeof_stat ==
			     sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
509 510 511 512 513
	}
}

static int stmmac_get_sset_count(struct net_device *netdev, int sset)
{
514
	struct stmmac_priv *priv = netdev_priv(netdev);
515
	int i, len, safety_len = 0;
516

517 518
	switch (sset) {
	case ETH_SS_STATS:
519 520
		len = STMMAC_STATS_LEN;

521
		if (priv->dma_cap.rmon)
522
			len += STMMAC_MMC_STATS_LEN;
523
		if (priv->dma_cap.asp) {
524
			for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) {
525 526 527
				if (!stmmac_safety_feat_dump(priv,
							&priv->sstats, i,
							NULL, NULL))
528 529 530 531 532
					safety_len++;
			}

			len += safety_len;
		}
533 534

		return len;
535 536
	case ETH_SS_TEST:
		return stmmac_selftest_get_count(priv);
537 538 539 540 541 542 543 544 545
	default:
		return -EOPNOTSUPP;
	}
}

static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;
	u8 *p = data;
546
	struct stmmac_priv *priv = netdev_priv(dev);
547 548 549

	switch (stringset) {
	case ETH_SS_STATS:
550
		if (priv->dma_cap.asp) {
551
			for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) {
552 553 554 555
				const char *desc;
				if (!stmmac_safety_feat_dump(priv,
							&priv->sstats, i,
							NULL, &desc)) {
556 557 558 559 560
					memcpy(p, desc, ETH_GSTRING_LEN);
					p += ETH_GSTRING_LEN;
				}
			}
		}
561
		if (priv->dma_cap.rmon)
562
			for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) {
563
				memcpy(p, stmmac_mmc[i].stat_string,
564 565 566
				       ETH_GSTRING_LEN);
				p += ETH_GSTRING_LEN;
			}
567 568 569 570 571 572
		for (i = 0; i < STMMAC_STATS_LEN; i++) {
			memcpy(p, stmmac_gstrings_stats[i].stat_string,
				ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
573 574 575
	case ETH_SS_TEST:
		stmmac_selftest_get_strings(priv, p);
		break;
576 577 578 579 580 581 582 583 584 585 586
	default:
		WARN_ON(1);
		break;
	}
}

/* Currently only support WOL through Magic packet. */
static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct stmmac_priv *priv = netdev_priv(dev);

587
	mutex_lock(&priv->lock);
588
	if (device_can_wakeup(priv->device)) {
589
		wol->supported = WAKE_MAGIC | WAKE_UCAST;
590 591
		wol->wolopts = priv->wolopts;
	}
592
	mutex_unlock(&priv->lock);
593 594 595 596 597
}

static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct stmmac_priv *priv = netdev_priv(dev);
598
	u32 support = WAKE_MAGIC | WAKE_UCAST;
599

600 601 602 603 604 605
	/* By default almost all GMAC devices support the WoL via
	 * magic frame but we can disable it if the HW capability
	 * register shows no support for pmt_magic_frame. */
	if ((priv->hw_cap_support) && (!priv->dma_cap.pmt_magic_frame))
		wol->wolopts &= ~WAKE_MAGIC;

606
	if (!device_can_wakeup(priv->device))
607 608 609 610 611
		return -EINVAL;

	if (wol->wolopts & ~support)
		return -EINVAL;

612 613
	if (wol->wolopts) {
		pr_info("stmmac: wakeup enable\n");
614
		device_set_wakeup_enable(priv->device, 1);
615
		enable_irq_wake(priv->wol_irq);
616 617
	} else {
		device_set_wakeup_enable(priv->device, 0);
618
		disable_irq_wake(priv->wol_irq);
619
	}
620

621
	mutex_lock(&priv->lock);
622
	priv->wolopts = wol->wolopts;
623
	mutex_unlock(&priv->lock);
624 625 626 627

	return 0;
}

628 629 630 631 632 633 634 635 636 637 638 639
static int stmmac_ethtool_op_get_eee(struct net_device *dev,
				     struct ethtool_eee *edata)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (!priv->dma_cap.eee)
		return -EOPNOTSUPP;

	edata->eee_enabled = priv->eee_enabled;
	edata->eee_active = priv->eee_active;
	edata->tx_lpi_timer = priv->tx_lpi_timer;

640
	return phylink_ethtool_get_eee(priv->phylink, edata);
641 642 643 644 645 646
}

static int stmmac_ethtool_op_set_eee(struct net_device *dev,
				     struct ethtool_eee *edata)
{
	struct stmmac_priv *priv = netdev_priv(dev);
647
	int ret;
648

649
	if (!edata->eee_enabled) {
650
		stmmac_disable_eee_mode(priv);
651
	} else {
652 653 654 655
		/* We are asking for enabling the EEE but it is safe
		 * to verify all by invoking the eee_init function.
		 * In case of failure it will return an error.
		 */
656 657
		edata->eee_enabled = stmmac_eee_init(priv);
		if (!edata->eee_enabled)
658 659 660
			return -EOPNOTSUPP;
	}

661
	ret = phylink_ethtool_set_eee(priv->phylink, edata);
662 663 664 665 666 667
	if (ret)
		return ret;

	priv->eee_enabled = edata->eee_enabled;
	priv->tx_lpi_timer = edata->tx_lpi_timer;
	return 0;
668 669
}

670 671
static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
{
672
	unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
673

674 675 676 677 678
	if (!clk) {
		clk = priv->plat->clk_ref_rate;
		if (!clk)
			return 0;
	}
679 680 681 682 683 684

	return (usec * (clk / 1000000)) / 256;
}

static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv)
{
685
	unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
686

687 688 689 690 691
	if (!clk) {
		clk = priv->plat->clk_ref_rate;
		if (!clk)
			return 0;
	}
692 693 694 695 696 697 698 699 700 701 702 703

	return (riwt * 256) / (clk / 1000000);
}

static int stmmac_get_coalesce(struct net_device *dev,
			       struct ethtool_coalesce *ec)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	ec->tx_coalesce_usecs = priv->tx_coal_timer;
	ec->tx_max_coalesced_frames = priv->tx_coal_frames;

704 705
	if (priv->use_riwt) {
		ec->rx_max_coalesced_frames = priv->rx_coal_frames;
706
		ec->rx_coalesce_usecs = stmmac_riwt2usec(priv->rx_riwt, priv);
707
	}
708 709 710 711 712 713 714 715

	return 0;
}

static int stmmac_set_coalesce(struct net_device *dev,
			       struct ethtool_coalesce *ec)
{
	struct stmmac_priv *priv = netdev_priv(dev);
716
	u32 rx_cnt = priv->plat->rx_queues_to_use;
717 718 719
	unsigned int rx_riwt;

	/* Check not supported parameters  */
720
	if ((ec->rx_coalesce_usecs_irq) ||
721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739
	    (ec->rx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) ||
	    (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) ||
	    (ec->pkt_rate_low) || (ec->rx_coalesce_usecs_low) ||
	    (ec->rx_max_coalesced_frames_low) || (ec->tx_coalesce_usecs_high) ||
	    (ec->tx_max_coalesced_frames_low) || (ec->pkt_rate_high) ||
	    (ec->tx_coalesce_usecs_low) || (ec->rx_coalesce_usecs_high) ||
	    (ec->rx_max_coalesced_frames_high) ||
	    (ec->tx_max_coalesced_frames_irq) ||
	    (ec->stats_block_coalesce_usecs) ||
	    (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval))
		return -EOPNOTSUPP;

	if (ec->rx_coalesce_usecs == 0)
		return -EINVAL;

	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;

740
	if ((ec->tx_coalesce_usecs > STMMAC_MAX_COAL_TX_TICK) ||
741 742 743 744 745 746 747 748 749 750 751 752 753
	    (ec->tx_max_coalesced_frames > STMMAC_TX_MAX_FRAMES))
		return -EINVAL;

	rx_riwt = stmmac_usec2riwt(ec->rx_coalesce_usecs, priv);

	if ((rx_riwt > MAX_DMA_RIWT) || (rx_riwt < MIN_DMA_RIWT))
		return -EINVAL;
	else if (!priv->use_riwt)
		return -EOPNOTSUPP;

	/* Only copy relevant parameters, ignore all others. */
	priv->tx_coal_frames = ec->tx_max_coalesced_frames;
	priv->tx_coal_timer = ec->tx_coalesce_usecs;
754
	priv->rx_coal_frames = ec->rx_max_coalesced_frames;
755
	priv->rx_riwt = rx_riwt;
756
	stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt);
757 758 759 760

	return 0;
}

761 762 763 764 765
static int stmmac_get_ts_info(struct net_device *dev,
			      struct ethtool_ts_info *info)
{
	struct stmmac_priv *priv = netdev_priv(dev);

766
	if ((priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) {
767

768 769 770
		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
					SOF_TIMESTAMPING_TX_HARDWARE |
					SOF_TIMESTAMPING_RX_SOFTWARE |
771
					SOF_TIMESTAMPING_RX_HARDWARE |
772
					SOF_TIMESTAMPING_SOFTWARE |
773 774
					SOF_TIMESTAMPING_RAW_HARDWARE;

775 776 777
		if (priv->ptp_clock)
			info->phc_index = ptp_clock_index(priv->ptp_clock);

778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795
		info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

		info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) |
				    (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
				    (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
				    (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
				    (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
				    (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
				    (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
				    (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
				    (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
				    (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
				    (1 << HWTSTAMP_FILTER_ALL));
		return 0;
	} else
		return ethtool_op_get_ts_info(dev, info);
}

796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832
static int stmmac_get_tunable(struct net_device *dev,
			      const struct ethtool_tunable *tuna, void *data)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)data = priv->rx_copybreak;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int stmmac_set_tunable(struct net_device *dev,
			      const struct ethtool_tunable *tuna,
			      const void *data)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		priv->rx_copybreak = *(u32 *)data;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

S
stephen hemminger 已提交
833
static const struct ethtool_ops stmmac_ethtool_ops = {
834 835 836 837 838 839 840
	.begin = stmmac_check_if_running,
	.get_drvinfo = stmmac_ethtool_getdrvinfo,
	.get_msglevel = stmmac_ethtool_getmsglevel,
	.set_msglevel = stmmac_ethtool_setmsglevel,
	.get_regs = stmmac_ethtool_gregs,
	.get_regs_len = stmmac_ethtool_get_regs_len,
	.get_link = ethtool_op_get_link,
841
	.nway_reset = stmmac_nway_reset,
842 843
	.get_pauseparam = stmmac_get_pauseparam,
	.set_pauseparam = stmmac_set_pauseparam,
844
	.self_test = stmmac_selftest_run,
845 846 847 848
	.get_ethtool_stats = stmmac_get_ethtool_stats,
	.get_strings = stmmac_get_strings,
	.get_wol = stmmac_get_wol,
	.set_wol = stmmac_set_wol,
849 850
	.get_eee = stmmac_ethtool_op_get_eee,
	.set_eee = stmmac_ethtool_op_set_eee,
851
	.get_sset_count	= stmmac_get_sset_count,
852
	.get_ts_info = stmmac_get_ts_info,
853 854
	.get_coalesce = stmmac_get_coalesce,
	.set_coalesce = stmmac_set_coalesce,
855 856
	.get_tunable = stmmac_get_tunable,
	.set_tunable = stmmac_set_tunable,
857 858
	.get_link_ksettings = stmmac_ethtool_get_link_ksettings,
	.set_link_ksettings = stmmac_ethtool_set_link_ksettings,
859 860 861 862
};

void stmmac_set_ethtool_ops(struct net_device *netdev)
{
863
	netdev->ethtool_ops = &stmmac_ethtool_ops;
864
}