/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2016 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/* ethtool support for ixgbe */

#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>

#include "ixgbe.h"
#include "ixgbe_phy.h"


#define IXGBE_ALL_RAR_ENTRIES 16

enum {NETDEV_STATS, IXGBE_STATS};

struct ixgbe_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};

#define IXGBE_STAT(m)		IXGBE_STATS, \
				sizeof(((struct ixgbe_adapter *)0)->m), \
				offsetof(struct ixgbe_adapter, m)
#define IXGBE_NETDEV_STAT(m)	NETDEV_STATS, \
				sizeof(((struct rtnl_link_stats64 *)0)->m), \
				offsetof(struct rtnl_link_stats64, m)
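/* Each helper expands to the trailing fields of struct ixgbe_stats: the
 * source table tag, the member's size taken with sizeof() on a NULL-based
 * pointer (no object is needed at compile time), and its byte offset, which
 * ixgbe_get_ethtool_stats() uses to copy every counter generically.
 */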

static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
	{"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
	{"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
	{"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
	{"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
	{"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
	{"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
	{"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
	{"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
	{"lsc_int", IXGBE_STAT(lsc_int)},
	{"tx_busy", IXGBE_STAT(tx_busy)},
	{"non_eop_descs", IXGBE_STAT(non_eop_descs)},
	{"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
	{"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
	{"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
	{"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
	{"multicast", IXGBE_NETDEV_STAT(multicast)},
	{"broadcast", IXGBE_STAT(stats.bprc)},
	{"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
	{"collisions", IXGBE_NETDEV_STAT(collisions)},
	{"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
	{"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
	{"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
	{"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
	{"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
	{"fdir_match", IXGBE_STAT(stats.fdirmatch)},
	{"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
	{"fdir_overflow", IXGBE_STAT(fdir_overflow)},
	{"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
	{"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
	{"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
	{"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
	{"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
	{"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
	{"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
	{"tx_restart_queue", IXGBE_STAT(restart_queue)},
	{"rx_long_length_errors", IXGBE_STAT(stats.roc)},
	{"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
	{"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
	{"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
	{"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
	{"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
	{"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
	{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
	{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
	{"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
	{"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
	{"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
	{"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
	{"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
#ifdef IXGBE_FCOE
	{"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
	{"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
	{"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
	{"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
	{"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)},
	{"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)},
	{"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
	{"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
#endif /* IXGBE_FCOE */
};

/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so
 * we set the num_rx_queues to evaluate to num_tx_queues. This is
 * used because we do not have a good way to get the max number of
 * rx queues with CONFIG_RPS disabled.
 */
#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues

#define IXGBE_QUEUE_STATS_LEN ( \
	(netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
	(sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
#define IXGBE_PB_STATS_LEN ( \
			(sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
			 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
			 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
			 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
			/ sizeof(u64))
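/* Total number of u64 values exposed through ETH_SS_STATS: the global
 * table above, packets and bytes for every Tx and Rx queue, and the
 * per-traffic-class XON/XOFF pause counters.
 */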
#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
			 IXGBE_PB_STATS_LEN + \
			 IXGBE_QUEUE_STATS_LEN)

static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test  (offline)", "Eeprom test    (offline)",
	"Interrupt test (offline)", "Loopback test  (offline)",
	"Link test   (on/offline)"
};
#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN

/* currently supported speeds for 10G */
#define ADVRTSD_MSK_10G (SUPPORTED_10000baseT_Full | \
			 SUPPORTED_10000baseKX4_Full | \
			 SUPPORTED_10000baseKR_Full)

#define ixgbe_isbackplane(type) ((type) == ixgbe_media_type_backplane)

static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw)
{
	if (!ixgbe_isbackplane(hw->phy.media_type))
		return SUPPORTED_10000baseT_Full;

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598:
	case IXGBE_DEV_ID_82599_KX4:
	case IXGBE_DEV_ID_82599_KX4_MEZZ:
	case IXGBE_DEV_ID_X550EM_X_KX4:
		return SUPPORTED_10000baseKX4_Full;
	case IXGBE_DEV_ID_82598_BX:
	case IXGBE_DEV_ID_82599_KR:
	case IXGBE_DEV_ID_X550EM_X_KR:
		return SUPPORTED_10000baseKR_Full;
	default:
		return SUPPORTED_10000baseKX4_Full |
		       SUPPORTED_10000baseKR_Full;
	}
}

static int ixgbe_get_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed supported_link;
	bool autoneg = false;

	hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);

	/* set the supported link speeds */
	if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
		ecmd->supported |= ixgbe_get_supported_10gtypes(hw);
	if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
		ecmd->supported |= SUPPORTED_1000baseT_Full;
	if (supported_link & IXGBE_LINK_SPEED_100_FULL)
		ecmd->supported |= ixgbe_isbackplane(hw->phy.media_type) ?
				   SUPPORTED_1000baseKX_Full :
				   SUPPORTED_1000baseT_Full;

	/* default advertised speed if phy.autoneg_advertised isn't set */
	ecmd->advertising = ecmd->supported;
	/* set the advertised speeds */
	if (hw->phy.autoneg_advertised) {
		ecmd->advertising = 0;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
			ecmd->advertising |= ADVERTISED_100baseT_Full;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
			ecmd->advertising |= ecmd->supported & ADVRTSD_MSK_10G;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
			if (ecmd->supported & SUPPORTED_1000baseKX_Full)
				ecmd->advertising |= ADVERTISED_1000baseKX_Full;
			else
				ecmd->advertising |= ADVERTISED_1000baseT_Full;
		}
	} else {
		if (hw->phy.multispeed_fiber && !autoneg) {
			if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
				ecmd->advertising = ADVERTISED_10000baseT_Full;
		}
	}

	if (autoneg) {
		ecmd->supported |= SUPPORTED_Autoneg;
		ecmd->advertising |= ADVERTISED_Autoneg;
		ecmd->autoneg = AUTONEG_ENABLE;
	} else
		ecmd->autoneg = AUTONEG_DISABLE;

	ecmd->transceiver = XCVR_EXTERNAL;

	/* Determine the remaining settings based on the PHY type. */
	switch (adapter->hw.phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_aq:
	case ixgbe_phy_x550em_ext_t:
	case ixgbe_phy_cu_unknown:
		ecmd->supported |= SUPPORTED_TP;
		ecmd->advertising |= ADVERTISED_TP;
		ecmd->port = PORT_TP;
		break;
	case ixgbe_phy_qt:
		ecmd->supported |= SUPPORTED_FIBRE;
		ecmd->advertising |= ADVERTISED_FIBRE;
		ecmd->port = PORT_FIBRE;
		break;
	case ixgbe_phy_nl:
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
	case ixgbe_phy_qsfp_passive_unknown:
	case ixgbe_phy_qsfp_active_unknown:
	case ixgbe_phy_qsfp_intel:
	case ixgbe_phy_qsfp_unknown:
		/* SFP+ devices, further checking needed */
		switch (adapter->hw.phy.sfp_type) {
		case ixgbe_sfp_type_da_cu:
		case ixgbe_sfp_type_da_cu_core0:
		case ixgbe_sfp_type_da_cu_core1:
			ecmd->supported |= SUPPORTED_FIBRE;
			ecmd->advertising |= ADVERTISED_FIBRE;
			ecmd->port = PORT_DA;
			break;
		case ixgbe_sfp_type_sr:
		case ixgbe_sfp_type_lr:
		case ixgbe_sfp_type_srlr_core0:
		case ixgbe_sfp_type_srlr_core1:
		case ixgbe_sfp_type_1g_sx_core0:
		case ixgbe_sfp_type_1g_sx_core1:
		case ixgbe_sfp_type_1g_lx_core0:
		case ixgbe_sfp_type_1g_lx_core1:
			ecmd->supported |= SUPPORTED_FIBRE;
			ecmd->advertising |= ADVERTISED_FIBRE;
			ecmd->port = PORT_FIBRE;
			break;
		case ixgbe_sfp_type_not_present:
			ecmd->supported |= SUPPORTED_FIBRE;
			ecmd->advertising |= ADVERTISED_FIBRE;
			ecmd->port = PORT_NONE;
			break;
		case ixgbe_sfp_type_1g_cu_core0:
		case ixgbe_sfp_type_1g_cu_core1:
			ecmd->supported |= SUPPORTED_TP;
			ecmd->advertising |= ADVERTISED_TP;
			ecmd->port = PORT_TP;
			break;
		case ixgbe_sfp_type_unknown:
		default:
			ecmd->supported |= SUPPORTED_FIBRE;
			ecmd->advertising |= ADVERTISED_FIBRE;
			ecmd->port = PORT_OTHER;
			break;
		}
		break;
	case ixgbe_phy_xaui:
		ecmd->supported |= SUPPORTED_FIBRE;
		ecmd->advertising |= ADVERTISED_FIBRE;
		ecmd->port = PORT_NONE;
		break;
	case ixgbe_phy_unknown:
	case ixgbe_phy_generic:
	case ixgbe_phy_sfp_unsupported:
	default:
		ecmd->supported |= SUPPORTED_FIBRE;
		ecmd->advertising |= ADVERTISED_FIBRE;
		ecmd->port = PORT_OTHER;
		break;
	}

	if (netif_carrier_ok(netdev)) {
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ethtool_cmd_speed_set(ecmd, SPEED_10000);
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			ethtool_cmd_speed_set(ecmd, SPEED_2500);
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ethtool_cmd_speed_set(ecmd, SPEED_1000);
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ethtool_cmd_speed_set(ecmd, SPEED_100);
			break;
		default:
			break;
		}
		ecmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
		ecmd->duplex = DUPLEX_UNKNOWN;
	}

	return 0;
}

static int ixgbe_set_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 advertised, old;
	s32 err = 0;

	if ((hw->phy.media_type == ixgbe_media_type_copper) ||
	    (hw->phy.multispeed_fiber)) {
		/*
		 * this function does not support duplex forcing, but can
		 * limit the advertising of the adapter to the specified speed
		 */
		if (ecmd->advertising & ~ecmd->supported)
			return -EINVAL;

		/* only allow one speed at a time if no autoneg */
		if (!ecmd->autoneg && hw->phy.multispeed_fiber) {
			if (ecmd->advertising ==
			    (ADVERTISED_10000baseT_Full |
			     ADVERTISED_1000baseT_Full))
				return -EINVAL;
		}

		old = hw->phy.autoneg_advertised;
		advertised = 0;
		if (ecmd->advertising & ADVERTISED_10000baseT_Full)
			advertised |= IXGBE_LINK_SPEED_10GB_FULL;

		if (ecmd->advertising & ADVERTISED_1000baseT_Full)
			advertised |= IXGBE_LINK_SPEED_1GB_FULL;

		if (ecmd->advertising & ADVERTISED_100baseT_Full)
			advertised |= IXGBE_LINK_SPEED_100_FULL;

		if (old == advertised)
			return err;
		/* this sets the link speed and restarts auto-neg */
		while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
			usleep_range(1000, 2000);

		hw->mac.autotry_restart = true;
		err = hw->mac.ops.setup_link(hw, advertised, true);
		if (err) {
			e_info(probe, "setup link failed with code %d\n", err);
			hw->mac.ops.setup_link(hw, old, true);
		}
		clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
	} else {
		/* in this case we currently only support 10Gb/FULL */
		u32 speed = ethtool_cmd_speed(ecmd);
		if ((ecmd->autoneg == AUTONEG_ENABLE) ||
		    (ecmd->advertising != ADVERTISED_10000baseT_Full) ||
		    (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
			return -EINVAL;
	}

	return err;
}

static void ixgbe_get_pauseparam(struct net_device *netdev,
				 struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (ixgbe_device_supports_autoneg_fc(hw) &&
	    !hw->fc.disable_fc_autoneg)
		pause->autoneg = 1;
	else
		pause->autoneg = 0;

	if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
		pause->rx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
		pause->tx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_full) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
	}
}

static int ixgbe_set_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fc_info fc = hw->fc;

	/* 82598 does not support link flow control with DCB enabled */
	if ((hw->mac.type == ixgbe_mac_82598EB) &&
	    (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
		return -EINVAL;

	/* some devices do not support autoneg of link flow control */
	if ((pause->autoneg == AUTONEG_ENABLE) &&
	    !ixgbe_device_supports_autoneg_fc(hw))
		return -EINVAL;

	fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);

	if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
		fc.requested_mode = ixgbe_fc_full;
	else if (pause->rx_pause && !pause->tx_pause)
		fc.requested_mode = ixgbe_fc_rx_pause;
	else if (!pause->rx_pause && pause->tx_pause)
		fc.requested_mode = ixgbe_fc_tx_pause;
	else
		fc.requested_mode = ixgbe_fc_none;

	/* if the thing changed then we'll update and use new autoneg */
	if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
		hw->fc = fc;
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
		else
			ixgbe_reset(adapter);
	}

	return 0;
}

static u32 ixgbe_get_msglevel(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->msg_enable;
}

static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	adapter->msg_enable = data;
}

static int ixgbe_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN  1139
	return IXGBE_REGS_LEN * sizeof(u32);
}

#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_

static void ixgbe_get_regs(struct net_device *netdev,
			   struct ethtool_regs *regs, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u8 i;

	memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));

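	/* Encode MAC type, silicon revision and device ID in the version
	 * field so a register dump identifies the hardware it came from.
	 */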
	regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
			hw->device_id;

	/* General Registers */
	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);

	/* NVM Register */
	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA(hw));
	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
	regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
	regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
	regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
	regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC(hw));

	/* Interrupt */
	/* don't read EICR because it can clear interrupt causes, instead
	 * read EICS which is a shadow but doesn't clear EICR */
	regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
	regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
	regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
	regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
	regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
	regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
	regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
	regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
	regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);

	/* Flow Control */
	regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
	for (i = 0; i < 4; i++)
		regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_FCTTV(i));
	for (i = 0; i < 8; i++) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_x550em_a:
			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
			break;
		default:
			break;
		}
	}
	regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
	regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);

	/* Receive DMA */
	for (i = 0; i < 64; i++)
		regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
	for (i = 0; i < 64; i++)
		regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
	for (i = 0; i < 64; i++)
		regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
	for (i = 0; i < 64; i++)
		regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
	for (i = 0; i < 64; i++)
		regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
	for (i = 0; i < 64; i++)
		regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
	regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
	for (i = 0; i < 8; i++)
		regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
	regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);

	/* Receive */
	regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
	regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
	for (i = 0; i < 16; i++)
		regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
	for (i = 0; i < 16; i++)
		regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
	regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
	regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
	regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
	regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
	for (i = 0; i < 8; i++)
		regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
	for (i = 0; i < 8; i++)
		regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
	regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);

	/* Transmit */
	for (i = 0; i < 32; i++)
		regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
	for (i = 0; i < 32; i++)
		regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
	for (i = 0; i < 32; i++)
		regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
	for (i = 0; i < 32; i++)
		regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
	for (i = 0; i < 32; i++)
		regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
	for (i = 0; i < 32; i++)
		regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
	regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
	for (i = 0; i < 16; i++)
		regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
	regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
	for (i = 0; i < 8; i++)
		regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
	regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);

	/* Wake Up */
	regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
	regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
	regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
	regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
	regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
	regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
	regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
	regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
	regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));

	/* DCB */
	regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);   /* same as FCCFG  */
	regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
		regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
		for (i = 0; i < 8; i++)
			regs_buff[833 + i] =
				IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
		for (i = 0; i < 8; i++)
			regs_buff[841 + i] =
				IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
		for (i = 0; i < 8; i++)
			regs_buff[849 + i] =
				IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
		for (i = 0; i < 8; i++)
			regs_buff[857 + i] =
				IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
		for (i = 0; i < 8; i++)
			regs_buff[833 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i));
		for (i = 0; i < 8; i++)
			regs_buff[841 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i));
		for (i = 0; i < 8; i++)
			regs_buff[849 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i));
		for (i = 0; i < 8; i++)
			regs_buff[857 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i));
		break;
	default:
		break;
	}

	for (i = 0; i < 8; i++)
		regs_buff[865 + i] =
		IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */
	for (i = 0; i < 8; i++)
		regs_buff[873 + i] =
		IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */

	/* Statistics */
	regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
	regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
	regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
	regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
	for (i = 0; i < 8; i++)
		regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
	regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
	regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
	regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
	regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
	regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
	regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
	regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
	for (i = 0; i < 8; i++)
		regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
	regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
	regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
	regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
	regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
	regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
	regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
	regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
	regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
	regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
	regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
	regs_buff[942] = (u32)IXGBE_GET_STAT(adapter, gorc);
	regs_buff[943] = (u32)(IXGBE_GET_STAT(adapter, gorc) >> 32);
	regs_buff[944] = (u32)IXGBE_GET_STAT(adapter, gotc);
	regs_buff[945] = (u32)(IXGBE_GET_STAT(adapter, gotc) >> 32);
	for (i = 0; i < 8; i++)
		regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
	regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
	regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
	regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
	regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
	regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
	regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
	regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
	regs_buff[961] = (u32)IXGBE_GET_STAT(adapter, tor);
	regs_buff[962] = (u32)(IXGBE_GET_STAT(adapter, tor) >> 32);
	regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
	regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
	regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
	regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
	regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
	regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
	regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
	regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
	regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
	regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
	regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
	for (i = 0; i < 16; i++)
		regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);

	/* MAC */
	regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
	regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
	regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
	regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
	regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
	regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
	regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
	regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
	regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
	regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
	regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
	regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
	regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
	regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
	regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
	regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
	regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
	regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
	regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
	regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
	regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
	regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
	regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
	regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
	regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
	regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
	regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
	regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
	regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
	regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);

	/* Diagnostic */
	regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
	regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
	for (i = 0; i < 4; i++)
		regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
	regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
	regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
	regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
	for (i = 0; i < 4; i++)
		regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
	regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
	regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
	for (i = 0; i < 4; i++)
		regs_buff[1102 + i] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA(i));
	regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
	for (i = 0; i < 4; i++)
		regs_buff[1107 + i] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA(i));
	for (i = 0; i < 8; i++)
		regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
	regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
	regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
	regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
	regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
	regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
	regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
	regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
	regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
	regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);

	/* 82599 X540 specific registers  */
	regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);

	/* 82599 X540 specific DCB registers  */
	regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
	regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC);
	for (i = 0; i < 4; i++)
		regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i));
	regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM);
					/* same as RTTQCNRM */
	regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD);
					/* same as RTTQCNRR */

	/* X540 specific DCB registers  */
	regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
	regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);
}

static int ixgbe_get_eeprom_len(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->hw.eeprom.word_size * 2;
}

static int ixgbe_get_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	int first_word, last_word, eeprom_len;
	int ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_len = last_word - first_word + 1;

	eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
					     eeprom_buff);

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < eeprom_len; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
	kfree(eeprom_buff);

	return ret_val;
}

static int ixgbe_set_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	void *ptr;
	int max_len, first_word, last_word, ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
		return -EINVAL;

	max_len = hw->eeprom.word_size * 2;

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ptr = eeprom_buff;

	if (eeprom->offset & 1) {
		/*
		 * need read/modify/write of first changed EEPROM word
		 * only the second byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]);
		if (ret_val)
			goto err;

		ptr++;
	}
	if ((eeprom->offset + eeprom->len) & 1) {
		/*
		 * need read/modify/write of last changed EEPROM word
		 * only the first byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, last_word,
					  &eeprom_buff[last_word - first_word]);
		if (ret_val)
			goto err;
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(ptr, bytes, eeprom->len);

	for (i = 0; i < last_word - first_word + 1; i++)
		cpu_to_le16s(&eeprom_buff[i]);

	ret_val = hw->eeprom.ops.write_buffer(hw, first_word,
					      last_word - first_word + 1,
					      eeprom_buff);

	/* Update the checksum */
	if (ret_val == 0)
		hw->eeprom.ops.update_checksum(hw);

err:
	kfree(eeprom_buff);
	return ret_val;
}

static void ixgbe_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 nvm_track_id;

	strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ixgbe_driver_version,
		sizeof(drvinfo->version));

	nvm_track_id = (adapter->eeprom_verh << 16) |
			adapter->eeprom_verl;
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
		 nvm_track_id);

	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));
}

static void ixgbe_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
	struct ixgbe_ring *rx_ring = adapter->rx_ring[0];

	ring->rx_max_pending = IXGBE_MAX_RXD;
	ring->tx_max_pending = IXGBE_MAX_TXD;
	ring->rx_pending = rx_ring->count;
	ring->tx_pending = tx_ring->count;
}

static int ixgbe_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *temp_ring;
	int i, err = 0;
	u32 new_rx_count, new_tx_count;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_tx_count = clamp_t(u32, ring->tx_pending,
			       IXGBE_MIN_TXD, IXGBE_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);

	new_rx_count = clamp_t(u32, ring->rx_pending,
			       IXGBE_MIN_RXD, IXGBE_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == adapter->tx_ring_count) &&
	    (new_rx_count == adapter->rx_ring_count)) {
		/* nothing to do */
		return 0;
	}

	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (!netif_running(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	/* allocate temporary buffer to store rings in */
	i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
	temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));

	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	ixgbe_down(adapter);

	/*
	 * Setup new Tx resources and free the old Tx resources in that order.
	 * We can then assign the new resources to the rings via a memcpy.
	 * The advantage to this approach is that we are guaranteed to still
	 * have resources even in the case of an allocation failure.
	 */
	if (new_tx_count != adapter->tx_ring_count) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			memcpy(&temp_ring[i], adapter->tx_ring[i],
			       sizeof(struct ixgbe_ring));

			temp_ring[i].count = new_tx_count;
			err = ixgbe_setup_tx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < adapter->num_tx_queues; i++) {
			ixgbe_free_tx_resources(adapter->tx_ring[i]);

			memcpy(adapter->tx_ring[i], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}

		adapter->tx_ring_count = new_tx_count;
1065
	}
1066

1067
	/* Repeat the process for the Rx rings if needed */
1068
	if (new_rx_count != adapter->rx_ring_count) {
1069
		for (i = 0; i < adapter->num_rx_queues; i++) {
1070
			memcpy(&temp_ring[i], adapter->rx_ring[i],
1071
			       sizeof(struct ixgbe_ring));
1072 1073 1074

			temp_ring[i].count = new_rx_count;
			err = ixgbe_setup_rx_resources(&temp_ring[i]);
1075
			if (err) {
1076 1077
				while (i) {
					i--;
1078
					ixgbe_free_rx_resources(&temp_ring[i]);
1079
				}
1080 1081
				goto err_setup;
			}
1082

1083
		}
1084

1085 1086
		for (i = 0; i < adapter->num_rx_queues; i++) {
			ixgbe_free_rx_resources(adapter->rx_ring[i]);
1087

1088 1089
			memcpy(adapter->rx_ring[i], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
1090 1091
		}

1092
		adapter->rx_ring_count = new_rx_count;
1093
	}
1094

1095
err_setup:
1096 1097
	ixgbe_up(adapter);
	vfree(temp_ring);
1098
clear_reset:
1099
	clear_bit(__IXGBE_RESETTING, &adapter->state);
1100 1101 1102
	return err;
}

1103
static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
1104
{
1105
	switch (sset) {
1106 1107
	case ETH_SS_TEST:
		return IXGBE_TEST_LEN;
1108 1109 1110 1111 1112
	case ETH_SS_STATS:
		return IXGBE_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
1113 1114 1115
}

static void ixgbe_get_ethtool_stats(struct net_device *netdev,
1116
				    struct ethtool_stats *stats, u64 *data)
1117 1118
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
1119 1120
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *net_stats;
E
	struct ixgbe_ring *ring;
	int i, j;
1124
	char *p = NULL;
1125 1126

	ixgbe_update_stats(adapter);
1127
	net_stats = dev_get_stats(netdev, &temp);
1128
	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
1129 1130
		switch (ixgbe_gstrings_stats[i].type) {
		case NETDEV_STATS:
1131
			p = (char *) net_stats +
1132 1133 1134 1135 1136 1137
					ixgbe_gstrings_stats[i].stat_offset;
			break;
		case IXGBE_STATS:
			p = (char *) adapter +
					ixgbe_gstrings_stats[i].stat_offset;
			break;
J
			data[i] = 0;
			continue;
1141 1142
		}

1143
		data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
1144
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1145
	}
1146
	for (j = 0; j < netdev->num_tx_queues; j++) {
E
1148 1149 1150 1151
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
1152
#ifdef BP_EXTENDED_STATS
1153 1154 1155 1156 1157
			data[i] = 0;
			data[i+1] = 0;
			data[i+2] = 0;
			i += 3;
#endif
1158 1159 1160
			continue;
		}

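		/* Sample packets/bytes under the ring's u64_stats seqcount so
		 * the pair is read consistently while the ring keeps updating.
		 */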
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i]   = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
#ifdef BP_EXTENDED_STATS
		data[i] = ring->stats.yields;
		data[i+1] = ring->stats.misses;
		data[i+2] = ring->stats.cleaned;
		i += 3;
#endif
	}
	for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
		ring = adapter->rx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
#ifdef BP_EXTENDED_STATS
			data[i] = 0;
			data[i+1] = 0;
			data[i+2] = 0;
			i += 3;
#endif
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i]   = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
#ifdef BP_EXTENDED_STATS
		data[i] = ring->stats.yields;
		data[i+1] = ring->stats.misses;
		data[i+2] = ring->stats.cleaned;
		i += 3;
#endif
	}

	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxontxc[j];
		data[i++] = adapter->stats.pxofftxc[j];
	}
	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxonrxc[j];
		data[i++] = adapter->stats.pxoffrxc[j];
	}
}

static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
			      u8 *data)
{
	char *p = (char *)data;
	int i;

	switch (stringset) {
	case ETH_SS_TEST:
		for (i = 0; i < IXGBE_TEST_LEN; i++) {
			memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	case ETH_SS_STATS:
		for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
			memcpy(p, ixgbe_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < netdev->num_tx_queues; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
#ifdef BP_EXTENDED_STATS
			sprintf(p, "tx_queue_%u_bp_napi_yield", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bp_misses", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bp_cleaned", i);
			p += ETH_GSTRING_LEN;
#endif /* BP_EXTENDED_STATS */
		}
		for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
#ifdef BP_EXTENDED_STATS
			sprintf(p, "rx_queue_%u_bp_poll_yield", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bp_misses", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bp_cleaned", i);
			p += ETH_GSTRING_LEN;
#endif /* BP_EXTENDED_STATS */
		}
		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
			sprintf(p, "tx_pb_%u_pxon", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_pb_%u_pxoff", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
			sprintf(p, "rx_pb_%u_pxon", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_pb_%u_pxoff", i);
			p += ETH_GSTRING_LEN;
		}
		/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
		break;
	}
}

static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link_up;
	u32 link_speed = 0;

	if (ixgbe_removed(hw->hw_addr)) {
		*data = 1;
		return 1;
	}
	*data = 0;

	hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
	if (link_up)
		return *data;
	else
		*data = 1;
	return *data;
}

/* ethtool register test data */
struct ixgbe_reg_test {
	u16 reg;
	u8  array_len;
	u8  test_type;
	u32 mask;
	u32 write;
};

/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x40 bytes apart, or in contiguous tables.  We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */

#define PATTERN_TEST	1
#define SET_READ_TEST	2
#define WRITE_NO_TEST	3
#define TABLE32_TEST	4
#define TABLE64_TEST_LO	5
#define TABLE64_TEST_HI	6

/* default 82599 register test */
static const struct ixgbe_reg_test reg_test_82599[] = {
	{ IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ .reg = 0 }
};

/* default 82598 register test */
static const struct ixgbe_reg_test reg_test_82598[] = {
	{ IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	/* Enable all four RX queues before testing. */
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	/* RDH is read-only for 82598, only test RDT. */
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
	{ IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ .reg = 0 }
};

static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
			     u32 mask, u32 write)
{
	u32 pat, val, before;
	static const u32 test_pattern[] = {
		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		*data = 1;
		return true;
	}
	for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
		before = ixgbe_read_reg(&adapter->hw, reg);
		ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write);
		val = ixgbe_read_reg(&adapter->hw, reg);
		if (val != (test_pattern[pat] & write & mask)) {
			e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
			      reg, val, (test_pattern[pat] & write & mask));
			*data = reg;
			ixgbe_write_reg(&adapter->hw, reg, before);
			return true;
		}
		ixgbe_write_reg(&adapter->hw, reg, before);
	}
	return false;
}

static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
			      u32 mask, u32 write)
{
	u32 val, before;

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		*data = 1;
		return true;
	}
	before = ixgbe_read_reg(&adapter->hw, reg);
	ixgbe_write_reg(&adapter->hw, reg, write & mask);
	val = ixgbe_read_reg(&adapter->hw, reg);
	if ((write & mask) != (val & mask)) {
		e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
		      reg, (val & mask), (write & mask));
		*data = reg;
		ixgbe_write_reg(&adapter->hw, reg, before);
		return true;
	}
	ixgbe_write_reg(&adapter->hw, reg, before);
	return false;
}

static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
{
	const struct ixgbe_reg_test *test;
	u32 value, before, after;
	u32 i, toggle;

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		e_err(drv, "Adapter removed - register test blocked\n");
		*data = 1;
		return 1;
	}
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		toggle = 0x7FFFF3FF;
		test = reg_test_82598;
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		toggle = 0x7FFFF30F;
		test = reg_test_82599;
		break;
	default:
		*data = 1;
		return 1;
	}

	/*
	 * Because the status register is such a special case,
	 * we handle it separately from the rest of the register
	 * tests.  Some bits are read-only, some toggle, and some
	 * are writeable on newer MACs.
	 */
	before = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS);
	value = (ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle);
	ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle);
	after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle;
	if (value != after) {
		e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
		      after, value);
		*data = 1;
		return 1;
	}
	/* restore previous status */
	ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, before);

	/*
	 * Perform the remainder of the register test, looping through
	 * the test table until we either fail or reach the null entry.
	 */
	while (test->reg) {
		for (i = 0; i < test->array_len; i++) {
			bool b = false;

			switch (test->test_type) {
			case PATTERN_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 0x40),
						     test->mask,
						     test->write);
				break;
			case SET_READ_TEST:
				b = reg_set_and_check(adapter, data,
						      test->reg + (i * 0x40),
						      test->mask,
						      test->write);
				break;
			case WRITE_NO_TEST:
				ixgbe_write_reg(&adapter->hw,
						test->reg + (i * 0x40),
						test->write);
				break;
			case TABLE32_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 4),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_LO:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 8),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_HI:
				b = reg_pattern_test(adapter, data,
						     (test->reg + 4) + (i * 8),
						     test->mask,
						     test->write);
				break;
			}
			if (b)
				return 1;
		}
		test++;
	}

	*data = 0;
	return 0;
}

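/* ixgbe_eeprom_test - ethtool offline EEPROM test
 *
 * Validates the EEPROM checksum through the eeprom ops; *data (and the
 * return value) is 0 on success and 1 on a checksum failure.
 */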
static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;
	if (hw->eeprom.ops.validate_checksum(hw, NULL))
		*data = 1;
	else
		*data = 0;
	return *data;
}

static irqreturn_t ixgbe_test_intr(int irq, void *data)
{
	struct net_device *netdev = (struct net_device *) data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);

	return IRQ_HANDLED;
}

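/* ixgbe_intr_test - ethtool offline interrupt test
 *
 * Only legacy and MSI interrupts are exercised (MSI-X is skipped).  A
 * temporary handler is installed and each of the low ten EICR causes is
 * masked and forced in turn to verify that interrupts are posted exactly
 * when they should be.  *data is 0 on success or a small code identifying
 * the failing step.
 */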
static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct net_device *netdev = adapter->netdev;
	u32 mask, i = 0, shared_int = true;
	u32 irq = adapter->pdev->irq;

	*data = 0;

	/* Hook up test interrupt handler just for this test */
	if (adapter->msix_entries) {
		/* NOTE: we don't test MSI-X interrupts here, yet */
		return 0;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		shared_int = false;
		if (request_irq(irq, ixgbe_test_intr, 0, netdev->name,
				netdev)) {
			*data = 1;
			return -1;
		}
	} else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
				netdev->name, netdev)) {
		shared_int = false;
	} else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
			       netdev->name, netdev)) {
		*data = 1;
		return -1;
	}
	e_info(hw, "testing %s interrupt\n", shared_int ?
	       "shared" : "unshared");

	/* Disable all the interrupts */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	usleep_range(10000, 20000);

	/* Test each interrupt */
	for (; i < 10; i++) {
		/* Interrupt to test */
		mask = BIT(i);

		if (!shared_int) {
			/*
			 * Disable the interrupts to be reported in
			 * the cause register and then force the same
			 * interrupt and see if one gets posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
					~mask & 0x00007FFF);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
					~mask & 0x00007FFF);
			IXGBE_WRITE_FLUSH(&adapter->hw);
			usleep_range(10000, 20000);

			if (adapter->test_icr & mask) {
				*data = 3;
				break;
			}
		}

		/*
		 * Enable the interrupt to be reported in the cause
		 * register and then force the same interrupt and see
		 * if one gets posted.  If an interrupt was not posted
		 * to the bus, the test failed.
		 */
		adapter->test_icr = 0;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		IXGBE_WRITE_FLUSH(&adapter->hw);
		usleep_range(10000, 20000);

		if (!(adapter->test_icr & mask)) {
			*data = 4;
			break;
		}

		if (!shared_int) {
			/*
			 * Disable the other interrupts to be reported in
			 * the cause register and then force the other
			 * interrupts and see if any get posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
					~mask & 0x00007FFF);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
					~mask & 0x00007FFF);
			IXGBE_WRITE_FLUSH(&adapter->hw);
			usleep_range(10000, 20000);

			if (adapter->test_icr) {
				*data = 5;
				break;
			}
		}
	}

	/* Disable all the interrupts */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	usleep_range(10000, 20000);

	/* Unhook test interrupt handler */
	free_irq(irq, netdev);

	return *data;
}

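/* ixgbe_free_desc_rings - tear down the loopback test rings
 *
 * Stops the Rx queue and Tx DMA engine used by the loopback test, resets
 * the adapter and frees the test ring resources.
 */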
static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg_ctl;

	/* shut down the DMA engines now so they can be reinitialized later */

	/* first Rx */
	hw->mac.ops.disable_rx(hw);
	ixgbe_disable_rx_queue(adapter, rx_ring);

	/* now Tx */
	reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
	reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);

	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		reg_ctl &= ~IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
		break;
	default:
		break;
	}

	ixgbe_reset(adapter);

	ixgbe_free_tx_resources(&adapter->test_tx_ring);
	ixgbe_free_rx_resources(&adapter->test_rx_ring);
}

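/* ixgbe_setup_desc_rings - allocate the Tx/Rx rings for the loopback test
 *
 * Sets up one Tx and one Rx test ring reusing the register index of ring 0,
 * enables Tx DMA where the MAC requires it and re-enables the Rx engine
 * with RXCTRL.DMBYPS set.  Returns 0 on success or a non-zero error code.
 */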
static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rctl, reg_data;
	int ret_val;
	int err;

	/* Setup Tx descriptor ring and Tx buffers */
	tx_ring->count = IXGBE_DEFAULT_TXD;
	tx_ring->queue_index = 0;
	tx_ring->dev = &adapter->pdev->dev;
	tx_ring->netdev = adapter->netdev;
	tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;

	err = ixgbe_setup_tx_resources(tx_ring);
	if (err)
		return 1;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
		reg_data |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
		break;
	default:
		break;
	}

	ixgbe_configure_tx_ring(adapter, tx_ring);

	/* Setup Rx Descriptor ring and Rx buffers */
	rx_ring->count = IXGBE_DEFAULT_RXD;
	rx_ring->queue_index = 0;
	rx_ring->dev = &adapter->pdev->dev;
	rx_ring->netdev = adapter->netdev;
	rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;

	err = ixgbe_setup_rx_resources(rx_ring);
	if (err) {
		ret_val = 4;
		goto err_nomem;
	}

	hw->mac.ops.disable_rx(hw);

	ixgbe_configure_rx_ring(adapter, rx_ring);

	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
	rctl |= IXGBE_RXCTRL_DMBYPS;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);

	hw->mac.ops.enable_rx(hw);

	return 0;

err_nomem:
	ixgbe_free_desc_rings(adapter);
	return ret_val;
}

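/* ixgbe_setup_loopback_test - put the MAC into internal loopback
 *
 * Sets HLREG0.LPBK, opens up FCTRL (accept broadcast, store bad packets,
 * multicast promiscuous) and forces link up via MACC.FLU on X540/X550
 * parts or AUTOC.FLU otherwise; on 82598 the Atlas Tx lanes are powered
 * down so frames stay internal.  Returns 10 if the original AUTOC value is
 * not known.
 */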
static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg_data;


	/* Setup MAC loopback */
	reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	reg_data |= IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);

	reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);

	/* X540 and X550 needs to set the MACC.FLU bit to force link up */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg_data |= IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
		break;
	default:
		if (hw->mac.orig_autoc) {
			reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU;
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
		} else {
			return 10;
		}
	}
	IXGBE_WRITE_FLUSH(hw);
	usleep_range(10000, 20000);

	/* Disable Atlas Tx lanes; re-enabled in reset path */
	if (hw->mac.type == ixgbe_mac_82598EB) {
		u8 atlas;

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
	}

	return 0;
}

static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
{
	u32 reg_data;

	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
	reg_data &= ~IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
}

static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
				      unsigned int frame_size)
{
	memset(skb->data, 0xFF, frame_size);
	frame_size >>= 1;
	memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1);
	memset(&skb->data[frame_size + 10], 0xBE, 1);
	memset(&skb->data[frame_size + 12], 0xAF, 1);
}

static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
				     unsigned int frame_size)
{
	unsigned char *data;
	bool match = true;

	frame_size >>= 1;

	data = kmap(rx_buffer->page) + rx_buffer->page_offset;

	if (data[3] != 0xFF ||
	    data[frame_size + 10] != 0xBE ||
	    data[frame_size + 12] != 0xAF)
		match = false;

	kunmap(rx_buffer->page);

	return match;
}

static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
				  struct ixgbe_ring *tx_ring,
				  unsigned int size)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *rx_buffer;
	struct ixgbe_tx_buffer *tx_buffer;
	u16 rx_ntc, tx_ntc, count = 0;

	/* initialize next to clean and descriptor values */
	rx_ntc = rx_ring->next_to_clean;
	tx_ntc = tx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);

	while (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) {
		/* check Rx buffer */
		rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];

		/* sync Rx buffer for CPU read */
		dma_sync_single_for_cpu(rx_ring->dev,
					rx_buffer->dma,
					ixgbe_rx_bufsz(rx_ring),
					DMA_FROM_DEVICE);

		/* verify contents of skb */
		if (ixgbe_check_lbtest_frame(rx_buffer, size))
			count++;

		/* sync Rx buffer for device write */
		dma_sync_single_for_device(rx_ring->dev,
					   rx_buffer->dma,
					   ixgbe_rx_bufsz(rx_ring),
					   DMA_FROM_DEVICE);

		/* unmap buffer on Tx side */
		tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);

		/* increment Rx/Tx next to clean counters */
		rx_ntc++;
		if (rx_ntc == rx_ring->count)
			rx_ntc = 0;
		tx_ntc++;
		if (tx_ntc == tx_ring->count)
			tx_ntc = 0;

		/* fetch next descriptor */
		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
	}

	netdev_tx_reset_queue(txring_txq(tx_ring));

	/* re-map buffers to ring, store next to clean values */
	ixgbe_alloc_rx_buffers(rx_ring, count);
	rx_ring->next_to_clean = rx_ntc;
	tx_ring->next_to_clean = tx_ntc;

	return count;
}

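/* ixgbe_run_loopback_test - send and verify frames over the MAC loopback
 *
 * Repeatedly queues batches of 64 copies of a 1024 byte test frame on the
 * Tx test ring, waits for them to loop back and counts the frames whose
 * pattern verifies on the Rx side; any shortfall fails the test.  DCB is
 * temporarily disabled because it can modify frames on Tx.
 */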
static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	int i, j, lc, good_cnt, ret_val = 0;
	unsigned int size = 1024;
	netdev_tx_t tx_ret_val;
	struct sk_buff *skb;
	u32 flags_orig = adapter->flags;

	/* DCB can modify the frames on Tx */
	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;

	/* allocate test skb */
	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return 11;

	/* place data into test skb */
	ixgbe_create_lbtest_frame(skb, size);
	skb_put(skb, size);

	/*
	 * Calculate the loop count based on the largest descriptor ring.
	 * The idea is to wrap the largest ring a number of times using 64
	 * send/receive pairs during each loop.
	 */

	if (rx_ring->count <= tx_ring->count)
		lc = ((tx_ring->count / 64) * 2) + 1;
	else
		lc = ((rx_ring->count / 64) * 2) + 1;

	for (j = 0; j <= lc; j++) {
		/* reset count of good packets */
		good_cnt = 0;

		/* place 64 packets on the transmit queue*/
		for (i = 0; i < 64; i++) {
			skb_get(skb);
			tx_ret_val = ixgbe_xmit_frame_ring(skb,
							   adapter,
							   tx_ring);
			if (tx_ret_val == NETDEV_TX_OK)
				good_cnt++;
		}

		if (good_cnt != 64) {
			ret_val = 12;
			break;
		}

		/* allow 200 milliseconds for packets to go from Tx to Rx */
		msleep(200);

		good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
		if (good_cnt != 64) {
			ret_val = 13;
			break;
		}
	}

	/* free the original skb */
	kfree_skb(skb);
	adapter->flags = flags_orig;

	return ret_val;
}

static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
{
	*data = ixgbe_setup_desc_rings(adapter);
	if (*data)
		goto out;
	*data = ixgbe_setup_loopback_test(adapter);
	if (*data)
		goto err_loopback;
	*data = ixgbe_run_loopback_test(adapter);
	ixgbe_loopback_cleanup(adapter);

err_loopback:
	ixgbe_free_desc_rings(adapter);
out:
	return *data;
}

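/* ixgbe_diag_test - ethtool self-test entry point (ethtool -t <dev>)
 *
 * data[0..4] report the register, EEPROM, interrupt, loopback and link
 * tests.  An offline request closes a running interface, runs all five
 * tests with resets in between and then restores the previous state; an
 * online request only runs the link test.  Offline testing is refused
 * while VFs are active, and the MAC loopback test is skipped in VT mode.
 */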
static void ixgbe_diag_test(struct net_device *netdev,
			    struct ethtool_test *eth_test, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	bool if_running = netif_running(netdev);

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		e_err(hw, "Adapter removed - test blocked\n");
		data[0] = 1;
		data[1] = 1;
		data[2] = 1;
		data[3] = 1;
		data[4] = 1;
		eth_test->flags |= ETH_TEST_FL_FAILED;
		return;
	}
	set_bit(__IXGBE_TESTING, &adapter->state);
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		struct ixgbe_hw *hw = &adapter->hw;

		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
			int i;
			for (i = 0; i < adapter->num_vfs; i++) {
				if (adapter->vfinfo[i].clear_to_send) {
					netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n");
					data[0] = 1;
					data[1] = 1;
					data[2] = 1;
					data[3] = 1;
					data[4] = 1;
					eth_test->flags |= ETH_TEST_FL_FAILED;
					clear_bit(__IXGBE_TESTING,
						  &adapter->state);
					goto skip_ol_tests;
				}
			}
		}

		/* Offline tests */
		e_info(hw, "offline testing starting\n");

		/* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result
		 */
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (if_running)
			/* indicate we're in test mode */
			ixgbe_close(netdev);
		else
			ixgbe_reset(adapter);

		e_info(hw, "register testing starting\n");
		if (ixgbe_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);
		e_info(hw, "eeprom testing starting\n");
		if (ixgbe_eeprom_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);
		e_info(hw, "interrupt testing starting\n");
		if (ixgbe_intr_test(adapter, &data[2]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* If SRIOV or VMDq is enabled then skip MAC
		 * loopback diagnostic. */
		if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
				      IXGBE_FLAG_VMDQ_ENABLED)) {
			e_info(hw, "Skip MAC loopback diagnostic in VT mode\n");
			data[3] = 0;
			goto skip_loopback;
		}

		ixgbe_reset(adapter);
		e_info(hw, "loopback testing starting\n");
		if (ixgbe_loopback_test(adapter, &data[3]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

skip_loopback:
		ixgbe_reset(adapter);

		/* clear testing bit and return adapter to previous state */
		clear_bit(__IXGBE_TESTING, &adapter->state);
		if (if_running)
			ixgbe_open(netdev);
		else if (hw->mac.ops.disable_tx_laser)
			hw->mac.ops.disable_tx_laser(hw);
	} else {
		e_info(hw, "online testing starting\n");

		/* Online tests */
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Offline tests aren't run; pass by default */
		data[0] = 0;
		data[1] = 0;
		data[2] = 0;
		data[3] = 0;

		clear_bit(__IXGBE_TESTING, &adapter->state);
	}

skip_ol_tests:
	msleep_interruptible(4 * 1000);
}

static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
			       struct ethtool_wolinfo *wol)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int retval = 0;

	/* WOL not supported for all devices */
	if (!ixgbe_wol_supported(adapter, hw->device_id,
				 hw->subsystem_device_id)) {
		retval = 1;
		wol->supported = 0;
	}

	return retval;
}

static void ixgbe_get_wol(struct net_device *netdev,
			  struct ethtool_wolinfo *wol)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	wol->supported = WAKE_UCAST | WAKE_MCAST |
			 WAKE_BCAST | WAKE_MAGIC;
	wol->wolopts = 0;

	if (ixgbe_wol_exclusion(adapter, wol) ||
	    !device_can_wakeup(&adapter->pdev->dev))
		return;

	if (adapter->wol & IXGBE_WUFC_EX)
		wol->wolopts |= WAKE_UCAST;
	if (adapter->wol & IXGBE_WUFC_MC)
		wol->wolopts |= WAKE_MCAST;
	if (adapter->wol & IXGBE_WUFC_BC)
		wol->wolopts |= WAKE_BCAST;
	if (adapter->wol & IXGBE_WUFC_MAG)
		wol->wolopts |= WAKE_MAGIC;
}

static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
		return -EOPNOTSUPP;

	if (ixgbe_wol_exclusion(adapter, wol))
		return wol->wolopts ? -EOPNOTSUPP : 0;

	adapter->wol = 0;

	if (wol->wolopts & WAKE_UCAST)
		adapter->wol |= IXGBE_WUFC_EX;
	if (wol->wolopts & WAKE_MCAST)
		adapter->wol |= IXGBE_WUFC_MC;
	if (wol->wolopts & WAKE_BCAST)
		adapter->wol |= IXGBE_WUFC_BC;
	if (wol->wolopts & WAKE_MAGIC)
		adapter->wol |= IXGBE_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	return 0;
}

static int ixgbe_nway_reset(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}

static int ixgbe_set_phys_id(struct net_device *netdev,
			     enum ethtool_phys_id_state state)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
		return 2;

	case ETHTOOL_ID_ON:
		hw->mac.ops.led_on(hw, IXGBE_LED_ON);
		break;

	case ETHTOOL_ID_OFF:
		hw->mac.ops.led_off(hw, IXGBE_LED_ON);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore LED settings */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
		break;
	}

	return 0;
}

static int ixgbe_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* only valid if in constant ITR mode */
	if (adapter->rx_itr_setting <= 1)
		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
	else
		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;

	/* if in mixed tx/rx queues per vector mode, report only rx settings */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
		return 0;

	/* only valid if in constant ITR mode */
	if (adapter->tx_itr_setting <= 1)
		ec->tx_coalesce_usecs = adapter->tx_itr_setting;
	else
		ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;

	return 0;
}

/*
 * this function must be called before setting the new value of
 * rx_itr_setting
 */
static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* nothing to do if LRO or RSC are not enabled */
	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ||
	    !(netdev->features & NETIF_F_LRO))
		return false;

	/* check the feature flag value and enable RSC if necessary */
	if (adapter->rx_itr_setting == 1 ||
	    adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
		if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
			adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
			e_info(probe, "rx-usecs value high enough to re-enable RSC\n");
			return true;
		}
	/* if interrupt rate is too high then disable RSC */
	} else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
		adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
		e_info(probe, "rx-usecs set too low, disabling RSC\n");
		return true;
	}
	return false;
}

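/* ixgbe_set_coalesce - ethtool interrupt moderation settings
 *
 * Converts rx-usecs/tx-usecs into ITR values (stored shifted left by two),
 * rejects separate Tx settings when Rx and Tx share a vector, re-evaluates
 * RSC eligibility, programs EITR for every queue vector and resets the
 * device when TXDCTL.WTHRESH or the RSC state has to change.
 */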
static int ixgbe_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_q_vector *q_vector;
	int i;
	u16 tx_itr_param, rx_itr_param, tx_itr_prev;
	bool need_reset = false;

	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) {
		/* reject Tx specific changes in case of mixed RxTx vectors */
		if (ec->tx_coalesce_usecs)
			return -EINVAL;
		tx_itr_prev = adapter->rx_itr_setting;
	} else {
		tx_itr_prev = adapter->tx_itr_setting;
	}

	if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
	    (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
		return -EINVAL;

	if (ec->rx_coalesce_usecs > 1)
		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
	else
		adapter->rx_itr_setting = ec->rx_coalesce_usecs;

	if (adapter->rx_itr_setting == 1)
		rx_itr_param = IXGBE_20K_ITR;
	else
		rx_itr_param = adapter->rx_itr_setting;

	if (ec->tx_coalesce_usecs > 1)
		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
	else
		adapter->tx_itr_setting = ec->tx_coalesce_usecs;

	if (adapter->tx_itr_setting == 1)
		tx_itr_param = IXGBE_12K_ITR;
	else
		tx_itr_param = adapter->tx_itr_setting;

	/* mixed Rx/Tx */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
		adapter->tx_itr_setting = adapter->rx_itr_setting;

	/* detect ITR changes that require update of TXDCTL.WTHRESH */
	if ((adapter->tx_itr_setting != 1) &&
	    (adapter->tx_itr_setting < IXGBE_100K_ITR)) {
		if ((tx_itr_prev == 1) ||
		    (tx_itr_prev >= IXGBE_100K_ITR))
			need_reset = true;
	} else {
		if ((tx_itr_prev != 1) &&
		    (tx_itr_prev < IXGBE_100K_ITR))
			need_reset = true;
	}

	/* check the old value and enable RSC if necessary */
	need_reset |= ixgbe_update_rsc(adapter);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		q_vector = adapter->q_vector[i];
		if (q_vector->tx.count && !q_vector->rx.count)
			/* tx only */
			q_vector->itr = tx_itr_param;
		else
			/* rx only or mixed */
			q_vector->itr = rx_itr_param;
		ixgbe_write_eitr(q_vector);
	}

	/*
	 * do reset here at the end to make sure EITR==0 case is handled
	 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
	 * also locks in RSC enable/disable which requires reset
	 */
	if (need_reset)
		ixgbe_do_reset(netdev);

	return 0;
}

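/* ixgbe_get_ethtool_fdir_entry - report one Flow Director filter to ethtool
 *
 * Looks the rule up by its software index and translates the stored flow
 * type, addresses, ports, VLAN and flex bytes into an ethtool_rx_flow_spec,
 * encoding the drop/queue action in ring_cookie.
 */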
static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	union ixgbe_atr_input *mask = &adapter->fdir_mask;
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *rule = NULL;

	/* report total rule count */
	cmd->data = (1024 << adapter->fdir_pballoc) - 2;

	hlist_for_each_entry_safe(rule, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		if (fsp->location <= rule->sw_idx)
			break;
	}

	if (!rule || fsp->location != rule->sw_idx)
		return -EINVAL;

	/* fill out the flow spec entry */

	/* set flow type field */
	switch (rule->filter.formatted.flow_type) {
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
		fsp->flow_type = TCP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
		fsp->flow_type = UDP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
		fsp->flow_type = SCTP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_IPV4:
		fsp->flow_type = IP_USER_FLOW;
		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		fsp->h_u.usr_ip4_spec.proto = 0;
		fsp->m_u.usr_ip4_spec.proto = 0;
		break;
	default:
		return -EINVAL;
	}

	fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
	fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
	fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
	fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
	fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
	fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
	fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
	fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
	fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
	fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
	fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
	fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
	fsp->flow_type |= FLOW_EXT;

	/* record action */
	if (rule->action == IXGBE_FDIR_DROP_QUEUE)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fsp->ring_cookie = rule->action;

	return 0;
}

static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
				      struct ethtool_rxnfc *cmd,
				      u32 *rule_locs)
{
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *rule;
	int cnt = 0;

	/* report total rule count */
	cmd->data = (1024 << adapter->fdir_pballoc) - 2;

	hlist_for_each_entry_safe(rule, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		if (cnt == cmd->rule_cnt)
			return -EMSGSIZE;
		rule_locs[cnt] = rule->sw_idx;
		cnt++;
	}

	cmd->rule_cnt = cnt;

	return 0;
}

static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
				   struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;

	/* Report default options for RSS on ixgbe */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case UDP_V4_FLOW:
		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case TCP_V6_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case UDP_V6_FLOW:
		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			   u32 *rule_locs)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = adapter->num_rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = adapter->fdir_filter_count;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
		break;
	case ETHTOOL_GRXFH:
		ret = ixgbe_get_rss_hash_opts(adapter, cmd);
		break;
	default:
		break;
	}

	return ret;
}

int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
				    struct ixgbe_fdir_filter *input,
				    u16 sw_idx)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *rule, *parent;
	int err = -EINVAL;

	parent = NULL;
	rule = NULL;

	hlist_for_each_entry_safe(rule, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		/* hash found, or no matching entry */
		if (rule->sw_idx >= sw_idx)
			break;
		parent = rule;
	}

	/* if there is an old rule occupying our place remove it */
	if (rule && (rule->sw_idx == sw_idx)) {
		if (!input || (rule->filter.formatted.bkt_hash !=
			       input->filter.formatted.bkt_hash)) {
			err = ixgbe_fdir_erase_perfect_filter_82599(hw,
								&rule->filter,
								sw_idx);
		}

		hlist_del(&rule->fdir_node);
		kfree(rule);
		adapter->fdir_filter_count--;
	}

	/*
	 * If no input this was a delete, err should be 0 if a rule was
	 * successfully found and removed from the list else -EINVAL
	 */
	if (!input)
		return err;

	/* initialize node and set software index */
	INIT_HLIST_NODE(&input->fdir_node);

	/* add filter to the list */
	if (parent)
		hlist_add_behind(&input->fdir_node, &parent->fdir_node);
	else
		hlist_add_head(&input->fdir_node,
			       &adapter->fdir_filter_list);

	/* update counts */
	adapter->fdir_filter_count++;

	return 0;
}

static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
				       u8 *flow_type)
{
	switch (fsp->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
		break;
	case UDP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
		break;
	case SCTP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
		break;
	case IP_USER_FLOW:
		switch (fsp->h_u.usr_ip4_spec.proto) {
		case IPPROTO_TCP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
			break;
		case IPPROTO_UDP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
			break;
		case IPPROTO_SCTP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
			break;
		case 0:
			if (!fsp->m_u.usr_ip4_spec.proto) {
				*flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
				break;
			}
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	return 1;
}

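/* ixgbe_add_ethtool_fdir_entry - install a Flow Director perfect filter
 *
 * Validates the requested location and target queue/VF, translates the
 * ethtool flow spec, programs the single per-port input mask on first use,
 * computes the perfect-filter hash, writes the filter to hardware and links
 * it into the driver's sorted filter list.
 */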
static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fdir_filter *input;
	union ixgbe_atr_input mask;
	u8 queue;
	int err;

	if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
		return -EOPNOTSUPP;

	/* ring_cookie is either masked into a set of queues and ixgbe pools
	 * or it holds the drop index.
	 */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
		queue = IXGBE_FDIR_DROP_QUEUE;
	} else {
		u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);

		if (!vf && (ring >= adapter->num_rx_queues))
			return -EINVAL;
		else if (vf &&
			 ((vf > adapter->num_vfs) ||
			   ring >= adapter->num_rx_queues_per_pool))
			return -EINVAL;

		/* Map the ring onto the absolute queue index */
		if (!vf)
			queue = adapter->rx_ring[ring]->reg_idx;
		else
			queue = ((vf - 1) *
				adapter->num_rx_queues_per_pool) + ring;
	}

	/* Don't allow indexes to exist outside of available space */
	if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
		e_err(drv, "Location out of range\n");
		return -EINVAL;
	}

	input = kzalloc(sizeof(*input), GFP_ATOMIC);
	if (!input)
		return -ENOMEM;

	memset(&mask, 0, sizeof(union ixgbe_atr_input));

	/* set SW index */
	input->sw_idx = fsp->location;

	/* record flow type */
	if (!ixgbe_flowspec_to_flow_type(fsp,
					 &input->filter.formatted.flow_type)) {
		e_err(drv, "Unrecognized flow type\n");
		goto err_out;
	}

	mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
				   IXGBE_ATR_L4TYPE_MASK;

	if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
		mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;

	/* Copy input into formatted structures */
	input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
	mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
	input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
	mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
	input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
	mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
	input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
	mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;

	if (fsp->flow_type & FLOW_EXT) {
		input->filter.formatted.vm_pool =
				(unsigned char)ntohl(fsp->h_ext.data[1]);
		mask.formatted.vm_pool =
				(unsigned char)ntohl(fsp->m_ext.data[1]);
		input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
		mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
		input->filter.formatted.flex_bytes =
						fsp->h_ext.vlan_etype;
		mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
	}

	/* determine if we need to drop or route the packet */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
		input->action = IXGBE_FDIR_DROP_QUEUE;
	else
		input->action = fsp->ring_cookie;

	spin_lock(&adapter->fdir_perfect_lock);

	if (hlist_empty(&adapter->fdir_filter_list)) {
		/* save mask and program input mask into HW */
		memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
		err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
		if (err) {
			e_err(drv, "Error writing mask\n");
			goto err_out_w_lock;
		}
	} else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
		e_err(drv, "Only one mask supported per port\n");
		goto err_out_w_lock;
	}

	/* apply mask and compute/store hash */
	ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);

	/* program filters to filter memory */
	err = ixgbe_fdir_write_perfect_filter_82599(hw,
				&input->filter, input->sw_idx, queue);
	if (err)
		goto err_out_w_lock;

	ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);

	spin_unlock(&adapter->fdir_perfect_lock);

	return err;
err_out_w_lock:
	spin_unlock(&adapter->fdir_perfect_lock);
err_out:
	kfree(input);
	return -EINVAL;
}

static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	int err;

	spin_lock(&adapter->fdir_perfect_lock);
	err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
	spin_unlock(&adapter->fdir_perfect_lock);

	return err;
}

#define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
		       IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
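/* ixgbe_set_rss_hash_opt - adjust the fields hashed by RSS
 *
 * Only source/destination IP and, for UDP flows, the L4 ports can be
 * toggled.  Changes are folded into MRQC (or PFVFMRQC on X550 and newer
 * when SR-IOV is enabled) and enabling UDP RSS warns about possible
 * reordering of fragmented packets.
 */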
static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
				  struct ethtool_rxnfc *nfc)
{
	u32 flags2 = adapter->flags2;

	/*
	 * RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    !(nfc->data & RXH_L4_B_0_1) ||
		    !(nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	case UDP_V4_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    (nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	/* if we changed something we need to update flags */
	if (flags2 != adapter->flags2) {
		struct ixgbe_hw *hw = &adapter->hw;
		u32 mrqc;
		unsigned int pf_pool = adapter->num_vfs;

		if ((hw->mac.type >= ixgbe_mac_X550) &&
		    (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
			mrqc = IXGBE_READ_REG(hw, IXGBE_PFVFMRQC(pf_pool));
		else
			mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);

		if ((flags2 & UDP_RSS_FLAGS) &&
		    !(adapter->flags2 & UDP_RSS_FLAGS))
			e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");

		adapter->flags2 = flags2;

		/* Perform hash on these packet types */
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
		      | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
		      | IXGBE_MRQC_RSS_FIELD_IPV6
		      | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;

		mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
			  IXGBE_MRQC_RSS_FIELD_IPV6_UDP);

		if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
			mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;

		if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
			mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;

		if ((hw->mac.type >= ixgbe_mac_X550) &&
		    (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
			IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), mrqc);
		else
			IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	}

	return 0;
}

static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_SRXFH:
		ret = ixgbe_set_rss_hash_opt(adapter, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
{
	if (adapter->hw.mac.type < ixgbe_mac_X550)
		return 16;
	else
		return 64;
}

static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	return sizeof(adapter->rss_key);
}

static u32 ixgbe_rss_indir_size(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	return ixgbe_rss_indir_tbl_entries(adapter);
}

static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir)
{
	int i, reta_size = ixgbe_rss_indir_tbl_entries(adapter);

	for (i = 0; i < reta_size; i++)
		indir[i] = adapter->rss_indir_tbl[i];
}

static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
			  u8 *hfunc)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (indir)
		ixgbe_get_reta(adapter, indir);

	if (key)
		memcpy(key, adapter->rss_key, ixgbe_get_rxfh_key_size(netdev));

	return 0;
}

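/* ixgbe_set_rxfh - ethtool RSS key and indirection table update
 *
 * The hash function cannot be changed.  A new indirection table is
 * validated against the usable queue count before being copied, a new key
 * is copied verbatim, and the result is pushed to hardware through
 * ixgbe_store_reta().
 */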
static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
			  const u8 *key, const u8 hfunc)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int i;
	u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);

	if (hfunc)
		return -EINVAL;

	/* Fill out the redirection table */
	if (indir) {
		int max_queues = min_t(int, adapter->num_rx_queues,
				       ixgbe_rss_indir_tbl_max(adapter));

		/* Allow at least 2 queues w/ SR-IOV. */
		if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
		    (max_queues < 2))
			max_queues = 2;

		/* Verify user input. */
		for (i = 0; i < reta_entries; i++)
			if (indir[i] >= max_queues)
				return -EINVAL;

		for (i = 0; i < reta_entries; i++)
			adapter->rss_indir_tbl[i] = indir[i];
	}

	/* Fill out the rss hash key */
	if (key)
		memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev));

	ixgbe_store_reta(adapter);

	return 0;
}

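/* ixgbe_get_ts_info - report hardware timestamping capabilities
 *
 * X550-class parts can timestamp every Rx packet, 82599/X540 only the
 * common PTP filters; anything else falls back to the software-only
 * defaults from ethtool_op_get_ts_info().
 */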
static int ixgbe_get_ts_info(struct net_device *dev,
			     struct ethtool_ts_info *info)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);

	/* we always support timestamping disabled */
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
		/* fallthrough */
	case ixgbe_mac_X540:
	case ixgbe_mac_82599EB:
		info->so_timestamping =
			SOF_TIMESTAMPING_TX_SOFTWARE |
			SOF_TIMESTAMPING_RX_SOFTWARE |
			SOF_TIMESTAMPING_SOFTWARE |
			SOF_TIMESTAMPING_TX_HARDWARE |
			SOF_TIMESTAMPING_RX_HARDWARE |
			SOF_TIMESTAMPING_RAW_HARDWARE;

		if (adapter->ptp_clock)
			info->phc_index = ptp_clock_index(adapter->ptp_clock);
		else
			info->phc_index = -1;

		info->tx_types =
			BIT(HWTSTAMP_TX_OFF) |
			BIT(HWTSTAMP_TX_ON);

		info->rx_filters |=
			BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
			BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
			BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
		break;
	default:
		return ethtool_op_get_ts_info(dev, info);
	}
	return 0;
}

static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
{
	unsigned int max_combined;
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/* We only support one q_vector without MSI-X */
		max_combined = 1;
	} else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		/* SR-IOV currently only allows one queue on the PF */
		max_combined = 1;
	} else if (tcs > 1) {
		/* For DCB report channels per traffic class */
		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			/* 8 TC w/ 4 queues per TC */
			max_combined = 4;
		} else if (tcs > 4) {
			/* 8 TC w/ 8 queues per TC */
			max_combined = 8;
		} else {
			/* 4 TC w/ 16 queues per TC */
			max_combined = 16;
		}
	} else if (adapter->atr_sample_rate) {
		/* support up to 64 queues with ATR */
		max_combined = IXGBE_MAX_FDIR_INDICES;
	} else {
		/* support up to 16 queues with RSS */
		max_combined = ixgbe_max_rss_indices(adapter);
	}

	return max_combined;
}

static void ixgbe_get_channels(struct net_device *dev,
			       struct ethtool_channels *ch)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);

	/* report maximum channels */
	ch->max_combined = ixgbe_max_channels(adapter);

	/* report info for other vector */
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		ch->max_other = NON_Q_VECTORS;
		ch->other_count = NON_Q_VECTORS;
	}

	/* record RSS queues */
	ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;

	/* nothing else to report if RSS is disabled */
	if (ch->combined_count == 1)
		return;

	/* we do not support ATR queueing if SR-IOV is enabled */
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		return;

	/* same thing goes for being DCB enabled */
	if (netdev_get_num_tc(dev) > 1)
		return;

	/* if ATR is disabled we can exit */
	if (!adapter->atr_sample_rate)
		return;

	/* report flow director queues as maximum channels */
	ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices;
}

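/* ixgbe_set_channels - ethtool queue count changes
 *
 * Only the combined channel count may be changed.  The request is bounded
 * by ixgbe_max_channels(), stored as the new FDIR/RSS (and FCoE) limits and
 * applied by re-running setup_tc for the current number of traffic classes.
 */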
static int ixgbe_set_channels(struct net_device *dev,
			      struct ethtool_channels *ch)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	unsigned int count = ch->combined_count;
	u8 max_rss_indices = ixgbe_max_rss_indices(adapter);

	/* verify they are not requesting separate vectors */
	if (!count || ch->rx_count || ch->tx_count)
		return -EINVAL;

	/* verify other_count has not changed */
	if (ch->other_count != NON_Q_VECTORS)
		return -EINVAL;

	/* verify the number of channels does not exceed hardware limits */
	if (count > ixgbe_max_channels(adapter))
		return -EINVAL;

	/* update feature limits from largest to smallest supported values */
	adapter->ring_feature[RING_F_FDIR].limit = count;

	/* cap RSS limit */
	if (count > max_rss_indices)
		count = max_rss_indices;
	adapter->ring_feature[RING_F_RSS].limit = count;

#ifdef IXGBE_FCOE
	/* cap FCoE limit at 8 */
	if (count > IXGBE_FCRETA_SIZE)
		count = IXGBE_FCRETA_SIZE;
	adapter->ring_feature[RING_F_FCOE].limit = count;

#endif
	/* use setup TC to update any traffic class queue mapping */
	return ixgbe_setup_tc(dev, netdev_get_num_tc(dev));
}

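/* ixgbe_get_module_info - describe the plugged SFP module EEPROM
 *
 * Reads the SFF-8472 compliance and addressing-mode bytes over I2C to
 * decide whether the full SFF-8472 EEPROM or only the SFF-8079 page can be
 * exposed to ethtool.
 */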
static int ixgbe_get_module_info(struct net_device *dev,
				       struct ethtool_modinfo *modinfo)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	s32 status;
	u8 sff8472_rev, addr_mode;
	bool page_swap = false;

	/* Check whether we support SFF-8472 or not */
	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_SFF_8472_COMP,
					     &sff8472_rev);
	if (status)
		return -EIO;

	/* addressing mode is not supported */
	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_SFF_8472_SWAP,
					     &addr_mode);
	if (status)
		return -EIO;

	if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
		e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
		page_swap = true;
	}

	if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
		/* We have a SFP, but it does not support SFF-8472 */
		modinfo->type = ETH_MODULE_SFF_8079;
		modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
	} else {
		/* We have a SFP which supports a revision of SFF-8472. */
		modinfo->type = ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
	}

	return 0;
}

static int ixgbe_get_module_eeprom(struct net_device *dev,
					 struct ethtool_eeprom *ee,
					 u8 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
	u8 databyte = 0xFF;
	int i = 0;

	if (ee->len == 0)
		return -EINVAL;

	for (i = ee->offset; i < ee->offset + ee->len; i++) {
		/* I2C reads can take long time */
		if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
			return -EBUSY;

		if (i < ETH_MODULE_SFF_8079_LEN)
			status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
		else
			status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);

		if (status)
			return -EIO;

		data[i - ee->offset] = databyte;
	}

	return 0;
}

static const struct ethtool_ops ixgbe_ethtool_ops = {
	.get_settings           = ixgbe_get_settings,
	.set_settings           = ixgbe_set_settings,
	.get_drvinfo            = ixgbe_get_drvinfo,
	.get_regs_len           = ixgbe_get_regs_len,
	.get_regs               = ixgbe_get_regs,
	.get_wol                = ixgbe_get_wol,
	.set_wol                = ixgbe_set_wol,
	.nway_reset             = ixgbe_nway_reset,
	.get_link               = ethtool_op_get_link,
	.get_eeprom_len         = ixgbe_get_eeprom_len,
	.get_eeprom             = ixgbe_get_eeprom,
	.set_eeprom             = ixgbe_set_eeprom,
	.get_ringparam          = ixgbe_get_ringparam,
	.set_ringparam          = ixgbe_set_ringparam,
	.get_pauseparam         = ixgbe_get_pauseparam,
	.set_pauseparam         = ixgbe_set_pauseparam,
	.get_msglevel           = ixgbe_get_msglevel,
	.set_msglevel           = ixgbe_set_msglevel,
	.self_test              = ixgbe_diag_test,
	.get_strings            = ixgbe_get_strings,
	.set_phys_id            = ixgbe_set_phys_id,
	.get_sset_count         = ixgbe_get_sset_count,
	.get_ethtool_stats      = ixgbe_get_ethtool_stats,
	.get_coalesce           = ixgbe_get_coalesce,
	.set_coalesce           = ixgbe_set_coalesce,
	.get_rxnfc		= ixgbe_get_rxnfc,
	.set_rxnfc		= ixgbe_set_rxnfc,
	.get_rxfh_indir_size	= ixgbe_rss_indir_size,
	.get_rxfh_key_size	= ixgbe_get_rxfh_key_size,
	.get_rxfh		= ixgbe_get_rxfh,
	.set_rxfh		= ixgbe_set_rxfh,
	.get_channels		= ixgbe_get_channels,
	.set_channels		= ixgbe_set_channels,
	.get_ts_info		= ixgbe_get_ts_info,
	.get_module_info	= ixgbe_get_module_info,
	.get_module_eeprom	= ixgbe_get_module_eeprom,
};

void ixgbe_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ixgbe_ethtool_ops;
}