/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2016 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/sched.h>

#include "ixgbe.h"
#include "ixgbe_phy.h"

/* 82598-specific hardware limits */
#define IXGBE_82598_MAX_TX_QUEUES 32
#define IXGBE_82598_MAX_RX_QUEUES 64
#define IXGBE_82598_RAR_ENTRIES   16
#define IXGBE_82598_MC_TBL_SIZE  128
#define IXGBE_82598_VFT_TBL_SIZE 128
#define IXGBE_82598_RX_PB_SIZE	 512

static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg_wait_to_complete);
static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
				       u8 *eeprom_data);

/**
 *  ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
 *  @hw: pointer to the HW structure
 *
 *  The defaults for 82598 should be in the range of 50us to 50ms,
 *  however the hardware default for these parts is 500us to 1ms which is less
 *  than the 10ms recommended by the pci-e spec.  To address this we need to
 *  increase the value to either 10ms to 250ms for capability version 1 config,
 *  or 16ms to 55ms for version 2.
 **/
static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
{
	u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
	u16 pcie_devctl2;

	/* Adapter may have been surprise-removed; registers are gone */
	if (ixgbe_removed(hw->hw_addr))
		return;

	/* only take action if timeout value is defaulted to 0 */
	if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
		goto out;

	/*
	 * if capabilities version is type 1 we can write the
	 * timeout of 10ms to 250ms through the GCR register
	 */
	if (!(gcr & IXGBE_GCR_CAP_VER2)) {
		gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
		goto out;
	}

	/*
	 * for version 2 capabilities we need to write the config space
	 * directly in order to set the completion timeout value for
	 * 16ms to 55ms
	 */
	pcie_devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2);
	pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
	ixgbe_write_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
out:
	/* disable completion timeout resend */
	gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
	IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
}

94
static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
95 96 97 98 99 100 101 102 103
{
	struct ixgbe_mac_info *mac = &hw->mac;

	/* Call PHY identify routine to get the phy type */
	ixgbe_identify_phy_generic(hw);

	mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
	mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
	mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
104
	mac->rx_pb_size = IXGBE_82598_RX_PB_SIZE;
105 106
	mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
	mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
107
	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
108 109 110 111 112 113 114 115 116 117 118 119 120

	return 0;
}

/**
 *  ixgbe_init_phy_ops_82598 - PHY/SFP specific init
 *  @hw: pointer to hardware structure
 *
 *  Initialize any function pointers that were not able to be
 *  set during get_invariants because the PHY/SFP type was
 *  not known.  Perform the SFP init if necessary.
 *
 **/
121
static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
122
{
123 124
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
125
	s32 ret_val;
D
Donald Skidmore 已提交
126
	u16 list_offset, data_offset;
127

128 129
	/* Identify the PHY */
	phy->ops.identify(hw);
130

131 132 133 134
	/* Overwrite the link function pointers if copper PHY */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
		mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
		mac->ops.get_link_capabilities =
135
			&ixgbe_get_copper_link_capabilities_generic;
136
	}
137

138
	switch (hw->phy.type) {
139
	case ixgbe_phy_tn:
140
		phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
141 142
		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
		phy->ops.get_firmware_version =
143
			     &ixgbe_get_phy_firmware_version_tnx;
144
		break;
D
Donald Skidmore 已提交
145 146 147 148 149
	case ixgbe_phy_nl:
		phy->ops.reset = &ixgbe_reset_phy_nl;

		/* Call SFP+ identify routine to get the SFP+ module type */
		ret_val = phy->ops.identify_sfp(hw);
150 151 152 153
		if (ret_val)
			return ret_val;
		if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
			return IXGBE_ERR_SFP_NOT_SUPPORTED;
D
Donald Skidmore 已提交
154 155 156

		/* Check to see if SFP+ module is supported */
		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
157 158
							    &list_offset,
							    &data_offset);
159 160
		if (ret_val)
			return IXGBE_ERR_SFP_NOT_SUPPORTED;
D
Donald Skidmore 已提交
161
		break;
162 163 164 165
	default:
		break;
	}

166
	return 0;
167 168
}

/**
 *  ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
 *  @hw: pointer to hardware structure
 *
 *  Starts the hardware using the generic start_hw function.
 *  Disables relaxed ordering for archs other than SPARC
 *  Then set pcie completion timeout
 *
 **/
static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
{
#ifndef CONFIG_SPARC
	u32 regval;
	u32 i;
#endif
	s32 ret_val;

	ret_val = ixgbe_start_hw_generic(hw);

#ifndef CONFIG_SPARC
	/* Disable relaxed ordering on every DCA-capable Tx/Rx queue */
	for (i = 0; ((i < hw->mac.max_tx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
	}

	for (i = 0; ((i < hw->mac.max_rx_queues) &&
	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
	}
#endif
	/* Generic start_hw failure is reported only after the loops above */
	if (ret_val)
		return ret_val;

	/* set the completion timeout for interface */
	ixgbe_set_pcie_completion_timeout(hw);

	return 0;
}

214
/**
215
 *  ixgbe_get_link_capabilities_82598 - Determines link capabilities
216 217 218 219
 *  @hw: pointer to hardware structure
 *  @speed: pointer to link speed
 *  @autoneg: boolean auto-negotiation value
 *
220
 *  Determines the link capabilities by reading the AUTOC register.
221
 **/
222
static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
223 224
					     ixgbe_link_speed *speed,
					     bool *autoneg)
225
{
226
	u32 autoc = 0;
227

228 229
	/*
	 * Determine link capabilities based on the stored value of AUTOC,
230 231
	 * which represents EEPROM defaults.  If AUTOC value has not been
	 * stored, use the current register value.
232
	 */
233 234 235 236 237 238
	if (hw->mac.orig_link_settings_stored)
		autoc = hw->mac.orig_autoc;
	else
		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = false;
		break;

	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		*autoneg = false;
		break;

	case IXGBE_AUTOC_LMS_1G_AN:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = true;
		break;

	case IXGBE_AUTOC_LMS_KX4_AN:
	case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
257
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
258
			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
259
		if (autoc & IXGBE_AUTOC_KX_SUPP)
260 261 262 263 264
			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
		*autoneg = true;
		break;

	default:
265
		return IXGBE_ERR_LINK_SETUP;
266 267
	}

268
	return 0;
269 270 271 272 273 274 275 276 277 278
}

/**
 *  ixgbe_get_media_type_82598 - Determines media type
 *  @hw: pointer to hardware structure
 *
 *  Returns the media type (fiber, copper, backplane)
 **/
static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
{
E
Emil Tantilov 已提交
279 280 281 282
	/* Detect if there is a copper PHY attached. */
	switch (hw->phy.type) {
	case ixgbe_phy_cu_unknown:
	case ixgbe_phy_tn:
283 284
		return ixgbe_media_type_copper;

E
Emil Tantilov 已提交
285 286 287 288
	default:
		break;
	}

289 290
	/* Media type for I82598 is based on device ID */
	switch (hw->device_id) {
D
Don Skidmore 已提交
291
	case IXGBE_DEV_ID_82598:
292
	case IXGBE_DEV_ID_82598_BX:
E
Emil Tantilov 已提交
293
		/* Default device ID is mezzanine card KX/KX4 */
294 295
		return ixgbe_media_type_backplane;

296 297
	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
D
Donald Skidmore 已提交
298 299
	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
300
	case IXGBE_DEV_ID_82598EB_XF_LR:
D
Donald Skidmore 已提交
301
	case IXGBE_DEV_ID_82598EB_SFP_LOM:
302 303
		return ixgbe_media_type_fiber;

304 305
	case IXGBE_DEV_ID_82598EB_CX4:
	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
306 307
		return ixgbe_media_type_cx4;

308
	case IXGBE_DEV_ID_82598AT:
309
	case IXGBE_DEV_ID_82598AT2:
310 311
		return ixgbe_media_type_copper;

312
	default:
313
		return ixgbe_media_type_unknown;
314 315 316
	}
}

/**
 *  ixgbe_fc_enable_82598 - Enable flow control
 *  @hw: pointer to hardware structure
 *
 *  Enable flow control according to the current settings.
 **/
static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
{
	u32 fctrl_reg;
	u32 rmcs_reg;
	u32 reg;
	u32 fcrtl, fcrth;
	u32 link_speed = 0;
	int i;
	bool link_up;

	/* Validate the water mark configuration */
	if (!hw->fc.pause_time)
		return IXGBE_ERR_INVALID_LINK_SETTINGS;

	/* Low water mark of zero causes XOFF floods */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			if (!hw->fc.low_water[i] ||
			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
				hw_dbg(hw, "Invalid water mark configuration\n");
				return IXGBE_ERR_INVALID_LINK_SETTINGS;
			}
		}
	}

	/*
	 * On 82598 having Rx FC on causes resets while doing 1G
	 * so if it's on turn it off once we know link_speed. For
	 * more details see 82598 Specification update.
	 */
	hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
	if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
		switch (hw->fc.requested_mode) {
		case ixgbe_fc_full:
			hw->fc.requested_mode = ixgbe_fc_tx_pause;
			break;
		case ixgbe_fc_rx_pause:
			hw->fc.requested_mode = ixgbe_fc_none;
			break;
		default:
			/* no change */
			break;
		}
	}

	/* Negotiate the fc mode to use */
	ixgbe_fc_autoneg(hw);

	/* Disable any previous flow control settings */
	fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);

	rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
	rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *     we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		fctrl_reg |= IXGBE_FCTRL_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		fctrl_reg |= IXGBE_FCTRL_RFCE;
		rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
		break;
	default:
		hw_dbg(hw, "Flow control param set incorrectly\n");
		return IXGBE_ERR_CONFIG;
	}

	/* Set 802.3x based flow control settings. */
	fctrl_reg |= IXGBE_FCTRL_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
	IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);

	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
		}

	}

	/* Configure pause time (2 TCs per register) */
	reg = hw->fc.pause_time * 0x00010001;
	for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

	return 0;
}

/**
 *  ixgbe_start_mac_link_82598 - Configures MAC link settings
 *  @hw: pointer to hardware structure
 *  @autoneg_wait_to_complete: true when waiting for autoneg completion
 *
 *  Configures link settings based on values in the ixgbe_hw struct.
 *  Restarts the link.  Performs autonegotiation if needed.
 **/
static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
				      bool autoneg_wait_to_complete)
{
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = 0;

	/* Restart link */
	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);

	/* Only poll for autoneg to complete if specified to do so */
	if (autoneg_wait_to_complete) {
		/* Polling only applies to the KX4 autoneg link modes */
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		     IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msleep(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				hw_dbg(hw, "Autonegotiation did not complete.\n");
			}
		}
	}

	/* Add delay to filter out noises during initial link setup */
	msleep(50);

	return status;
}

501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534
/**
 *  ixgbe_validate_link_ready - Function looks for phy link
 *  @hw: pointer to hardware structure
 *
 *  Function indicates success when phy link is available. If phy is not ready
 *  within 5 seconds of MAC indicating link, the function returns error.
 **/
static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
{
	u32 timeout;
	u16 an_reg;

	if (hw->device_id != IXGBE_DEV_ID_82598AT2)
		return 0;

	for (timeout = 0;
	     timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
		hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, &an_reg);

		if ((an_reg & MDIO_AN_STAT1_COMPLETE) &&
		    (an_reg & MDIO_STAT1_LSTATUS))
			break;

		msleep(100);
	}

	if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
		hw_dbg(hw, "Link was indicated but link is down\n");
		return IXGBE_ERR_LINK_SETUP;
	}

	return 0;
}

/**
 *  ixgbe_check_mac_link_82598 - Get link/speed status
 *  @hw: pointer to hardware structure
 *  @speed: pointer to link speed
 *  @link_up: true is link is up, false otherwise
 *  @link_up_wait_to_complete: bool used to wait for link up or not
 *
 *  Reads the links register to determine if link is up and the current speed
 **/
static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed *speed, bool *link_up,
				      bool link_up_wait_to_complete)
{
	u32 links_reg;
	u32 i;
	u16 link_reg, adapt_comp_reg;

	/*
	 * SERDES PHY requires us to read link status from register 0xC79F.
	 * Bit 0 set indicates link is up/ready; clear indicates link down.
	 * 0xC00C is read to check that the XAUI lanes are active.  Bit 0
	 * clear indicates active; set indicates inactive.
	 */
	if (hw->phy.type == ixgbe_phy_nl) {
		/* 0xC79F is read twice; the first read is discarded */
		hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
		hw->phy.ops.read_reg(hw, 0xC00C, MDIO_MMD_PMAPMD,
				     &adapt_comp_reg);
		if (link_up_wait_to_complete) {
			for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
				if ((link_reg & 1) &&
				    ((adapt_comp_reg & 1) == 0)) {
					*link_up = true;
					break;
				} else {
					*link_up = false;
				}
				msleep(100);
				hw->phy.ops.read_reg(hw, 0xC79F,
						     MDIO_MMD_PMAPMD,
						     &link_reg);
				hw->phy.ops.read_reg(hw, 0xC00C,
						     MDIO_MMD_PMAPMD,
						     &adapt_comp_reg);
			}
		} else {
			if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
				*link_up = true;
			else
				*link_up = false;
		}

		/* No point checking the MAC if the PHY reports link down */
		if (!*link_up)
			return 0;
	}

	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if (link_up_wait_to_complete) {
		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = true;
				break;
			} else {
				*link_up = false;
			}
			msleep(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = true;
		else
			*link_up = false;
	}

	if (links_reg & IXGBE_LINKS_SPEED)
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
	else
		*speed = IXGBE_LINK_SPEED_1GB_FULL;

	/* 82598AT2 additionally requires the PHY to confirm link */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && *link_up &&
	    (ixgbe_validate_link_ready(hw) != 0))
		*link_up = false;

	return 0;
}

/**
 *  ixgbe_setup_mac_link_82598 - Set MAC link speed
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg_wait_to_complete: true when waiting for completion is needed
 *
 *  Set the link speed in the AUTOC register and restarts link.
 **/
static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed,
				      bool autoneg_wait_to_complete)
{
	bool		 autoneg	   = false;
	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
	u32              curr_autoc        = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32              autoc             = curr_autoc;
	u32              link_mode         = autoc & IXGBE_AUTOC_LMS_MASK;

	/* Check to see if speed passed in is supported. */
	ixgbe_get_link_capabilities_82598(hw, &link_capabilities, &autoneg);
	speed &= link_capabilities;

	if (speed == IXGBE_LINK_SPEED_UNKNOWN)
		return IXGBE_ERR_LINK_SETUP;

	/* Set KX4/KX support according to speed requested */
	else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
		 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
		autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
			autoc |= IXGBE_AUTOC_KX4_SUPP;
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			autoc |= IXGBE_AUTOC_KX_SUPP;
		/* Avoid a register write when nothing changed */
		if (autoc != curr_autoc)
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
	}

	/* Setup and restart the link based on the new values in
	 * ixgbe_hw This will write the AUTOC register based on the new
	 * stored values
	 */
	return ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
}


/**
668
 *  ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
669 670 671 672 673 674
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg_wait_to_complete: true if waiting is needed to complete
 *
 *  Sets the link speed in the AUTOC register in the MAC and restarts link.
 **/
675
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
676 677
					       ixgbe_link_speed speed,
					       bool autoneg_wait_to_complete)
678
{
679
	s32 status;
680 681

	/* Setup the PHY according to input speed */
682
	status = hw->phy.ops.setup_link_speed(hw, speed,
683
					      autoneg_wait_to_complete);
684
	/* Set up MAC */
685
	ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
686 687 688 689 690 691 692 693

	return status;
}

/**
 *  ixgbe_reset_hw_82598 - Performs hardware reset
 *  @hw: pointer to hardware structure
 *
 *  Resets the hardware by resetting the transmit and receive units, masks and
 *  clears all interrupts, performing a PHY reset, and performing a link (MAC)
 *  reset.
 **/
static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
{
	s32 status;
	s32 phy_status = 0;
	u32 ctrl;
	u32 gheccr;
	u32 i;
	u32 autoc;
	u8  analog_val;

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status)
		return status;

	/*
	 * Power up the Atlas Tx lanes if they are currently powered down.
	 * Atlas Tx lanes are powered down for MAC loopback tests, but
	 * they are not automatically restored on reset.
	 */
	hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
		/* Enable Tx Atlas so packets can be transmitted again */
		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
					      analog_val);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
					     &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
					      analog_val);
	}

	/* Reset PHY */
	if (hw->phy.reset_disable == false) {
		/* PHY ops must be identified and initialized prior to reset */

		/* Init PHY and function pointers, perform SFP setup */
		phy_status = hw->phy.ops.init(hw);
		if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
			return phy_status;
		if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
			goto mac_reset_top;

		hw->phy.ops.reset(hw);
	}

mac_reset_top:
	/*
	 * Issue global reset to the MAC.  This needs to be a SW reset.
	 * If link reset is used, it might reset the MAC when mng is using it
	 */
	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);
	usleep_range(1000, 1200);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST))
			break;
		udelay(1);
	}
	if (ctrl & IXGBE_CTRL_RST) {
		status = IXGBE_ERR_RESET_FAILED;
		hw_dbg(hw, "Reset polling failed to complete.\n");
	}

	msleep(50);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);

	/*
	 * Store the original AUTOC value if it has not been
	 * stored off yet.  Otherwise restore the stored original
	 * AUTOC value since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	if (hw->mac.orig_link_settings_stored == false) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_link_settings_stored = true;
	} else if (autoc != hw->mac.orig_autoc) {
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table
	 */
	hw->mac.ops.init_rx_addrs(hw);

	/* A deferred PHY init failure takes precedence over MAC status */
	if (phy_status)
		status = phy_status;

	return status;
}

826 827 828 829 830 831
/**
 *  ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
 *  @hw: pointer to hardware struct
 *  @rar: receive address register index to associate with a VMDq index
 *  @vmdq: VMDq set index
 **/
832
static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
833 834
{
	u32 rar_high;
835 836 837 838 839 840 841
	u32 rar_entries = hw->mac.num_rar_entries;

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}
842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860

	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
	rar_high &= ~IXGBE_RAH_VIND_MASK;
	rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
	return 0;
}

/**
 *  ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
 *  @hw: pointer to hardware struct
 *  @rar: receive address register index to associate with a VMDq index
 *  @vmdq: VMDq clear index (not used in 82598, but elsewhere)
 **/
static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 rah;

	/* Reject RAR indices beyond what this MAC provides */
	if (rar >= hw->mac.num_rar_entries) {
		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/* Only write the register back if a pool index was actually set */
	rah = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
	if (rah & IXGBE_RAH_VIND_MASK) {
		rah &= ~IXGBE_RAH_VIND_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rah);
	}

	return 0;
}
/**
 *  ixgbe_set_vfta_82598 - Set VLAN filter table
 *  @hw: pointer to hardware structure
 *  @vlan: VLAN id to write to VLAN filter
 *  @vind: VMDq output index that maps queue to VLAN id in VFTA
 *  @vlan_on: boolean flag to turn on/off VLAN in VFTA
883
 *  @vlvf_bypass: boolean flag - unused
884 885 886
 *
 *  Turn on/off specified VLAN in the VLAN filter table.
 **/
887
static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
888
				bool vlan_on, bool vlvf_bypass)
889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942
{
	u32 regindex;
	u32 bitindex;
	u32 bits;
	u32 vftabyte;

	if (vlan > 4095)
		return IXGBE_ERR_PARAM;

	/* Determine 32-bit word position in array */
	regindex = (vlan >> 5) & 0x7F;   /* upper seven bits */

	/* Determine the location of the (VMD) queue index */
	vftabyte =  ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
	bitindex = (vlan & 0x7) << 2;    /* lower 3 bits indicate nibble */

	/* Set the nibble for VMD queue index */
	bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
	bits &= (~(0x0F << bitindex));
	bits |= (vind << bitindex);
	IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);

	/* Determine the location of the bit for this VLAN id */
	bitindex = vlan & 0x1F;   /* lower five bits */

	bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
	if (vlan_on)
		/* Turn on this VLAN id */
		bits |= (1 << bitindex);
	else
		/* Turn off this VLAN id */
		bits &= ~(1 << bitindex);
	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);

	return 0;
}

/**
 *  ixgbe_clear_vfta_82598 - Clear VLAN filter table
 *  @hw: pointer to hardware structure
 *
 *  Clears the VLAN filer table, and the VMDq index associated with the filter
 **/
static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
{
	u32 reg_idx;
	u32 byte_idx;

	/* Wipe every VLAN filter bit */
	for (reg_idx = 0; reg_idx < hw->mac.vft_size; reg_idx++)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(reg_idx), 0);

	/* Wipe every stored VMDq pool nibble as well */
	for (byte_idx = 0; byte_idx < 4; byte_idx++) {
		for (reg_idx = 0; reg_idx < hw->mac.vft_size; reg_idx++)
			IXGBE_WRITE_REG(hw,
					IXGBE_VFTAVIND(byte_idx, reg_idx), 0);
	}

	return 0;
}

/**
 *  ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
 *  @hw: pointer to hardware structure
 *  @reg: analog register to read
 *  @val: read value
 *
 *  Performs read operation to Atlas analog register specified.
 **/
956
static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
957 958 959 960
{
	u32  atlas_ctl;

	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
961
			IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977
	IXGBE_WRITE_FLUSH(hw);
	udelay(10);
	atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
	*val = (u8)atlas_ctl;

	return 0;
}

/**
 *  ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
 *  @hw: pointer to hardware structure
 *  @reg: atlas register to write
 *  @val: value to write
 *
 *  Performs write operation to Atlas analog register specified.
 **/
978
static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
979 980 981 982 983 984 985 986 987 988 989
{
	u32  atlas_ctl;

	atlas_ctl = (reg << 8) | val;
	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
	IXGBE_WRITE_FLUSH(hw);
	udelay(10);

	return 0;
}

D
Donald Skidmore 已提交
990
/**
 *  ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface.
 *  @hw: pointer to hardware structure
 *  @dev_addr: address to read from
 *  @byte_offset: byte offset to read from dev_addr
 *  @eeprom_data: value read
 *
 *  Performs 8 byte read operation to SFP module's data over I2C interface.
 *  Only supported for PHY type ixgbe_phy_nl; returns IXGBE_ERR_PHY otherwise.
 **/
static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
				    u8 byte_offset, u8 *eeprom_data)
{
	s32 status = 0;
	u16 sfp_addr = 0;
	u16 sfp_data = 0;
	u16 sfp_stat = 0;
	u16 gssr;
	u32 i;

	/* Take the PHY semaphore matching this port (LAN0 vs LAN1) */
	if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
		gssr = IXGBE_GSSR_PHY1_SM;
	else
		gssr = IXGBE_GSSR_PHY0_SM;

	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
		return IXGBE_ERR_SWFW_SYNC;

	if (hw->phy.type == ixgbe_phy_nl) {
		/*
		 * phy SDA/SCL registers are at addresses 0xC30A to
		 * 0xC30D.  These registers are used to talk to the SFP+
		 * module's EEPROM through the SDA/SCL (I2C) interface.
		 */
		/* Encode device address (high byte) + offset, with read flag */
		sfp_addr = (dev_addr << 8) + byte_offset;
		sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
		hw->phy.ops.write_reg_mdi(hw,
					  IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
					  MDIO_MMD_PMAPMD,
					  sfp_addr);

		/* Poll status until the transaction leaves IN_PROGRESS
		 * (up to 100 iterations, sleeping 10-20ms between polls)
		 */
		for (i = 0; i < 100; i++) {
			hw->phy.ops.read_reg_mdi(hw,
						IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
						MDIO_MMD_PMAPMD,
						&sfp_stat);
			sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
			if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
				break;
			usleep_range(10000, 20000);
		}

		if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
			hw_dbg(hw, "EEPROM read did not pass.\n");
			status = IXGBE_ERR_SFP_NOT_PRESENT;
			goto out;
		}

		/* Read data; the byte of interest is in the high byte */
		hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
					MDIO_MMD_PMAPMD, &sfp_data);

		*eeprom_data = (u8)(sfp_data >> 8);
	} else {
		status = IXGBE_ERR_PHY;
	}

out:
	/* Always release the semaphore, success or failure */
	hw->mac.ops.release_swfw_sync(hw, gssr);
	return status;
}

1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091
/**
 *  ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
 *  @hw: pointer to hardware structure
 *  @byte_offset: EEPROM byte offset to read
 *  @eeprom_data: value read
 *
 *  Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
 *  Thin wrapper that targets the module's EEPROM device address.
 **/
static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
				       u8 *eeprom_data)
{
	return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
					byte_offset, eeprom_data);
}

/**
 *  ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
 *  @hw: pointer to hardware structure
 *  @byte_offset: byte offset at address 0xA2
 *  @sff8472_data: value read
 *
 *  Performs 8 byte read operation to SFP module's SFF-8472 data over I2C.
 *  Thin wrapper that targets the module's diagnostics device address.
 **/
static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
					u8 *sff8472_data)
{
	return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
					byte_offset, sff8472_data);
}

1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123
/**
 *  ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
 *  port devices.
 *  @hw: pointer to the HW structure
 *
 *  Calls common function and corrects issue with some single port devices
 *  that enable LAN1 but not LAN0.
 **/
static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
{
	struct ixgbe_bus_info *bus = &hw->bus;
	u16 pci_gen = 0;
	u16 pci_ctrl2 = 0;

	ixgbe_set_lan_id_multi_port_pcie(hw);

	/* check if LAN0 is disabled */
	hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
	if (pci_gen == 0 || pci_gen == 0xFFFF)
		return;

	hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);

	/* if LAN0 is completely disabled force function to 0 */
	if (!(pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE))
		return;
	if (pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT)
		return;
	if (pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)
		return;

	bus->func = 0;
}

1124
/**
 * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
 * @hw: pointer to hardware structure
 * @num_pb: number of packet buffers to allocate
 * @headroom: reserve n KB of headroom
 * @strategy: packet buffer allocation strategy
 **/
static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
				  u32 headroom, int strategy)
{
	u32 pbsize = IXGBE_RXPBSIZE_64KB;
	u8 pb = 0;

	if (!num_pb)
		return;

	/*
	 * Rx buffer sizing: WEIGHTED gives the first four buffers 80KB
	 * each and the remainder 48KB; EQUAL (and any other strategy)
	 * gives every buffer 64KB.
	 */
	if (strategy == PBA_STRATEGY_WEIGHTED) {
		for (pb = 0; pb < 4; pb++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(pb),
					IXGBE_RXPBSIZE_80KB);
		pbsize = IXGBE_RXPBSIZE_48KB;
	}

	/* Size the remaining (or all) Rx packet buffers evenly */
	for (; pb < IXGBE_MAX_PACKET_BUFFERS; pb++)
		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(pb), pbsize);

	/* Every Tx packet buffer gets 40KB */
	for (pb = 0; pb < IXGBE_MAX_PACKET_BUFFERS; pb++)
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(pb), IXGBE_TXPBSIZE_40KB);
}

1163
static const struct ixgbe_mac_operations mac_ops_82598 = {
1164 1165
	.init_hw		= &ixgbe_init_hw_generic,
	.reset_hw		= &ixgbe_reset_hw_82598,
1166
	.start_hw		= &ixgbe_start_hw_82598,
1167
	.clear_hw_cntrs		= &ixgbe_clear_hw_cntrs_generic,
1168
	.get_media_type		= &ixgbe_get_media_type_82598,
1169
	.enable_rx_dma          = &ixgbe_enable_rx_dma_generic,
1170 1171
	.get_mac_addr		= &ixgbe_get_mac_addr_generic,
	.stop_adapter		= &ixgbe_stop_adapter_generic,
1172
	.get_bus_info           = &ixgbe_get_bus_info_generic,
1173
	.set_lan_id             = &ixgbe_set_lan_id_multi_port_pcie_82598,
1174 1175
	.read_analog_reg8	= &ixgbe_read_analog_reg8_82598,
	.write_analog_reg8	= &ixgbe_write_analog_reg8_82598,
1176
	.setup_link		= &ixgbe_setup_mac_link_82598,
1177
	.set_rxpba		= &ixgbe_set_rxpba_82598,
1178 1179 1180 1181
	.check_link		= &ixgbe_check_mac_link_82598,
	.get_link_capabilities	= &ixgbe_get_link_capabilities_82598,
	.led_on			= &ixgbe_led_on_generic,
	.led_off		= &ixgbe_led_off_generic,
1182 1183
	.blink_led_start	= &ixgbe_blink_led_start_generic,
	.blink_led_stop		= &ixgbe_blink_led_stop_generic,
1184 1185 1186 1187 1188 1189 1190 1191 1192 1193
	.set_rar		= &ixgbe_set_rar_generic,
	.clear_rar		= &ixgbe_clear_rar_generic,
	.set_vmdq		= &ixgbe_set_vmdq_82598,
	.clear_vmdq		= &ixgbe_clear_vmdq_82598,
	.init_rx_addrs		= &ixgbe_init_rx_addrs_generic,
	.update_mc_addr_list	= &ixgbe_update_mc_addr_list_generic,
	.enable_mc		= &ixgbe_enable_mc_generic,
	.disable_mc		= &ixgbe_disable_mc_generic,
	.clear_vfta		= &ixgbe_clear_vfta_82598,
	.set_vfta		= &ixgbe_set_vfta_82598,
1194
	.fc_enable		= &ixgbe_fc_enable_82598,
E
Emil Tantilov 已提交
1195
	.set_fw_drv_ver         = NULL,
1196 1197
	.acquire_swfw_sync      = &ixgbe_acquire_swfw_sync,
	.release_swfw_sync      = &ixgbe_release_swfw_sync,
1198 1199
	.get_thermal_sensor_data = NULL,
	.init_thermal_sensor_thresh = NULL,
1200 1201
	.prot_autoc_read	= &prot_autoc_read_generic,
	.prot_autoc_write	= &prot_autoc_write_generic,
1202 1203
	.enable_rx		= &ixgbe_enable_rx_generic,
	.disable_rx		= &ixgbe_disable_rx_generic,
1204 1205
};

1206
static const struct ixgbe_eeprom_operations eeprom_ops_82598 = {
1207
	.init_params		= &ixgbe_init_eeprom_params_generic,
1208
	.read			= &ixgbe_read_eerd_generic,
1209 1210
	.write			= &ixgbe_write_eeprom_generic,
	.write_buffer		= &ixgbe_write_eeprom_buffer_bit_bang_generic,
1211
	.read_buffer		= &ixgbe_read_eerd_buffer_generic,
1212
	.calc_checksum          = &ixgbe_calc_eeprom_checksum_generic,
1213 1214 1215 1216
	.validate_checksum	= &ixgbe_validate_eeprom_checksum_generic,
	.update_checksum	= &ixgbe_update_eeprom_checksum_generic,
};

1217
static const struct ixgbe_phy_operations phy_ops_82598 = {
1218
	.identify		= &ixgbe_identify_phy_generic,
1219
	.identify_sfp		= &ixgbe_identify_module_generic,
1220
	.init			= &ixgbe_init_phy_ops_82598,
1221 1222 1223
	.reset			= &ixgbe_reset_phy_generic,
	.read_reg		= &ixgbe_read_phy_reg_generic,
	.write_reg		= &ixgbe_write_phy_reg_generic,
1224 1225
	.read_reg_mdi		= &ixgbe_read_phy_reg_mdi,
	.write_reg_mdi		= &ixgbe_write_phy_reg_mdi,
1226 1227
	.setup_link		= &ixgbe_setup_phy_link_generic,
	.setup_link_speed	= &ixgbe_setup_phy_link_speed_generic,
1228
	.read_i2c_sff8472	= &ixgbe_read_i2c_sff8472_82598,
D
Donald Skidmore 已提交
1229
	.read_i2c_eeprom	= &ixgbe_read_i2c_eeprom_82598,
1230
	.check_overtemp		= &ixgbe_tn_check_overtemp,
1231 1232
};

1233
const struct ixgbe_info ixgbe_82598_info = {
1234 1235 1236
	.mac			= ixgbe_mac_82598EB,
	.get_invariants		= &ixgbe_get_invariants_82598,
	.mac_ops		= &mac_ops_82598,
1237 1238
	.eeprom_ops		= &eeprom_ops_82598,
	.phy_ops		= &phy_ops_82598,
1239
	.mvals			= ixgbe_mvals_8259X,
1240
};