Commit f96a8a0b authored by Carolyn Wyborny, committed by Jeff Kirsher

igb: Add Support for new i210/i211 devices.

This patch adds new initialization functions and device support
for i210 and i211 devices.
Signed-off-by: Carolyn Wyborny <carolyn.wyborny@intel.com>
Tested-by: Jeff Pieper <jeffrey.e.pieper@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Parent da02cde1
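A recurring structural change in this patch is that callers stop invoking igb_acquire_swfw_sync_82575()/igb_release_swfw_sync_82575() directly and instead go through new acquire_swfw_sync/release_swfw_sync pointers in struct e1000_mac_operations, which igb_get_invariants_82575() points at the i210-specific routines on i210/i211 parts. A minimal sketch of that dispatch pattern, assuming the ops have already been wired up as in the hunks below (igb_example_phy_access() is a hypothetical caller, not a function in the driver):

static s32 igb_example_phy_access(struct e1000_hw *hw, u16 mask)
{
	s32 ret_val;

	/* Dispatch through the ops table; on i210/i211 this resolves to
	 * igb_acquire_swfw_sync_i210(), on older parts to the 82575 variant. */
	ret_val = hw->mac.ops.acquire_swfw_sync(hw, mask);
	if (ret_val)
		return ret_val;

	/* ... access the PHY or NVM while the SW/FW semaphore is held ... */

	hw->mac.ops.release_swfw_sync(hw, mask);
	return 0;
}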
......@@ -33,6 +33,7 @@
obj-$(CONFIG_IGB) += igb.o
igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \
e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o
e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \
e1000_i210.o
igb-$(CONFIG_IGB_PTP) += igb_ptp.o
......@@ -36,6 +36,7 @@
#include "e1000_mac.h"
#include "e1000_82575.h"
#include "e1000_i210.h"
static s32 igb_get_invariants_82575(struct e1000_hw *);
static s32 igb_acquire_phy_82575(struct e1000_hw *);
......@@ -98,6 +99,8 @@ static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
break;
case e1000_82580:
case e1000_i350:
case e1000_i210:
case e1000_i211:
reg = rd32(E1000_MDICNFG);
ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
break;
......@@ -152,6 +155,17 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
case E1000_DEV_ID_I350_SGMII:
mac->type = e1000_i350;
break;
case E1000_DEV_ID_I210_COPPER:
case E1000_DEV_ID_I210_COPPER_OEM1:
case E1000_DEV_ID_I210_COPPER_IT:
case E1000_DEV_ID_I210_FIBER:
case E1000_DEV_ID_I210_SERDES:
case E1000_DEV_ID_I210_SGMII:
mac->type = e1000_i210;
break;
case E1000_DEV_ID_I211_COPPER:
mac->type = e1000_i211;
break;
default:
return -E1000_ERR_MAC_INIT;
break;
......@@ -184,26 +198,44 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
/* Set mta register count */
mac->mta_reg_count = 128;
/* Set rar entry count */
mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
if (mac->type == e1000_82576)
switch (mac->type) {
case e1000_82576:
mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
if (mac->type == e1000_82580)
break;
case e1000_82580:
mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
if (mac->type == e1000_i350)
break;
case e1000_i350:
case e1000_i210:
case e1000_i211:
mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
break;
default:
mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
break;
}
/* reset */
if (mac->type >= e1000_82580)
mac->ops.reset_hw = igb_reset_hw_82580;
else
mac->ops.reset_hw = igb_reset_hw_82575;
if (mac->type >= e1000_i210) {
mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210;
mac->ops.release_swfw_sync = igb_release_swfw_sync_i210;
} else {
mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575;
mac->ops.release_swfw_sync = igb_release_swfw_sync_82575;
}
/* Set if part includes ASF firmware */
mac->asf_firmware_present = true;
/* Set if manageability features are enabled. */
mac->arc_subsystem_valid =
(rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
? true : false;
/* enable EEE on i350 parts */
if (mac->type == e1000_i350)
/* enable EEE on i350 parts and later parts */
if (mac->type >= e1000_i350)
dev_spec->eee_disable = false;
else
dev_spec->eee_disable = true;
......@@ -215,26 +247,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
/* NVM initialization */
eecd = rd32(E1000_EECD);
nvm->opcode_bits = 8;
nvm->delay_usec = 1;
switch (nvm->override) {
case e1000_nvm_override_spi_large:
nvm->page_size = 32;
nvm->address_bits = 16;
break;
case e1000_nvm_override_spi_small:
nvm->page_size = 8;
nvm->address_bits = 8;
break;
default:
nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
break;
}
nvm->type = e1000_nvm_eeprom_spi;
size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
E1000_EECD_SIZE_EX_SHIFT);
......@@ -244,6 +256,33 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
*/
size += NVM_WORD_SIZE_BASE_SHIFT;
nvm->word_size = 1 << size;
if (hw->mac.type < e1000_i210) {
nvm->opcode_bits = 8;
nvm->delay_usec = 1;
switch (nvm->override) {
case e1000_nvm_override_spi_large:
nvm->page_size = 32;
nvm->address_bits = 16;
break;
case e1000_nvm_override_spi_small:
nvm->page_size = 8;
nvm->address_bits = 8;
break;
default:
nvm->page_size = eecd
& E1000_EECD_ADDR_BITS ? 32 : 8;
nvm->address_bits = eecd
& E1000_EECD_ADDR_BITS ? 16 : 8;
break;
}
if (nvm->word_size == (1 << 15))
nvm->page_size = 128;
nvm->type = e1000_nvm_eeprom_spi;
} else
nvm->type = e1000_nvm_flash_hw;
/*
* Check for invalid size
*/
......@@ -251,32 +290,60 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
pr_notice("The NVM size is not valid, defaulting to 32K\n");
size = 15;
}
nvm->word_size = 1 << size;
if (nvm->word_size == (1 << 15))
nvm->page_size = 128;
/* NVM Function Pointers */
nvm->ops.acquire = igb_acquire_nvm_82575;
if (nvm->word_size < (1 << 15))
nvm->ops.read = igb_read_nvm_eerd;
else
nvm->ops.read = igb_read_nvm_spi;
nvm->ops.release = igb_release_nvm_82575;
switch (hw->mac.type) {
case e1000_82580:
nvm->ops.validate = igb_validate_nvm_checksum_82580;
nvm->ops.update = igb_update_nvm_checksum_82580;
nvm->ops.acquire = igb_acquire_nvm_82575;
nvm->ops.release = igb_release_nvm_82575;
if (nvm->word_size < (1 << 15))
nvm->ops.read = igb_read_nvm_eerd;
else
nvm->ops.read = igb_read_nvm_spi;
nvm->ops.write = igb_write_nvm_spi;
break;
case e1000_i350:
nvm->ops.validate = igb_validate_nvm_checksum_i350;
nvm->ops.update = igb_update_nvm_checksum_i350;
nvm->ops.acquire = igb_acquire_nvm_82575;
nvm->ops.release = igb_release_nvm_82575;
if (nvm->word_size < (1 << 15))
nvm->ops.read = igb_read_nvm_eerd;
else
nvm->ops.read = igb_read_nvm_spi;
nvm->ops.write = igb_write_nvm_spi;
break;
case e1000_i210:
nvm->ops.validate = igb_validate_nvm_checksum_i210;
nvm->ops.update = igb_update_nvm_checksum_i210;
nvm->ops.acquire = igb_acquire_nvm_i210;
nvm->ops.release = igb_release_nvm_i210;
nvm->ops.read = igb_read_nvm_srrd_i210;
nvm->ops.valid_led_default = igb_valid_led_default_i210;
break;
case e1000_i211:
nvm->ops.acquire = igb_acquire_nvm_i210;
nvm->ops.release = igb_release_nvm_i210;
nvm->ops.read = igb_read_nvm_i211;
nvm->ops.valid_led_default = igb_valid_led_default_i210;
nvm->ops.validate = NULL;
nvm->ops.update = NULL;
nvm->ops.write = NULL;
break;
default:
nvm->ops.validate = igb_validate_nvm_checksum;
nvm->ops.update = igb_update_nvm_checksum;
nvm->ops.acquire = igb_acquire_nvm_82575;
nvm->ops.release = igb_release_nvm_82575;
if (nvm->word_size < (1 << 15))
nvm->ops.read = igb_read_nvm_eerd;
else
nvm->ops.read = igb_read_nvm_spi;
nvm->ops.write = igb_write_nvm_spi;
break;
}
nvm->ops.write = igb_write_nvm_spi;
/* if part supports SR-IOV then initialize mailbox parameters */
switch (mac->type) {
......@@ -314,9 +381,13 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
} else if (hw->mac.type >= e1000_82580) {
} else if ((hw->mac.type == e1000_82580)
|| (hw->mac.type == e1000_i350)) {
phy->ops.read_reg = igb_read_phy_reg_82580;
phy->ops.write_reg = igb_write_phy_reg_82580;
} else if (hw->phy.type >= e1000_phy_i210) {
phy->ops.read_reg = igb_read_phy_reg_gs40g;
phy->ops.write_reg = igb_write_phy_reg_gs40g;
} else {
phy->ops.read_reg = igb_read_phy_reg_igp;
phy->ops.write_reg = igb_write_phy_reg_igp;
......@@ -345,6 +416,14 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
else
phy->ops.get_cable_length = igb_get_cable_length_m88;
if (phy->id == I210_I_PHY_ID) {
phy->ops.get_cable_length =
igb_get_cable_length_m88_gen2;
phy->ops.set_d0_lplu_state =
igb_set_d0_lplu_state_82580;
phy->ops.set_d3_lplu_state =
igb_set_d3_lplu_state_82580;
}
phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
break;
case IGP03E1000_E_PHY_ID:
......@@ -364,6 +443,15 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
break;
case I210_I_PHY_ID:
phy->type = e1000_phy_i210;
phy->ops.get_phy_info = igb_get_phy_info_m88;
phy->ops.check_polarity = igb_check_polarity_m88;
phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
break;
default:
return -E1000_ERR_PHY;
}
......@@ -389,7 +477,7 @@ static s32 igb_acquire_phy_82575(struct e1000_hw *hw)
else if (hw->bus.func == E1000_FUNC_3)
mask = E1000_SWFW_PHY3_SM;
return igb_acquire_swfw_sync_82575(hw, mask);
return hw->mac.ops.acquire_swfw_sync(hw, mask);
}
/**
......@@ -410,7 +498,7 @@ static void igb_release_phy_82575(struct e1000_hw *hw)
else if (hw->bus.func == E1000_FUNC_3)
mask = E1000_SWFW_PHY3_SM;
igb_release_swfw_sync_82575(hw, mask);
hw->mac.ops.release_swfw_sync(hw, mask);
}
/**
......@@ -514,6 +602,8 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
break;
case e1000_82580:
case e1000_i350:
case e1000_i210:
case e1000_i211:
mdic = rd32(E1000_MDICNFG);
mdic &= E1000_MDICNFG_PHY_MASK;
phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
......@@ -780,14 +870,14 @@ static s32 igb_acquire_nvm_82575(struct e1000_hw *hw)
{
s32 ret_val;
ret_val = igb_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
ret_val = hw->mac.ops.acquire_swfw_sync(hw, E1000_SWFW_EEP_SM);
if (ret_val)
goto out;
ret_val = igb_acquire_nvm(hw);
if (ret_val)
igb_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
out:
return ret_val;
......@@ -803,7 +893,7 @@ static s32 igb_acquire_nvm_82575(struct e1000_hw *hw)
static void igb_release_nvm_82575(struct e1000_hw *hw)
{
igb_release_nvm(hw);
igb_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
}
/**
......@@ -1174,7 +1264,6 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
* is no link.
*/
igb_clear_hw_cntrs_82575(hw);
return ret_val;
}
......@@ -1211,6 +1300,7 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
}
}
switch (hw->phy.type) {
case e1000_phy_i210:
case e1000_phy_m88:
if (hw->phy.id == I347AT4_E_PHY_ID ||
hw->phy.id == M88E1112_E_PHY_ID)
......@@ -1851,7 +1941,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
/* Determine whether or not a global dev reset is requested */
if (global_device_reset &&
igb_acquire_swfw_sync_82575(hw, swmbsw_mask))
hw->mac.ops.acquire_swfw_sync(hw, swmbsw_mask))
global_device_reset = false;
if (global_device_reset &&
......@@ -1897,7 +1987,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
/* Release semaphore */
if (global_device_reset)
igb_release_swfw_sync_82575(hw, swmbsw_mask);
hw->mac.ops.release_swfw_sync(hw, swmbsw_mask);
return ret_val;
}
......
......@@ -55,10 +55,11 @@ extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
#define E1000_SRRCTL_DROP_EN 0x80000000
#define E1000_SRRCTL_TIMESTAMP 0x40000000
#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002
#define E1000_MRQC_ENABLE_VMDQ 0x00000003
#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005
#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005
#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
......
......@@ -458,6 +458,7 @@
#define E1000_ERR_INVALID_ARGUMENT 16
#define E1000_ERR_NO_SPACE 17
#define E1000_ERR_NVM_PBA_SECTION 18
#define E1000_ERR_INVM_VALUE_NOT_FOUND 19
/* Loop limit on how long we wait for auto-negotiation to complete */
#define COPPER_LINK_UP_LIMIT 10
......@@ -595,6 +596,25 @@
#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */
#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */
#define E1000_EECD_SIZE_EX_SHIFT 11
#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */
#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/
#define E1000_FLUDONE_ATTEMPTS 20000
#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */
#define E1000_I210_FIFO_SEL_RX 0x00
#define E1000_I210_FIFO_SEL_TX_QAV(_i) (0x02 + (_i))
#define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0)
#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06
#define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01
#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */
#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/
#define E1000_FLUDONE_ATTEMPTS 20000
#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */
#define E1000_I210_FIFO_SEL_RX 0x00
#define E1000_I210_FIFO_SEL_TX_QAV(_i) (0x02 + (_i))
#define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0)
#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06
#define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01
/* Offset to data in NVM read/write registers */
#define E1000_NVM_RW_REG_DATA 16
......@@ -613,6 +633,16 @@
#define NVM_CHECKSUM_REG 0x003F
#define NVM_COMPATIBILITY_REG_3 0x0003
#define NVM_COMPATIBILITY_BIT_MASK 0x8000
#define NVM_MAC_ADDR 0x0000
#define NVM_SUB_DEV_ID 0x000B
#define NVM_SUB_VEN_ID 0x000C
#define NVM_DEV_ID 0x000D
#define NVM_VEN_ID 0x000E
#define NVM_INIT_CTRL_2 0x000F
#define NVM_INIT_CTRL_4 0x0013
#define NVM_LED_1_CFG 0x001C
#define NVM_LED_0_2_CFG 0x001F
#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */
#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */
......@@ -639,6 +669,7 @@
#define NVM_PBA_OFFSET_0 8
#define NVM_PBA_OFFSET_1 9
#define NVM_RESERVED_WORD 0xFFFF
#define NVM_PBA_PTR_GUARD 0xFAFA
#define NVM_WORD_SIZE_BASE_SHIFT 6
......@@ -696,6 +727,7 @@
#define I82580_I_PHY_ID 0x015403A0
#define I350_I_PHY_ID 0x015403B0
#define M88_VENDOR 0x0141
#define I210_I_PHY_ID 0x01410C00
/* M88E1000 Specific Registers */
#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
......@@ -815,6 +847,7 @@
#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* EEE Enable 100M AN */
#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEE Tx LPI Enable */
#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEE Rx LPI Enable */
#define E1000_EEER_FRC_AN 0x10000000 /* Enable EEE in loopback */
#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */
/* SerDes Control */
......
......@@ -63,6 +63,13 @@ struct e1000_hw;
#define E1000_DEV_ID_I350_FIBER 0x1522
#define E1000_DEV_ID_I350_SERDES 0x1523
#define E1000_DEV_ID_I350_SGMII 0x1524
#define E1000_DEV_ID_I210_COPPER 0x1533
#define E1000_DEV_ID_I210_COPPER_OEM1 0x1534
#define E1000_DEV_ID_I210_COPPER_IT 0x1535
#define E1000_DEV_ID_I210_FIBER 0x1536
#define E1000_DEV_ID_I210_SERDES 0x1537
#define E1000_DEV_ID_I210_SGMII 0x1538
#define E1000_DEV_ID_I211_COPPER 0x1539
#define E1000_REVISION_2 2
#define E1000_REVISION_4 4
......@@ -83,6 +90,8 @@ enum e1000_mac_type {
e1000_82576,
e1000_82580,
e1000_i350,
e1000_i210,
e1000_i211,
e1000_num_macs /* List is 1-based, so subtract 1 for true count. */
};
......@@ -117,6 +126,7 @@ enum e1000_phy_type {
e1000_phy_igp_3,
e1000_phy_ife,
e1000_phy_82580,
e1000_phy_i210,
};
enum e1000_bus_type {
......@@ -313,6 +323,9 @@ struct e1000_mac_operations {
void (*rar_set)(struct e1000_hw *, u8 *, u32);
s32 (*read_mac_addr)(struct e1000_hw *);
s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
s32 (*acquire_swfw_sync)(struct e1000_hw *, u16);
void (*release_swfw_sync)(struct e1000_hw *, u16);
};
struct e1000_phy_operations {
......@@ -338,6 +351,7 @@ struct e1000_nvm_operations {
s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
s32 (*update)(struct e1000_hw *);
s32 (*validate)(struct e1000_hw *);
s32 (*valid_led_default)(struct e1000_hw *, u16 *);
};
struct e1000_info {
......
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
Copyright(c) 2007-2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
******************************************************************************/
/* e1000_i210
* e1000_i211
*/
#include <linux/types.h>
#include <linux/if_ether.h>
#include "e1000_hw.h"
#include "e1000_i210.h"
static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw);
static void igb_put_hw_semaphore_i210(struct e1000_hw *hw);
static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
u16 *data);
static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw);
/**
* igb_acquire_nvm_i210 - Request for access to EEPROM
* @hw: pointer to the HW structure
*
* Acquire the necessary semaphores for exclusive access to the EEPROM.
* Set the EEPROM access request bit and wait for EEPROM access grant bit.
* Return successful if access grant bit set, else clear the request for
* EEPROM access and return -E1000_ERR_NVM (-1).
**/
s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
{
return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}
/**
* igb_release_nvm_i210 - Release exclusive access to EEPROM
* @hw: pointer to the HW structure
*
* Stop any current commands to the EEPROM and clear the EEPROM request bit,
* then release the semaphores acquired.
**/
void igb_release_nvm_i210(struct e1000_hw *hw)
{
igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}
/**
* igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
* @hw: pointer to the HW structure
* @mask: specifies which semaphore to acquire
*
* Acquire the SW/FW semaphore to access the PHY or NVM. The mask
* will also specify which port we're acquiring the lock for.
**/
s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
u32 swfw_sync;
u32 swmask = mask;
u32 fwmask = mask << 16;
s32 ret_val = E1000_SUCCESS;
s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
while (i < timeout) {
if (igb_get_hw_semaphore_i210(hw)) {
ret_val = -E1000_ERR_SWFW_SYNC;
goto out;
}
swfw_sync = rd32(E1000_SW_FW_SYNC);
if (!(swfw_sync & fwmask))
break;
/*
* Firmware currently using resource (fwmask)
*/
igb_put_hw_semaphore_i210(hw);
mdelay(5);
i++;
}
if (i == timeout) {
hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
ret_val = -E1000_ERR_SWFW_SYNC;
goto out;
}
swfw_sync |= swmask;
wr32(E1000_SW_FW_SYNC, swfw_sync);
igb_put_hw_semaphore_i210(hw);
out:
return ret_val;
}
/**
* igb_release_swfw_sync_i210 - Release SW/FW semaphore
* @hw: pointer to the HW structure
* @mask: specifies which semaphore to acquire
*
* Release the SW/FW semaphore used to access the PHY or NVM. The mask
* will also specify which port we're releasing the lock for.
**/
void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
u32 swfw_sync;
while (igb_get_hw_semaphore_i210(hw) != E1000_SUCCESS)
; /* Empty */
swfw_sync = rd32(E1000_SW_FW_SYNC);
swfw_sync &= ~mask;
wr32(E1000_SW_FW_SYNC, swfw_sync);
igb_put_hw_semaphore_i210(hw);
}
/**
* igb_get_hw_semaphore_i210 - Acquire hardware semaphore
* @hw: pointer to the HW structure
*
* Acquire the HW semaphore to access the PHY or NVM
**/
static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
{
u32 swsm;
s32 ret_val = E1000_SUCCESS;
s32 timeout = hw->nvm.word_size + 1;
s32 i = 0;
/* Get the FW semaphore. */
for (i = 0; i < timeout; i++) {
swsm = rd32(E1000_SWSM);
wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
/* Semaphore acquired if bit latched */
if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
break;
udelay(50);
}
if (i == timeout) {
/* Release semaphores */
igb_put_hw_semaphore(hw);
hw_dbg("Driver can't access the NVM\n");
ret_val = -E1000_ERR_NVM;
goto out;
}
out:
return ret_val;
}
/**
* igb_put_hw_semaphore_i210 - Release hardware semaphore
* @hw: pointer to the HW structure
*
* Release hardware semaphore used to access the PHY or NVM
**/
static void igb_put_hw_semaphore_i210(struct e1000_hw *hw)
{
u32 swsm;
swsm = rd32(E1000_SWSM);
swsm &= ~E1000_SWSM_SWESMBI;
wr32(E1000_SWSM, swsm);
}
/**
* igb_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
* @hw: pointer to the HW structure
* @offset: offset of word in the Shadow Ram to read
* @words: number of words to read
* @data: word read from the Shadow Ram
*
* Reads a 16 bit word from the Shadow Ram using the EERD register.
* Uses necessary synchronization semaphores.
**/
s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
u16 *data)
{
s32 status = E1000_SUCCESS;
u16 i, count;
/* We cannot hold synchronization semaphores for too long,
* because of forceful takeover procedure. However it is more efficient
* to read in bursts than synchronizing access for each word. */
for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
E1000_EERD_EEWR_MAX_COUNT : (words - i);
if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
status = igb_read_nvm_eerd(hw, offset, count,
data + i);
hw->nvm.ops.release(hw);
} else {
status = E1000_ERR_SWFW_SYNC;
}
if (status != E1000_SUCCESS)
break;
}
return status;
}
/**
* igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
* @hw: pointer to the HW structure
* @offset: offset within the Shadow RAM to be written to
* @words: number of words to write
* @data: 16 bit word(s) to be written to the Shadow RAM
*
* Writes data to Shadow RAM at offset using EEWR register.
*
* If e1000_update_nvm_checksum is not called after this function, the
* data will not be committed to FLASH and also Shadow RAM will most likely
* contain an invalid checksum.
*
* If error code is returned, data and Shadow RAM may be inconsistent - buffer
* partially written.
**/
s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
u16 *data)
{
s32 status = E1000_SUCCESS;
u16 i, count;
/* We cannot hold synchronization semaphores for too long,
* because of forceful takeover procedure. However it is more efficient
* to write in bursts than synchronizing access for each word. */
for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
E1000_EERD_EEWR_MAX_COUNT : (words - i);
if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
status = igb_write_nvm_srwr(hw, offset, count,
data + i);
hw->nvm.ops.release(hw);
} else {
status = E1000_ERR_SWFW_SYNC;
}
if (status != E1000_SUCCESS)
break;
}
return status;
}
/**
* igb_write_nvm_srwr - Write to Shadow Ram using EEWR
* @hw: pointer to the HW structure
* @offset: offset within the Shadow Ram to be written to
* @words: number of words to write
* @data: 16 bit word(s) to be written to the Shadow Ram
*
* Writes data to Shadow Ram at offset using EEWR register.
*
* If igb_update_nvm_checksum is not called after this function, the
* Shadow Ram will most likely contain an invalid checksum.
**/
static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
u16 *data)
{
struct e1000_nvm_info *nvm = &hw->nvm;
u32 i, k, eewr = 0;
u32 attempts = 100000;
s32 ret_val = E1000_SUCCESS;
/*
* A check for invalid values: offset too large, too many words,
* too many words for the offset, and not enough words.
*/
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
(words == 0)) {
hw_dbg("nvm parameter(s) out of bounds\n");
ret_val = -E1000_ERR_NVM;
goto out;
}
for (i = 0; i < words; i++) {
eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
(data[i] << E1000_NVM_RW_REG_DATA) |
E1000_NVM_RW_REG_START;
wr32(E1000_SRWR, eewr);
for (k = 0; k < attempts; k++) {
if (E1000_NVM_RW_REG_DONE &
rd32(E1000_SRWR)) {
ret_val = E1000_SUCCESS;
break;
}
udelay(5);
}
if (ret_val != E1000_SUCCESS) {
hw_dbg("Shadow RAM write EEWR timed out\n");
break;
}
}
out:
return ret_val;
}
/**
* igb_read_nvm_i211 - Read NVM wrapper function for I211
* @hw: pointer to the HW structure
* @address: the word address (aka eeprom offset) to read
* @data: pointer to the data read
*
* Wrapper function to return data formerly found in the NVM.
**/
s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
u16 *data)
{
s32 ret_val = E1000_SUCCESS;
/* Only the MAC addr is required to be present in the iNVM */
switch (offset) {
case NVM_MAC_ADDR:
ret_val = igb_read_invm_i211(hw, offset, &data[0]);
ret_val |= igb_read_invm_i211(hw, offset+1, &data[1]);
ret_val |= igb_read_invm_i211(hw, offset+2, &data[2]);
if (ret_val != E1000_SUCCESS)
hw_dbg("MAC Addr not found in iNVM\n");
break;
case NVM_ID_LED_SETTINGS:
case NVM_INIT_CTRL_2:
case NVM_INIT_CTRL_4:
case NVM_LED_1_CFG:
case NVM_LED_0_2_CFG:
igb_read_invm_i211(hw, offset, data);
break;
case NVM_COMPAT:
*data = ID_LED_DEFAULT_I210;
break;
case NVM_SUB_DEV_ID:
*data = hw->subsystem_device_id;
break;
case NVM_SUB_VEN_ID:
*data = hw->subsystem_vendor_id;
break;
case NVM_DEV_ID:
*data = hw->device_id;
break;
case NVM_VEN_ID:
*data = hw->vendor_id;
break;
default:
hw_dbg("NVM word 0x%02x is not mapped.\n", offset);
*data = NVM_RESERVED_WORD;
break;
}
return ret_val;
}
/**
* igb_read_invm_i211 - Reads OTP
* @hw: pointer to the HW structure
* @address: the word address (aka eeprom offset) to read
* @data: pointer to the data read
*
* Reads 16-bit words from the OTP. Return error when the word is not
* stored in OTP.
**/
s32 igb_read_invm_i211(struct e1000_hw *hw, u16 address, u16 *data)
{
s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
u32 invm_dword;
u16 i;
u8 record_type, word_address;
for (i = 0; i < E1000_INVM_SIZE; i++) {
invm_dword = rd32(E1000_INVM_DATA_REG(i));
/* Get record type */
record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
break;
if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
if (word_address == (u8)address) {
*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
hw_dbg("Read INVM Word 0x%02x = %x",
address, *data);
status = E1000_SUCCESS;
break;
}
}
}
if (status != E1000_SUCCESS)
hw_dbg("Requested word 0x%02x not found in OTP\n", address);
return status;
}
/**
* igb_validate_nvm_checksum_i210 - Validate EEPROM checksum
* @hw: pointer to the HW structure
*
* Calculates the EEPROM checksum by reading/adding each word of the EEPROM
* and then verifies that the sum of the EEPROM is equal to 0xBABA.
**/
s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
{
s32 status = E1000_SUCCESS;
s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);
if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
/*
* Replace the read function with semaphore grabbing with
* the one that skips this for a while.
* We have semaphore taken already here.
*/
read_op_ptr = hw->nvm.ops.read;
hw->nvm.ops.read = igb_read_nvm_eerd;
status = igb_validate_nvm_checksum(hw);
/* Revert original read operation. */
hw->nvm.ops.read = read_op_ptr;
hw->nvm.ops.release(hw);
} else {
status = E1000_ERR_SWFW_SYNC;
}
return status;
}
/**
* igb_update_nvm_checksum_i210 - Update EEPROM checksum
* @hw: pointer to the HW structure
*
* Updates the EEPROM checksum by reading/adding each word of the EEPROM
* up to the checksum. Then calculates the EEPROM checksum and writes the
* value to the EEPROM. Next commit EEPROM data onto the Flash.
**/
s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
{
s32 ret_val = E1000_SUCCESS;
u16 checksum = 0;
u16 i, nvm_data;
/*
* Read the first word from the EEPROM. If this times out or fails, do
* not continue or we could be in for a very long wait while every
* EEPROM read fails
*/
ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data);
if (ret_val != E1000_SUCCESS) {
hw_dbg("EEPROM read failed\n");
goto out;
}
if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
/*
* Do not use hw->nvm.ops.write, hw->nvm.ops.read
* because we do not want to take the synchronization
* semaphores twice here.
*/
for (i = 0; i < NVM_CHECKSUM_REG; i++) {
ret_val = igb_read_nvm_eerd(hw, i, 1, &nvm_data);
if (ret_val) {
hw->nvm.ops.release(hw);
hw_dbg("NVM Read Error while updating checksum.\n");
goto out;
}
checksum += nvm_data;
}
checksum = (u16) NVM_SUM - checksum;
ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
&checksum);
if (ret_val != E1000_SUCCESS) {
hw->nvm.ops.release(hw);
hw_dbg("NVM Write Error while updating checksum.\n");
goto out;
}
hw->nvm.ops.release(hw);
ret_val = igb_update_flash_i210(hw);
} else {
ret_val = -E1000_ERR_SWFW_SYNC;
}
out:
return ret_val;
}
/**
* igb_update_flash_i210 - Commit EEPROM to the flash
* @hw: pointer to the HW structure
*
**/
s32 igb_update_flash_i210(struct e1000_hw *hw)
{
s32 ret_val = E1000_SUCCESS;
u32 flup;
ret_val = igb_pool_flash_update_done_i210(hw);
if (ret_val == -E1000_ERR_NVM) {
hw_dbg("Flash update time out\n");
goto out;
}
flup = rd32(E1000_EECD) | E1000_EECD_FLUPD_I210;
wr32(E1000_EECD, flup);
ret_val = igb_pool_flash_update_done_i210(hw);
if (ret_val == E1000_SUCCESS)
hw_dbg("Flash update complete\n");
else
hw_dbg("Flash update time out\n");
out:
return ret_val;
}
/**
* igb_pool_flash_update_done_i210 - Pool FLUDONE status.
* @hw: pointer to the HW structure
*
**/
s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
{
s32 ret_val = -E1000_ERR_NVM;
u32 i, reg;
for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
reg = rd32(E1000_EECD);
if (reg & E1000_EECD_FLUDONE_I210) {
ret_val = E1000_SUCCESS;
break;
}
udelay(5);
}
return ret_val;
}
/**
* igb_valid_led_default_i210 - Verify a valid default LED config
* @hw: pointer to the HW structure
* @data: pointer to the NVM (EEPROM)
*
* Read the EEPROM for the current default LED configuration. If the
* LED configuration is not valid, set to a valid LED configuration.
**/
s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
{
s32 ret_val;
ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
if (ret_val) {
hw_dbg("NVM Read Error\n");
goto out;
}
if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
switch (hw->phy.media_type) {
case e1000_media_type_internal_serdes:
*data = ID_LED_DEFAULT_I210_SERDES;
break;
case e1000_media_type_copper:
default:
*data = ID_LED_DEFAULT_I210;
break;
}
}
out:
return ret_val;
}
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
Copyright(c) 2007-2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#ifndef _E1000_I210_H_
#define _E1000_I210_H_
extern s32 igb_update_flash_i210(struct e1000_hw *hw);
extern s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw);
extern s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw);
extern s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data);
extern s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data);
extern s32 igb_read_invm_i211(struct e1000_hw *hw, u16 address, u16 *data);
extern s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
extern void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
extern s32 igb_acquire_nvm_i210(struct e1000_hw *hw);
extern void igb_release_nvm_i210(struct e1000_hw *hw);
extern s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
extern s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
u16 *data);
#define E1000_STM_OPCODE 0xDB00
#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
#define INVM_DWORD_TO_RECORD_TYPE(invm_dword) \
(u8)((invm_dword) & 0x7)
#define INVM_DWORD_TO_WORD_ADDRESS(invm_dword) \
(u8)(((invm_dword) & 0x0000FE00) >> 9)
#define INVM_DWORD_TO_WORD_DATA(invm_dword) \
(u16)(((invm_dword) & 0xFFFF0000) >> 16)
enum E1000_INVM_STRUCTURE_TYPE {
E1000_INVM_UNINITIALIZED_STRUCTURE = 0x00,
E1000_INVM_WORD_AUTOLOAD_STRUCTURE = 0x01,
E1000_INVM_CSR_AUTOLOAD_STRUCTURE = 0x02,
E1000_INVM_PHY_REGISTER_AUTOLOAD_STRUCTURE = 0x03,
E1000_INVM_RSA_KEY_SHA256_STRUCTURE = 0x04,
E1000_INVM_INVALIDATED_STRUCTURE = 0x0F,
};
#define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8
#define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1
#define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \
(ID_LED_OFF1_OFF2 << 4) | \
(ID_LED_DEF1_DEF2))
#define ID_LED_DEFAULT_I210_SERDES ((ID_LED_DEF1_DEF2 << 8) | \
(ID_LED_DEF1_DEF2 << 4) | \
(ID_LED_DEF1_DEF2))
#endif
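The INVM_DWORD_TO_* macros above slice a 32-bit iNVM dword into a 3-bit record type, a 7-bit word address and a 16-bit data word, which is how igb_read_invm_i211() walks the OTP records. A hedged decode example with a fabricated dword value (0xBEEF0201 is illustrative only, not data from any real part):

	u32 invm_dword = 0xBEEF0201;
	u8 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);   /* 0x1, word autoload */
	u8 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword); /* word 0x01 */
	u16 word_data = INVM_DWORD_TO_WORD_DATA(invm_dword);      /* 0xBEEF */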
......@@ -658,6 +658,7 @@ s32 igb_setup_link(struct e1000_hw *hw)
ret_val = igb_set_fc_watermarks(hw);
out:
return ret_val;
}
......
......@@ -710,4 +710,3 @@ s32 igb_update_nvm_checksum(struct e1000_hw *hw)
out:
return ret_val;
}
......@@ -35,6 +35,7 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw);
static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
u16 *phy_ctrl);
static s32 igb_wait_autoneg(struct e1000_hw *hw);
static s32 igb_set_master_slave_mode(struct e1000_hw *hw);
/* Cable length tables */
static const u16 e1000_m88_cable_length_table[] =
......@@ -570,6 +571,11 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
hw_dbg("Error committing the PHY changes\n");
goto out;
}
if (phy->type == e1000_phy_i210) {
ret_val = igb_set_master_slave_mode(hw);
if (ret_val)
return ret_val;
}
out:
return ret_val;
......@@ -1213,12 +1219,22 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
goto out;
if (!link) {
if (hw->phy.type != e1000_phy_m88 ||
hw->phy.id == I347AT4_E_PHY_ID ||
hw->phy.id == M88E1112_E_PHY_ID) {
bool reset_dsp = true;
switch (hw->phy.id) {
case I347AT4_E_PHY_ID:
case M88E1112_E_PHY_ID:
case I210_I_PHY_ID:
reset_dsp = false;
break;
default:
if (hw->phy.type != e1000_phy_m88)
reset_dsp = false;
break;
}
if (!reset_dsp)
hw_dbg("Link taking longer than expected.\n");
} else {
else {
/*
* We didn't get link.
* Reset the DSP and cross our fingers.
......@@ -1243,7 +1259,8 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
if (hw->phy.type != e1000_phy_m88 ||
hw->phy.id == I347AT4_E_PHY_ID ||
hw->phy.id == M88E1112_E_PHY_ID)
hw->phy.id == M88E1112_E_PHY_ID ||
hw->phy.id == I210_I_PHY_ID)
goto out;
ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
......@@ -1441,6 +1458,7 @@ s32 igb_check_downshift(struct e1000_hw *hw)
u16 phy_data, offset, mask;
switch (phy->type) {
case e1000_phy_i210:
case e1000_phy_m88:
case e1000_phy_gg82563:
offset = M88E1000_PHY_SPEC_STATUS;
......@@ -1476,7 +1494,7 @@ s32 igb_check_downshift(struct e1000_hw *hw)
*
* Polarity is determined based on the PHY specific status register.
**/
static s32 igb_check_polarity_m88(struct e1000_hw *hw)
s32 igb_check_polarity_m88(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
......@@ -1665,6 +1683,7 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
u16 phy_data, phy_data2, index, default_page, is_cm;
switch (hw->phy.id) {
case I210_I_PHY_ID:
case I347AT4_E_PHY_ID:
/* Remember the original page select and set it to 7 */
ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
......@@ -2129,10 +2148,16 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
void igb_power_up_phy_copper(struct e1000_hw *hw)
{
u16 mii_reg = 0;
u16 power_reg = 0;
/* The PHY will retain its settings across a power down/up cycle */
hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
mii_reg &= ~MII_CR_POWER_DOWN;
if (hw->phy.type == e1000_phy_i210) {
hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg);
power_reg &= ~GS40G_CS_POWER_DOWN;
hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg);
}
hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
}
......@@ -2146,10 +2171,18 @@ void igb_power_up_phy_copper(struct e1000_hw *hw)
void igb_power_down_phy_copper(struct e1000_hw *hw)
{
u16 mii_reg = 0;
u16 power_reg = 0;
/* The PHY will retain its settings across a power down/up cycle */
hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
mii_reg |= MII_CR_POWER_DOWN;
/* i210 Phy requires an additional bit for power up/down */
if (hw->phy.type == e1000_phy_i210) {
hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg);
power_reg |= GS40G_CS_POWER_DOWN;
hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg);
}
hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
msleep(1);
}
......@@ -2345,3 +2378,103 @@ s32 igb_get_cable_length_82580(struct e1000_hw *hw)
out:
return ret_val;
}
/**
* igb_write_phy_reg_gs40g - Write GS40G PHY register
* @hw: pointer to the HW structure
* @offset: lower half is register offset to write to
* upper half is page to use.
* @data: data to write at register offset
*
* Acquires semaphore, if necessary, then writes the data to PHY register
* at the offset. Release any acquired semaphores before exiting.
**/
s32 igb_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data)
{
s32 ret_val;
u16 page = offset >> GS40G_PAGE_SHIFT;
offset = offset & GS40G_OFFSET_MASK;
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return ret_val;
ret_val = igb_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page);
if (ret_val)
goto release;
ret_val = igb_write_phy_reg_mdic(hw, offset, data);
release:
hw->phy.ops.release(hw);
return ret_val;
}
/**
* igb_read_phy_reg_gs40g - Read GS40G PHY register
* @hw: pointer to the HW structure
* @offset: lower half is register offset to read to
* upper half is page to use.
* @data: data to read at register offset
*
* Acquires semaphore, if necessary, then reads the data in the PHY register
* at the offset. Release any acquired semaphores before exiting.
**/
s32 igb_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data)
{
s32 ret_val;
u16 page = offset >> GS40G_PAGE_SHIFT;
offset = offset & GS40G_OFFSET_MASK;
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return ret_val;
ret_val = igb_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page);
if (ret_val)
goto release;
ret_val = igb_read_phy_reg_mdic(hw, offset, data);
release:
hw->phy.ops.release(hw);
return ret_val;
}
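As the kernel-doc above notes, the GS40G accessors take a combined offset with the page in the upper 16 bits and the register in the lower 16 bits (see GS40G_PAGE_SHIFT and GS40G_OFFSET_MASK). A hedged usage sketch; igb_example_read_gs40g() is a hypothetical helper, not part of the driver:

static s32 igb_example_read_gs40g(struct e1000_hw *hw, u16 page, u16 reg,
				  u16 *val)
{
	/* Compose the offset the way igb_read_phy_reg_gs40g() expects:
	 * page in bits 31:16, per-page register offset in bits 15:0. */
	u32 offset = ((u32)page << GS40G_PAGE_SHIFT) | reg;

	return igb_read_phy_reg_gs40g(hw, offset, val);
}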
/**
* igb_set_master_slave_mode - Setup PHY for Master/slave mode
* @hw: pointer to the HW structure
*
* Sets up Master/slave mode
**/
static s32 igb_set_master_slave_mode(struct e1000_hw *hw)
{
s32 ret_val;
u16 phy_data;
/* Resolve Master/Slave mode */
ret_val = hw->phy.ops.read_reg(hw, PHY_1000T_CTRL, &phy_data);
if (ret_val)
return ret_val;
/* load defaults for future use */
hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ?
((phy_data & CR_1000T_MS_VALUE) ?
e1000_ms_force_master :
e1000_ms_force_slave) : e1000_ms_auto;
switch (hw->phy.ms_type) {
case e1000_ms_force_master:
phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
break;
case e1000_ms_force_slave:
phy_data |= CR_1000T_MS_ENABLE;
phy_data &= ~(CR_1000T_MS_VALUE);
break;
case e1000_ms_auto:
phy_data &= ~CR_1000T_MS_ENABLE;
/* fall-through */
default:
break;
}
return hw->phy.ops.write_reg(hw, PHY_1000T_CTRL, phy_data);
}
......@@ -73,6 +73,9 @@ s32 igb_copper_link_setup_82580(struct e1000_hw *hw);
s32 igb_get_phy_info_82580(struct e1000_hw *hw);
s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw);
s32 igb_get_cable_length_82580(struct e1000_hw *hw);
s32 igb_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data);
s32 igb_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data);
s32 igb_check_polarity_m88(struct e1000_hw *hw);
/* IGP01E1000 Specific Registers */
#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */
......@@ -140,4 +143,16 @@ s32 igb_get_cable_length_82580(struct e1000_hw *hw);
#define E1000_CABLE_LENGTH_UNDEFINED 0xFF
/* GS40G - I210 PHY defines */
#define GS40G_PAGE_SELECT 0x16
#define GS40G_PAGE_SHIFT 16
#define GS40G_OFFSET_MASK 0xFFFF
#define GS40G_PAGE_2 0x20000
#define GS40G_MAC_REG2 0x15
#define GS40G_MAC_LB 0x4140
#define GS40G_MAC_SPEED_1G 0X0006
#define GS40G_COPPER_SPEC 0x0010
#define GS40G_CS_POWER_DOWN 0x0002
#define GS40G_LINE_LB 0x4000
#endif
......@@ -352,4 +352,18 @@
#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */
#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */
#define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */
#define E1000_I210_FLMNGCTL 0x12038
#define E1000_I210_FLMNGDATA 0x1203C
#define E1000_I210_FLMNGCNT 0x12040
#define E1000_I210_FLSWCTL 0x12048
#define E1000_I210_FLSWDATA 0x1204C
#define E1000_I210_FLSWCNT 0x12050
#define E1000_I210_FLA 0x1201C
#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n))
#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */
#endif
......@@ -65,10 +65,13 @@ struct igb_adapter;
#define MAX_Q_VECTORS 8
/* Transmit and receive queues */
#define IGB_MAX_RX_QUEUES (adapter->vfs_allocated_count ? 2 : \
(hw->mac.type > e1000_82575 ? 8 : 4))
#define IGB_MAX_RX_QUEUES ((adapter->vfs_allocated_count ? 2 : \
(hw->mac.type > e1000_82575 ? 8 : 4)))
#define IGB_MAX_RX_QUEUES_I210 4
#define IGB_MAX_RX_QUEUES_I211 2
#define IGB_MAX_TX_QUEUES 16
#define IGB_MAX_TX_QUEUES_I210 4
#define IGB_MAX_TX_QUEUES_I211 2
#define IGB_MAX_VF_MC_ENTRIES 30
#define IGB_MAX_VF_FUNCTIONS 8
#define IGB_MAX_VFTA_ENTRIES 128
......
......@@ -552,10 +552,13 @@ static void igb_get_regs(struct net_device *netdev,
regs_buff[548] = rd32(E1000_TDFT);
regs_buff[549] = rd32(E1000_TDFHS);
regs_buff[550] = rd32(E1000_TDFPC);
regs_buff[551] = adapter->stats.o2bgptc;
regs_buff[552] = adapter->stats.b2ospc;
regs_buff[553] = adapter->stats.o2bspc;
regs_buff[554] = adapter->stats.b2ogprc;
if (hw->mac.type > e1000_82580) {
regs_buff[551] = adapter->stats.o2bgptc;
regs_buff[552] = adapter->stats.b2ospc;
regs_buff[553] = adapter->stats.o2bspc;
regs_buff[554] = adapter->stats.b2ogprc;
}
if (hw->mac.type != e1000_82576)
return;
......@@ -660,6 +663,9 @@ static int igb_set_eeprom(struct net_device *netdev,
if (eeprom->len == 0)
return -EOPNOTSUPP;
if (hw->mac.type == e1000_i211)
return -EOPNOTSUPP;
if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
return -EFAULT;
......@@ -887,6 +893,36 @@ struct igb_reg_test {
#define TABLE64_TEST_LO 5
#define TABLE64_TEST_HI 6
/* i210 reg test */
static struct igb_reg_test reg_test_i210[] = {
{ E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
{ E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
{ E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
{ E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
/* RDH is read-only for i210, only test RDT. */
{ E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
{ E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
{ E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
{ E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
{ E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
{ E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
{ E1000_RA, 0, 16, TABLE64_TEST_LO,
0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RA, 0, 16, TABLE64_TEST_HI,
0x900FFFFF, 0xFFFFFFFF },
{ E1000_MTA, 0, 128, TABLE32_TEST,
0xFFFFFFFF, 0xFFFFFFFF },
{ 0, 0, 0, 0, 0 }
};
/* i350 reg test */
static struct igb_reg_test reg_test_i350[] = {
{ E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
......@@ -1109,6 +1145,11 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
test = reg_test_i350;
toggle = 0x7FEFF3FF;
break;
case e1000_i210:
case e1000_i211:
test = reg_test_i210;
toggle = 0x7FEFF3FF;
break;
case e1000_82580:
test = reg_test_82580;
toggle = 0x7FEFF3FF;
......@@ -1190,23 +1231,13 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
{
u16 temp;
u16 checksum = 0;
u16 i;
*data = 0;
/* Read and add up the contents of the EEPROM */
for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
if ((adapter->hw.nvm.ops.read(&adapter->hw, i, 1, &temp)) < 0) {
*data = 1;
break;
}
checksum += temp;
}
/* If Checksum is not Correct return error else test passed */
if ((checksum != (u16) NVM_SUM) && !(*data))
*data = 2;
/* Validate eeprom on all parts but i211 */
if (adapter->hw.mac.type != e1000_i211) {
if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0)
*data = 2;
}
return *data;
}
......@@ -1272,6 +1303,8 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
ics_mask = 0x77DCFED5;
break;
case e1000_i350:
case e1000_i210:
case e1000_i211:
ics_mask = 0x77DCFED5;
break;
default:
......@@ -1438,23 +1471,35 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 ctrl_reg = 0;
u16 phy_reg = 0;
hw->mac.autoneg = false;
if (hw->phy.type == e1000_phy_m88) {
switch (hw->phy.type) {
case e1000_phy_m88:
/* Auto-MDI/MDIX Off */
igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
/* reset to update Auto-MDI/MDIX */
igb_write_phy_reg(hw, PHY_CONTROL, 0x9140);
/* autoneg off */
igb_write_phy_reg(hw, PHY_CONTROL, 0x8140);
} else if (hw->phy.type == e1000_phy_82580) {
break;
case e1000_phy_82580:
/* enable MII loopback */
igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041);
break;
case e1000_phy_i210:
/* set loopback speed in PHY */
igb_read_phy_reg(hw, (GS40G_PAGE_SELECT & GS40G_PAGE_2),
&phy_reg);
phy_reg |= GS40G_MAC_SPEED_1G;
igb_write_phy_reg(hw, (GS40G_PAGE_SELECT & GS40G_PAGE_2),
phy_reg);
ctrl_reg = rd32(E1000_CTRL_EXT);
default:
break;
}
ctrl_reg = rd32(E1000_CTRL);
/* force 1000, set loopback */
igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
......@@ -1467,7 +1512,7 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
E1000_CTRL_FD | /* Force Duplex to FULL */
E1000_CTRL_SLU); /* Set link up enable bit */
if (hw->phy.type == e1000_phy_m88)
if ((hw->phy.type == e1000_phy_m88) || (hw->phy.type == e1000_phy_i210))
ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
wr32(E1000_CTRL, ctrl_reg);
......@@ -1475,7 +1520,7 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
/* Disable the receiver on the PHY so when a cable is plugged in, the
* PHY does not begin to autoneg when a cable is reconnected to the NIC.
*/
if (hw->phy.type == e1000_phy_m88)
if ((hw->phy.type == e1000_phy_m88) || (hw->phy.type == e1000_phy_i210))
igb_phy_disable_receiver(adapter);
udelay(500);
......@@ -1740,6 +1785,14 @@ static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
*data = 0;
goto out;
}
if ((adapter->hw.mac.type == e1000_i210)
|| (adapter->hw.mac.type == e1000_i211)) {
dev_err(&adapter->pdev->dev,
"Loopback test not supported "
"on this part at this time.\n");
*data = 0;
goto out;
}
*data = igb_setup_desc_rings(adapter);
if (*data)
goto out;
......
......@@ -75,6 +75,11 @@ static const struct e1000_info *igb_info_tbl[] = {
};
static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
......@@ -641,6 +646,8 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
case e1000_82575:
case e1000_82580:
case e1000_i350:
case e1000_i210:
case e1000_i211:
default:
for (; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i]->reg_idx = rbase_offset + i;
......@@ -727,8 +734,11 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
if (adapter->hw.mac.type >= e1000_82576)
set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
/* On i350, loopback VLAN packets have the tag byte-swapped. */
if (adapter->hw.mac.type == e1000_i350)
/*
* On i350, i210, and i211, loopback VLAN packets
* have the tag byte-swapped.
*/
if (adapter->hw.mac.type >= e1000_i350)
set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
adapter->rx_ring[i] = ring;
......@@ -822,6 +832,8 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
break;
case e1000_82580:
case e1000_i350:
case e1000_i210:
case e1000_i211:
/*
* On 82580 and newer adapters the scheme is similar to 82576
* however instead of ordering column-major we have things
......@@ -888,6 +900,8 @@ static void igb_configure_msix(struct igb_adapter *adapter)
case e1000_82576:
case e1000_82580:
case e1000_i350:
case e1000_i210:
case e1000_i211:
/* Turn on MSI-X capability first, or our settings
* won't stick. And it will take days to debug. */
wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
......@@ -1034,6 +1048,11 @@ static int igb_set_interrupt_capability(struct igb_adapter *adapter)
if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
numvecs += adapter->num_tx_queues;
/* i210 and i211 can only have 4 MSIX vectors for rx/tx queues. */
if ((adapter->hw.mac.type == e1000_i210)
|| (adapter->hw.mac.type == e1000_i211))
numvecs = 4;
/* store the number of vectors reserved for queues */
adapter->num_q_vectors = numvecs;
......@@ -1041,6 +1060,7 @@ static int igb_set_interrupt_capability(struct igb_adapter *adapter)
numvecs++;
adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
GFP_KERNEL);
if (!adapter->msix_entries)
goto msi_only;
......@@ -1631,6 +1651,8 @@ void igb_reset(struct igb_adapter *adapter)
pba &= E1000_RXPBS_SIZE_MASK_82576;
break;
case e1000_82575:
case e1000_i210:
case e1000_i211:
default:
pba = E1000_PBA_34K;
break;
......@@ -1826,7 +1848,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
*/
if (pdev->is_virtfn) {
WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
pci_name(pdev), pdev->vendor, pdev->device);
pci_name(pdev), pdev->vendor, pdev->device);
return -EINVAL;
}
......@@ -1980,11 +2002,16 @@ static int __devinit igb_probe(struct pci_dev *pdev,
* known good starting state */
hw->mac.ops.reset_hw(hw);
/* make sure the NVM is good */
if (hw->nvm.ops.validate(hw) < 0) {
dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
err = -EIO;
goto err_eeprom;
/*
* make sure the NVM is good, i211 parts have special NVM that
* doesn't contain a checksum
*/
if (hw->mac.type != e1000_i211) {
if (hw->nvm.ops.validate(hw) < 0) {
dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
err = -EIO;
goto err_eeprom;
}
}
/* copy the MAC address out of the NVM */
......@@ -2118,6 +2145,8 @@ static int __devinit igb_probe(struct pci_dev *pdev,
adapter->num_rx_queues, adapter->num_tx_queues);
switch (hw->mac.type) {
case e1000_i350:
case e1000_i210:
case e1000_i211:
igb_set_eee_i350(hw);
break;
default:
......@@ -2244,9 +2273,14 @@ static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
{
#ifdef CONFIG_PCI_IOV
struct pci_dev *pdev = adapter->pdev;
struct e1000_hw *hw = &adapter->hw;
int old_vfs = igb_find_enabled_vfs(adapter);
int i;
/* Virtualization features not supported on i210 family. */
if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
return;
if (old_vfs) {
dev_info(&pdev->dev, "%d pre-allocated VFs found - override "
"max_vfs setting of %d\n", old_vfs, max_vfs);
......@@ -2258,6 +2292,7 @@ static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
sizeof(struct vf_data_storage), GFP_KERNEL);
/* if allocation failed then we do not support SR-IOV */
if (!adapter->vf_data) {
adapter->vfs_allocated_count = 0;
......@@ -2332,11 +2367,28 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
} else
adapter->vfs_allocated_count = max_vfs;
break;
case e1000_i210:
case e1000_i211:
adapter->vfs_allocated_count = 0;
break;
default:
break;
}
#endif /* CONFIG_PCI_IOV */
adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
switch (hw->mac.type) {
case e1000_i210:
adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES_I210,
num_online_cpus());
break;
case e1000_i211:
adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES_I211,
num_online_cpus());
break;
default:
adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES,
num_online_cpus());
break;
}
/* i350 cannot do RSS and SR-IOV at the same time */
if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count)
adapter->rss_queues = 1;
......@@ -2366,7 +2418,7 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
/* Explicitly disable IRQ since the NIC can be in any state. */
igb_irq_disable(adapter);
if (hw->mac.type == e1000_i350)
if (hw->mac.type >= e1000_i350)
adapter->flags &= ~IGB_FLAG_DMAC;
set_bit(__IGB_DOWN, &adapter->state);
......@@ -2819,6 +2871,17 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
/* Don't need to set TUOFL or IPOFL, they default to 1 */
wr32(E1000_RXCSUM, rxcsum);
/*
* Generate RSS hash based on TCP port numbers and/or
* IPv4/v6 src and dst addresses since UDP cannot be
* hashed reliably due to IP fragmentation
*/
mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
E1000_MRQC_RSS_FIELD_IPV4_TCP |
E1000_MRQC_RSS_FIELD_IPV6 |
E1000_MRQC_RSS_FIELD_IPV6_TCP |
E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
/* If VMDq is enabled then we set the appropriate mode for that, else
* we default to RSS so that an RSS hash is calculated per packet even
......@@ -2834,25 +2897,15 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
wr32(E1000_VT_CTL, vtctl);
}
if (adapter->rss_queues > 1)
mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
else
mrqc = E1000_MRQC_ENABLE_VMDQ;
mrqc |= E1000_MRQC_ENABLE_VMDQ;
} else {
mrqc = E1000_MRQC_ENABLE_RSS_4Q;
if (hw->mac.type != e1000_i211)
mrqc |= E1000_MRQC_ENABLE_RSS_4Q;
}
igb_vmm_control(adapter);
/*
* Generate RSS hash based on TCP port numbers and/or
* IPv4/v6 src and dst addresses since UDP cannot be
* hashed reliably due to IP fragmentation
*/
mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
E1000_MRQC_RSS_FIELD_IPV4_TCP |
E1000_MRQC_RSS_FIELD_IPV6 |
E1000_MRQC_RSS_FIELD_IPV6_TCP |
E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
wr32(E1000_MRQC, mrqc);
}
......@@ -3454,7 +3507,7 @@ static void igb_set_rx_mode(struct net_device *netdev)
* we will have issues with VLAN tag stripping not being done for frames
* that are only arriving because we are the default pool
*/
if (hw->mac.type < e1000_82576)
if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
return;
vmolr |= rd32(E1000_VMOLR(vfn)) &
......@@ -3551,7 +3604,7 @@ static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
bool ret = false;
u32 ctrl_ext, thstat;
/* check for thermal sensor event on i350, copper only */
/* check for thermal sensor event on i350 copper only */
if (hw->mac.type == e1000_i350) {
thstat = rd32(E1000_THSTAT);
ctrl_ext = rd32(E1000_CTRL_EXT);
......@@ -7027,6 +7080,8 @@ static void igb_vmm_control(struct igb_adapter *adapter)
switch (hw->mac.type) {
case e1000_82575:
case e1000_i210:
case e1000_i211:
default:
/* replication is not supported for 82575 */
return;
......
......@@ -262,6 +262,8 @@ void igb_ptp_init(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
switch (hw->mac.type) {
case e1000_i210:
case e1000_i211:
case e1000_i350:
case e1000_82580:
adapter->caps.owner = THIS_MODULE;
......@@ -362,6 +364,8 @@ void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
unsigned long flags;
switch (adapter->hw.mac.type) {
case e1000_i210:
case e1000_i211:
case e1000_i350:
case e1000_82580:
case e1000_82576:
......