Commit c44ade9e authored by Jesse Brandeburg, committed by Jeff Garzik

ixgbe: update to latest common code module

This is a massive update that includes infrastructure for further patches
where we will add support for more phy types and eeprom types.

This code is shared as much as possible with other drivers, so it may
seem a little obtuse at times, but wherever possible we keep to the Linux
style and methods.
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Parent f0848276
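
The recurring pattern in the hunks below is a move from direct calls into exported ixgbe_* helpers (ixgbe_set_vfta, ixgbe_set_rar, ixgbe_read_eeprom, ...) to indirect calls through per-hardware function tables such as hw->mac.ops, hw->eeprom.ops and hw->phy.ops, with the shared implementations renamed to *_generic. The following is a minimal, hypothetical sketch of that indirection; the type names and struct layouts here are guesses, since the real definitions live in ixgbe_type.h, whose diff is collapsed in this view.

#include <linux/types.h>

struct ixgbe_hw;

/* Illustrative stand-in for the per-MAC function table; the driver's real
 * table in ixgbe_type.h has many more members. */
struct ixgbe_mac_operations {
	s32 (*init_hw)(struct ixgbe_hw *hw);
	s32 (*set_vfta)(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
};

struct ixgbe_mac_info {
	struct ixgbe_mac_operations ops;
};

struct ixgbe_hw {
	struct ixgbe_mac_info mac;
};

/* Before: callers were hard-wired to one exported helper,
 *	ixgbe_set_vfta(&adapter->hw, vid, 0, true);
 * After: the call goes through the table installed at probe time, so a new
 * MAC or PHY variant only has to provide different function pointers. */
static s32 example_add_vid(struct ixgbe_hw *hw, u16 vid)
{
	return hw->mac.ops.set_vfta(hw, vid, 0, true);
}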
......@@ -41,8 +41,6 @@
#include <linux/dca.h>
#endif
#define IXGBE_ERR(args...) printk(KERN_ERR "ixgbe: " args)
#define PFX "ixgbe: "
#define DPRINTK(nlevel, klevel, fmt, args...) \
((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
......
This diff is collapsed.
This diff is collapsed.
......@@ -31,36 +31,45 @@
#include "ixgbe_type.h"
s32 ixgbe_init_hw(struct ixgbe_hw *hw);
s32 ixgbe_start_hw(struct ixgbe_hw *hw);
s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr);
s32 ixgbe_stop_adapter(struct ixgbe_hw *hw);
s32 ixgbe_read_part_num(struct ixgbe_hw *hw, u32 *part_num);
s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_init_eeprom(struct ixgbe_hw *hw);
s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data);
s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val);
s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vind,
u32 enable_addr);
s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
u32 mc_addr_count, ixgbe_mc_addr_itr next);
s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *uc_addr_list,
u32 mc_addr_count, ixgbe_mc_addr_itr next);
s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
s32 ixgbe_validate_mac_addr(u8 *mac_addr);
s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packtetbuf_num);
s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num);
s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 *data);
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
u16 *checksum_val);
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
u32 enable_addr);
s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
u32 mc_addr_count,
ixgbe_mc_addr_itr func);
s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
u32 addr_count, ixgbe_mc_addr_itr func);
s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
s32 ixgbe_validate_mac_addr(u8 *mac_addr);
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val);
s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val);
s32 ixgbe_read_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 *val);
s32 ixgbe_write_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 val);
#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
......
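
The *_generic routines declared above are the shared implementations that the MAC-specific code (collapsed in this view) is expected to install into hw->mac.ops. Below is a hedged sketch of that wiring, limited to fields whose names can be read off the hw->mac.ops.* call sites in the ixgbe_ethtool.c and ixgbe_main.c hunks that follow; the initializer name is hypothetical and the real table has additional members.

#include "ixgbe_common.h"

/* Hypothetical per-MAC table wiring in the shared implementations; the
 * driver's real tables live in the MAC-specific source file. */
static struct ixgbe_mac_operations example_mac_ops = {
	.init_hw		= &ixgbe_init_hw_generic,
	.start_hw		= &ixgbe_start_hw_generic,
	.led_on			= &ixgbe_led_on_generic,
	.led_off		= &ixgbe_led_off_generic,
	.set_rar		= &ixgbe_set_rar_generic,
	.update_mc_addr_list	= &ixgbe_update_mc_addr_list_generic,
	.update_uc_addr_list	= &ixgbe_update_uc_addr_list_generic,
};

The eeprom.ops and phy.ops tables follow the same pattern, which is what lets ixgbe_probe below swap in ixgbe_read_eeprom_bit_bang_generic at runtime when the EEC register reports no attached EEPROM.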
......@@ -128,9 +128,10 @@ static int ixgbe_get_settings(struct net_device *netdev,
ecmd->advertising = (ADVERTISED_10000baseT_Full |
ADVERTISED_FIBRE);
ecmd->port = PORT_FIBRE;
ecmd->autoneg = AUTONEG_DISABLE;
}
adapter->hw.mac.ops.check_link(hw, &(link_speed), &link_up, false);
hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
if (link_up) {
ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
SPEED_10000 : SPEED_1000;
......@@ -327,7 +328,7 @@ static void ixgbe_get_regs(struct net_device *netdev,
regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL);
regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);
/* Flow Control */
......@@ -373,7 +374,7 @@ static void ixgbe_get_regs(struct net_device *netdev,
regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
for (i = 0; i < 16; i++)
regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE);
regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
......@@ -605,8 +606,8 @@ static int ixgbe_get_eeprom(struct net_device *netdev,
return -ENOMEM;
for (i = 0; i < eeprom_len; i++) {
if ((ret_val = ixgbe_read_eeprom(hw, first_word + i,
&eeprom_buff[i])))
if ((ret_val = hw->eeprom.ops.read(hw, first_word + i,
&eeprom_buff[i])))
break;
}
......@@ -807,7 +808,7 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
u8 *data)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
u8 *p = data;
char *p = (char *)data;
int i;
switch (stringset) {
......@@ -857,16 +858,17 @@ static int ixgbe_nway_reset(struct net_device *netdev)
static int ixgbe_phys_id(struct net_device *netdev, u32 data)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
u32 led_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_LEDCTL);
struct ixgbe_hw *hw = &adapter->hw;
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
u32 i;
if (!data || data > 300)
data = 300;
for (i = 0; i < (data * 1000); i += 400) {
ixgbe_led_on(&adapter->hw, IXGBE_LED_ON);
hw->mac.ops.led_on(hw, IXGBE_LED_ON);
msleep_interruptible(200);
ixgbe_led_off(&adapter->hw, IXGBE_LED_ON);
hw->mac.ops.led_off(hw, IXGBE_LED_ON);
msleep_interruptible(200);
}
......
......@@ -1749,14 +1749,16 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
/* add VID to filter table */
ixgbe_set_vfta(&adapter->hw, vid, 0, true);
hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true);
}
static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_disable(adapter);
......@@ -1767,7 +1769,7 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
ixgbe_irq_enable(adapter);
/* remove VID from filter table */
ixgbe_set_vfta(&adapter->hw, vid, 0, false);
hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false);
}
static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
......@@ -1843,15 +1845,15 @@ static void ixgbe_set_rx_mode(struct net_device *netdev)
addr_count = netdev->uc_count;
if (addr_count)
addr_list = netdev->uc_list->dmi_addr;
ixgbe_update_uc_addr_list(hw, addr_list, addr_count,
ixgbe_addr_list_itr);
hw->mac.ops.update_uc_addr_list(hw, addr_list, addr_count,
ixgbe_addr_list_itr);
/* reprogram multicast list */
addr_count = netdev->mc_count;
if (addr_count)
addr_list = netdev->mc_list->dmi_addr;
ixgbe_update_mc_addr_list(hw, addr_list, addr_count,
ixgbe_addr_list_itr);
hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
ixgbe_addr_list_itr);
}
static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
......@@ -2016,11 +2018,12 @@ int ixgbe_up(struct ixgbe_adapter *adapter)
void ixgbe_reset(struct ixgbe_adapter *adapter)
{
if (ixgbe_init_hw(&adapter->hw))
DPRINTK(PROBE, ERR, "Hardware Error\n");
struct ixgbe_hw *hw = &adapter->hw;
if (hw->mac.ops.init_hw(hw))
dev_err(&adapter->pdev->dev, "Hardware Error\n");
/* reprogram the RAR[0] in case user changed it. */
ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
}
......@@ -2637,6 +2640,14 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
unsigned int rss;
/* PCI config space info */
hw->vendor_id = pdev->vendor;
hw->device_id = pdev->device;
hw->revision_id = pdev->revision;
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_device_id = pdev->subsystem_device;
/* Set capability flags */
rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
adapter->ring_feature[RING_F_RSS].indices = rss;
......@@ -2652,15 +2663,6 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
/* select 10G link by default */
hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN;
if (hw->mac.ops.reset(hw)) {
dev_err(&pdev->dev, "HW Init failed\n");
return -EIO;
}
if (hw->mac.ops.setup_link_speed(hw, IXGBE_LINK_SPEED_10GB_FULL, true,
false)) {
dev_err(&pdev->dev, "Link Speed setup failed\n");
return -EIO;
}
/* enable itr by default in dynamic mode */
adapter->itr_setting = 1;
......@@ -2675,7 +2677,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
/* initialize eeprom parameters */
if (ixgbe_init_eeprom(hw)) {
if (ixgbe_init_eeprom_params_generic(hw)) {
dev_err(&pdev->dev, "EEPROM initialization failed\n");
return -EIO;
}
......@@ -3622,7 +3624,7 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
adapter->hw.mac.ops.set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
return 0;
}
......@@ -3645,6 +3647,22 @@ static void ixgbe_netpoll(struct net_device *netdev)
}
#endif
/**
* ixgbe_link_config - set up initial link with default speed and duplex
* @hw: pointer to private hardware struct
*
* Returns 0 on success, negative on failure
**/
static int ixgbe_link_config(struct ixgbe_hw *hw)
{
u32 autoneg = IXGBE_LINK_SPEED_10GB_FULL;
/* must always autoneg for both 1G and 10G link */
hw->mac.autoneg = true;
return hw->mac.ops.setup_link_speed(hw, autoneg, true, true);
}
/**
* ixgbe_napi_add_all - prep napi structs for use
* @adapter: private struct
......@@ -3691,7 +3709,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
static int cards_found;
int i, err, pci_using_dac;
u16 link_status, link_speed, link_width;
u32 part_num;
u32 part_num, eec;
err = pci_enable_device(pdev);
if (err)
......@@ -3705,8 +3723,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
if (err) {
err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
if (err) {
dev_err(&pdev->dev, "No usable DMA "
"configuration, aborting\n");
dev_err(&pdev->dev, "No usable DMA configuration, "
"aborting\n");
goto err_dma;
}
}
......@@ -3772,17 +3790,21 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
adapter->bd_number = cards_found;
/* PCI config space info */
hw->vendor_id = pdev->vendor;
hw->device_id = pdev->device;
hw->revision_id = pdev->revision;
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_device_id = pdev->subsystem_device;
/* Setup hw api */
memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
hw->mac.type = ii->mac;
/* EEPROM */
memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
eec = IXGBE_READ_REG(hw, IXGBE_EEC);
/* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
if (!(eec & (1 << 8)))
hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
/* PHY */
memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
/* phy->sfp_type = ixgbe_sfp_type_unknown; */
err = ii->get_invariants(hw);
if (err)
goto err_hw_init;
......@@ -3792,6 +3814,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
if (err)
goto err_sw_init;
/* reset_hw fills in the perm_addr as well */
err = hw->mac.ops.reset_hw(hw);
if (err) {
dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
goto err_sw_init;
}
netdev->features = NETIF_F_SG |
NETIF_F_IP_CSUM |
NETIF_F_HW_VLAN_TX |
......@@ -3812,7 +3841,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
netdev->features |= NETIF_F_HIGHDMA;
/* make sure the EEPROM is good */
if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
err = -EIO;
goto err_eeprom;
......@@ -3821,7 +3850,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
if (ixgbe_validate_mac_addr(netdev->dev_addr)) {
if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
dev_err(&pdev->dev, "invalid MAC address\n");
err = -EIO;
goto err_eeprom;
}
......@@ -3853,7 +3883,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
"Unknown"),
netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
ixgbe_read_part_num(hw, &part_num);
ixgbe_read_pba_num_generic(hw, &part_num);
dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
hw->mac.type, hw->phy.type,
(part_num >> 8), (part_num & 0xff));
......@@ -3867,7 +3897,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
}
/* reset the hardware with the new settings */
ixgbe_start_hw(hw);
hw->mac.ops.start_hw(hw);
/* link_config depends on start_hw being called at least once */
err = ixgbe_link_config(hw);
if (err) {
dev_err(&pdev->dev, "setup_link_speed FAILED %d\n", err);
goto err_register;
}
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
......
......@@ -33,32 +33,36 @@
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data);
/**
* ixgbe_identify_phy - Get physical layer module
* ixgbe_identify_phy_generic - Get physical layer module
* @hw: pointer to hardware structure
*
* Determines the physical layer module found on the current adapter.
**/
s32 ixgbe_identify_phy(struct ixgbe_hw *hw)
s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
{
s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
u32 phy_addr;
for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
if (ixgbe_validate_phy_addr(hw, phy_addr)) {
hw->phy.addr = phy_addr;
ixgbe_get_phy_id(hw);
hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id);
status = 0;
break;
if (hw->phy.type == ixgbe_phy_unknown) {
for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
if (ixgbe_validate_phy_addr(hw, phy_addr)) {
hw->phy.addr = phy_addr;
ixgbe_get_phy_id(hw);
hw->phy.type =
ixgbe_get_phy_type_from_id(hw->phy.id);
status = 0;
break;
}
}
} else {
status = 0;
}
return status;
}
......@@ -73,10 +77,8 @@ static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr)
bool valid = false;
hw->phy.addr = phy_addr;
ixgbe_read_phy_reg(hw,
IXGBE_MDIO_PHY_ID_HIGH,
IXGBE_MDIO_PMA_PMD_DEV_TYPE,
&phy_id);
hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id);
if (phy_id != 0xFFFF && phy_id != 0x0)
valid = true;
......@@ -95,21 +97,18 @@ static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
u16 phy_id_high = 0;
u16 phy_id_low = 0;
status = ixgbe_read_phy_reg(hw,
IXGBE_MDIO_PHY_ID_HIGH,
IXGBE_MDIO_PMA_PMD_DEV_TYPE,
&phy_id_high);
status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
IXGBE_MDIO_PMA_PMD_DEV_TYPE,
&phy_id_high);
if (status == 0) {
hw->phy.id = (u32)(phy_id_high << 16);
status = ixgbe_read_phy_reg(hw,
IXGBE_MDIO_PHY_ID_LOW,
IXGBE_MDIO_PMA_PMD_DEV_TYPE,
&phy_id_low);
status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_LOW,
IXGBE_MDIO_PMA_PMD_DEV_TYPE,
&phy_id_low);
hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
}
return status;
}
......@@ -123,9 +122,6 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
enum ixgbe_phy_type phy_type;
switch (phy_id) {
case TN1010_PHY_ID:
phy_type = ixgbe_phy_tn;
break;
case QT2022_PHY_ID:
phy_type = ixgbe_phy_qt;
break;
......@@ -138,32 +134,31 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
}
/**
* ixgbe_reset_phy - Performs a PHY reset
* ixgbe_reset_phy_generic - Performs a PHY reset
* @hw: pointer to hardware structure
**/
s32 ixgbe_reset_phy(struct ixgbe_hw *hw)
s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
{
/*
* Perform soft PHY reset to the PHY_XS.
* This will cause a soft reset to the PHY
*/
return ixgbe_write_phy_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
IXGBE_MDIO_PHY_XS_DEV_TYPE,
IXGBE_MDIO_PHY_XS_RESET);
return hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
IXGBE_MDIO_PHY_XS_DEV_TYPE,
IXGBE_MDIO_PHY_XS_RESET);
}
/**
* ixgbe_read_phy_reg - Reads a value from a specified PHY register
* ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register
* @hw: pointer to hardware structure
* @reg_addr: 32 bit address of PHY register to read
* @phy_data: Pointer to read data from PHY register
**/
s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data)
s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data)
{
u32 command;
u32 i;
u32 timeout = 10;
u32 data;
s32 status = 0;
u16 gssr;
......@@ -179,9 +174,9 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
if (status == 0) {
/* Setup and write the address cycle command */
command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
(device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
(hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
(IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
(device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
(hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
(IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
......@@ -190,7 +185,7 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
* The MDI Command bit will clear when the operation is
* complete
*/
for (i = 0; i < timeout; i++) {
for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
udelay(10);
command = IXGBE_READ_REG(hw, IXGBE_MSCA);
......@@ -210,9 +205,9 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
* command
*/
command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
(device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
(hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
(IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
(device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
(hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
(IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
......@@ -221,7 +216,7 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
* completed. The MDI Command bit will clear when the
* operation is complete
*/
for (i = 0; i < timeout; i++) {
for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
udelay(10);
command = IXGBE_READ_REG(hw, IXGBE_MSCA);
......@@ -231,8 +226,7 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
}
if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
hw_dbg(hw,
"PHY read command didn't complete\n");
hw_dbg(hw, "PHY read command didn't complete\n");
status = IXGBE_ERR_PHY;
} else {
/*
......@@ -247,22 +241,22 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
ixgbe_release_swfw_sync(hw, gssr);
}
return status;
}
/**
* ixgbe_write_phy_reg - Writes a value to specified PHY register
* ixgbe_write_phy_reg_generic - Writes a value to specified PHY register
* @hw: pointer to hardware structure
* @reg_addr: 32 bit PHY register to write
* @device_type: 5 bit device type
* @phy_data: Data to write to the PHY register
**/
static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data)
s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data)
{
u32 command;
u32 i;
u32 timeout = 10;
s32 status = 0;
u16 gssr;
......@@ -280,9 +274,9 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
/* Setup and write the address cycle command */
command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
(device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
(hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
(IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
(device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
(hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
(IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
......@@ -291,19 +285,19 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
* The MDI Command bit will clear when the operation is
* complete
*/
for (i = 0; i < timeout; i++) {
for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
udelay(10);
command = IXGBE_READ_REG(hw, IXGBE_MSCA);
if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) {
hw_dbg(hw, "PHY address cmd didn't complete\n");
if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
break;
}
}
if ((command & IXGBE_MSCA_MDI_COMMAND) != 0)
if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
hw_dbg(hw, "PHY address cmd didn't complete\n");
status = IXGBE_ERR_PHY;
}
if (status == 0) {
/*
......@@ -311,9 +305,9 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
* command
*/
command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
(device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
(hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
(IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
(device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
(hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
(IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
......@@ -322,20 +316,19 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
* completed. The MDI Command bit will clear when the
* operation is complete
*/
for (i = 0; i < timeout; i++) {
for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
udelay(10);
command = IXGBE_READ_REG(hw, IXGBE_MSCA);
if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) {
hw_dbg(hw, "PHY write command did not "
"complete.\n");
if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
break;
}
}
if ((command & IXGBE_MSCA_MDI_COMMAND) != 0)
if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
hw_dbg(hw, "PHY address cmd didn't complete\n");
status = IXGBE_ERR_PHY;
}
}
ixgbe_release_swfw_sync(hw, gssr);
......@@ -345,67 +338,54 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
}
/**
* ixgbe_setup_tnx_phy_link - Set and restart autoneg
* ixgbe_setup_phy_link_generic - Set and restart autoneg
* @hw: pointer to hardware structure
*
* Restart autonegotiation and PHY and waits for completion.
**/
s32 ixgbe_setup_tnx_phy_link(struct ixgbe_hw *hw)
s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
{
s32 status = IXGBE_NOT_IMPLEMENTED;
u32 time_out;
u32 max_time_out = 10;
u16 autoneg_speed_selection_register = 0x10;
u16 autoneg_restart_mask = 0x0200;
u16 autoneg_complete_mask = 0x0020;
u16 autoneg_reg = 0;
u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
/*
* Set advertisement settings in PHY based on autoneg_advertised
* settings. If autoneg_advertised = 0, then advertise default values
* txn devices cannot be "forced" to a autoneg 10G and fail. But can
* tnx devices cannot be "forced" to a autoneg 10G and fail. But can
* for a 1G.
*/
ixgbe_read_phy_reg(hw,
autoneg_speed_selection_register,
IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
&autoneg_reg);
hw->phy.ops.read_reg(hw, IXGBE_MII_SPEED_SELECTION_REG,
IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
if (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_1GB_FULL)
autoneg_reg &= 0xEFFF; /* 0 in bit 12 is 1G operation */
else
autoneg_reg |= 0x1000; /* 1 in bit 12 is 10G/1G operation */
ixgbe_write_phy_reg(hw,
autoneg_speed_selection_register,
IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
autoneg_reg);
hw->phy.ops.write_reg(hw, IXGBE_MII_SPEED_SELECTION_REG,
IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
/* Restart PHY autonegotiation and wait for completion */
ixgbe_read_phy_reg(hw,
IXGBE_MDIO_AUTO_NEG_CONTROL,
IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
&autoneg_reg);
hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
autoneg_reg |= autoneg_restart_mask;
autoneg_reg |= IXGBE_MII_RESTART;
ixgbe_write_phy_reg(hw,
IXGBE_MDIO_AUTO_NEG_CONTROL,
IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
autoneg_reg);
hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
/* Wait for autonegotiation to finish */
for (time_out = 0; time_out < max_time_out; time_out++) {
udelay(10);
/* Restart PHY autonegotiation and wait for completion */
status = ixgbe_read_phy_reg(hw,
IXGBE_MDIO_AUTO_NEG_STATUS,
IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
&autoneg_reg);
status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
&autoneg_reg);
autoneg_reg &= autoneg_complete_mask;
if (autoneg_reg == autoneg_complete_mask) {
autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE;
if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE) {
status = 0;
break;
}
......@@ -418,64 +398,17 @@ s32 ixgbe_setup_tnx_phy_link(struct ixgbe_hw *hw)
}
/**
* ixgbe_check_tnx_phy_link - Determine link and speed status
* @hw: pointer to hardware structure
*
* Reads the VS1 register to determine if link is up and the current speed for
* the PHY.
**/
s32 ixgbe_check_tnx_phy_link(struct ixgbe_hw *hw, u32 *speed,
bool *link_up)
{
s32 status = 0;
u32 time_out;
u32 max_time_out = 10;
u16 phy_link = 0;
u16 phy_speed = 0;
u16 phy_data = 0;
/* Initialize speed and link to default case */
*link_up = false;
*speed = IXGBE_LINK_SPEED_10GB_FULL;
/*
* Check current speed and link status of the PHY register.
* This is a vendor specific register and may have to
* be changed for other copper PHYs.
*/
for (time_out = 0; time_out < max_time_out; time_out++) {
udelay(10);
if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
*link_up = true;
if (phy_speed ==
IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
*speed = IXGBE_LINK_SPEED_1GB_FULL;
break;
} else {
status = ixgbe_read_phy_reg(hw,
IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS,
IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
&phy_data);
phy_link = phy_data &
IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
phy_speed = phy_data &
IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
}
}
return status;
}
/**
* ixgbe_setup_tnx_phy_link_speed - Sets the auto advertised capabilities
* ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities
* @hw: pointer to hardware structure
* @speed: new link speed
* @autoneg: true if autonegotiation enabled
**/
s32 ixgbe_setup_tnx_phy_link_speed(struct ixgbe_hw *hw, u32 speed,
bool autoneg,
bool autoneg_wait_to_complete)
s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg,
bool autoneg_wait_to_complete)
{
/*
* Clear autoneg_advertised and set new values based on input link
* speed.
......@@ -484,11 +417,13 @@ s32 ixgbe_setup_tnx_phy_link_speed(struct ixgbe_hw *hw, u32 speed,
if (speed & IXGBE_LINK_SPEED_10GB_FULL)
hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
if (speed & IXGBE_LINK_SPEED_1GB_FULL)
hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
/* Setup link based on the new speed settings */
ixgbe_setup_tnx_phy_link(hw);
hw->phy.ops.setup_link(hw);
return 0;
}
......@@ -30,20 +30,52 @@
#define _IXGBE_PHY_H_
#include "ixgbe_type.h"
#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0
s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw);
s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, u32 *speed, bool *link_up);
s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, u32 speed, bool autoneg,
bool autoneg_wait_to_complete);
s32 ixgbe_identify_phy(struct ixgbe_hw *hw);
s32 ixgbe_reset_phy(struct ixgbe_hw *hw);
s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data);
/* PHY specific */
s32 ixgbe_setup_tnx_phy_link(struct ixgbe_hw *hw);
s32 ixgbe_check_tnx_phy_link(struct ixgbe_hw *hw, u32 *speed, bool *link_up);
s32 ixgbe_setup_tnx_phy_link_speed(struct ixgbe_hw *hw, u32 speed, bool autoneg,
bool autoneg_wait_to_complete);
/* EEPROM byte offsets */
#define IXGBE_SFF_IDENTIFIER 0x0
#define IXGBE_SFF_IDENTIFIER_SFP 0x3
#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25
#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26
#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27
#define IXGBE_SFF_1GBE_COMP_CODES 0x6
#define IXGBE_SFF_10GBE_COMP_CODES 0x3
#define IXGBE_SFF_TRANSMISSION_MEDIA 0x9
/* Bitmasks */
#define IXGBE_SFF_TWIN_AX_CAPABLE 0x80
#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
#define IXGBE_I2C_EEPROM_READ_MASK 0x100
#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1
#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
/* Bit-shift macros */
#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 12
#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 8
#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 4
/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */
#define IXGBE_SFF_VENDOR_OUI_TYCO 0x00407600
#define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500
#define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00
s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data);
s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data);
s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg,
bool autoneg_wait_to_complete);
#endif /* _IXGBE_PHY_H_ */
This diff is collapsed.