Commit e28383dd authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2015-10-07

This series contains updates to i40e and i40evf only.

Paul updates i40e to increase the amount of time we wait for a reset to
complete, since we have seen rare occasions where the reset can take
longer to complete.

Shannon updates the driver to turn on Wake-on-LAN by default if it is
enabled in the hardware configuration to begin with, rather than always
disabling it and waiting for the user to expressly turn it on.  Added
new device IDs and support for future devices.  Fixed a possible
type-compare problem between a size and a possibly negative number.
Also fixed a shift value that was wrong, which ended up producing a bad
bitmask.  Did general housecleaning to clean up several pieces of
low-hanging fruit in the driver.  Fixed an issue where new unicast
addresses would be added to the VSI list and then immediately removed,
so they would never actually make it down to the hardware.  Resolved
the issue by removing the separation of unicast and multicast in the
search for filters to be deleted.

Mitch fixes an issue where the hardware would continue to access memory
formerly used by the rings of a VF that has been removed, causing
memory corruption or DMAR errors.  To relieve this condition,
explicitly stop all rings associated with each VF before releasing its
resources.  Also fixed a panic that occurs if the driver is unable to
enable MSI-X or is unable to acquire enough vectors, by propagating
interrupt allocation failure information to the calling function.
Cleaned up an opcode that is not required.

Carolyn extends the size of the text available for the interrupt names
so that all the descriptive data available for the Flow Director
interrupts is not truncated.

Catherine fixes an issue where the speed could get set to 0 when the
advertised speed is 0 (which is the case when autoneg is disabled).

Jesse fixes the NVM checksum on big endian machines by adding code to
swap it correctly.  Also fixed a bug in the return value of
get_link_status(), which only returned true or false, where false could
mean multiple things.  Now all the return values in the call chain are
bubbled back to the caller, so the reason for a failure does not get
lost.

Anjali adds statistics to keep track of how many times we ask the stack
to linearize the SKB because the hardware cannot handle SKBs with more
than 8 frags per segment/single packet.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
......@@ -97,7 +97,7 @@
#define I40E_MAX_USER_PRIORITY 8
#define I40E_DEFAULT_MSG_ENABLE 4
#define I40E_QUEUE_WAIT_RETRY_LIMIT 10
#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 9)
#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16)
/* Ethtool Private Flags */
#define I40E_PRIV_FLAGS_NPAR_FLAG BIT(0)
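
A small userspace sketch (not driver code) of why the string length above
grows from IFNAMSIZ + 9 to IFNAMSIZ + 16: interrupt names carrying the extra
Flow Director description no longer fit and were truncated by snprintf().
The exact name format below is an assumption made for illustration.

#include <stdio.h>

#define IFNAMSIZ 16

int main(void)
{
	/* assumed name format: "<basename>-fdir-TxRx-<n>" */
	char old_buf[IFNAMSIZ + 9];	/* previous size: truncates */
	char new_buf[IFNAMSIZ + 16];	/* new size: fits */

	snprintf(old_buf, sizeof(old_buf), "%s-%s-%d",
		 "very_long_iface", "fdir-TxRx", 15);
	snprintf(new_buf, sizeof(new_buf), "%s-%s-%d",
		 "very_long_iface", "fdir-TxRx", 15);
	printf("old: '%s'\nnew: '%s'\n", old_buf, new_buf);
	return 0;
}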
......@@ -475,6 +475,7 @@ struct i40e_vsi {
#endif
u32 tx_restart;
u32 tx_busy;
u64 tx_linearize;
u32 rx_buf_failed;
u32 rx_page_failed;
......@@ -740,7 +741,7 @@ int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt);
u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf);
void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi);
void i40e_fcoe_vsi_setup(struct i40e_pf *pf);
int i40e_init_pf_fcoe(struct i40e_pf *pf);
void i40e_init_pf_fcoe(struct i40e_pf *pf);
int i40e_fcoe_setup_ddp_resources(struct i40e_vsi *vsi);
void i40e_fcoe_free_ddp_resources(struct i40e_vsi *vsi);
int i40e_fcoe_handle_offload(struct i40e_ring *rx_ring,
......
......@@ -756,6 +756,8 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
goto asq_send_command_error;
}
hw->aq.asq_last_status = I40E_AQ_RC_OK;
val = rd32(hw, hw->aq.asq.head);
if (val >= hw->aq.num_asq_entries) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
......
......@@ -144,8 +144,7 @@ static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT)
return -EAGAIN;
if (aq_rc >= (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0])) ||
aq_rc < 0)
if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0]))))
return -ERANGE;
return aq_to_posix[aq_rc];
......
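
A standalone sketch (not driver code) of the type-compare fix in the hunk
above: comparing a signed aq_rc against the unsigned array size already
promotes the signed value, so the explicit "aq_rc < 0" test was effectively
dead; a single comparison on an unsigned copy rejects both negative and
too-large indices. The table contents below are made up for illustration.

#include <stdio.h>

/* made-up stand-in for the aq_to_posix[] table */
static const int demo_aq_to_posix[] = { 0, -5, -2, -13 };

static int demo_aq_rc_to_posix(int aq_rc)
{
	/* one unsigned comparison covers both aq_rc < 0 and aq_rc too large */
	if (!((unsigned int)aq_rc <
	      sizeof(demo_aq_to_posix) / sizeof(demo_aq_to_posix[0])))
		return -34;	/* -ERANGE */
	return demo_aq_to_posix[aq_rc];
}

int main(void)
{
	printf("%d %d %d\n", demo_aq_rc_to_posix(-1),
	       demo_aq_rc_to_posix(2), demo_aq_rc_to_posix(100));
	return 0;
}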
......@@ -2062,6 +2062,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
#define I40E_AQC_CEE_APP_ISCSI_MASK (0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT)
#define I40E_AQC_CEE_APP_FIP_SHIFT 0x8
#define I40E_AQC_CEE_APP_FIP_MASK (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT)
#define I40E_AQC_CEE_PG_STATUS_SHIFT 0x0
#define I40E_AQC_CEE_PG_STATUS_MASK (0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT)
#define I40E_AQC_CEE_PFC_STATUS_SHIFT 0x3
......@@ -2070,7 +2071,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
#define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT)
#define I40E_AQC_CEE_FCOE_STATUS_SHIFT 0x8
#define I40E_AQC_CEE_FCOE_STATUS_MASK (0x7 << I40E_AQC_CEE_FCOE_STATUS_SHIFT)
#define I40E_AQC_CEE_ISCSI_STATUS_SHIFT 0xA
#define I40E_AQC_CEE_ISCSI_STATUS_SHIFT 0xB
#define I40E_AQC_CEE_ISCSI_STATUS_MASK (0x7 << I40E_AQC_CEE_ISCSI_STATUS_SHIFT)
#define I40E_AQC_CEE_FIP_STATUS_SHIFT 0x10
#define I40E_AQC_CEE_FIP_STATUS_MASK (0x7 << I40E_AQC_CEE_FIP_STATUS_SHIFT)
......
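
A standalone sketch (not driver code) of why 0xA was a bad shift, assuming
the status fields are meant to sit back to back as the surrounding defines
suggest: a 3-bit field shifted by 0xA starts at bit 10 and overlaps the top
bit of the FCoE status field (bits 8-10), while 0xB places it cleanly at
bits 11-13.

#include <stdio.h>

int main(void)
{
	/* field widths mirror the 0x7 masks in the defines above */
	unsigned int fcoe_mask      = 0x7 << 0x8;	/* bits 8-10 */
	unsigned int iscsi_mask_old = 0x7 << 0xA;	/* bits 10-12: overlaps bit 10 */
	unsigned int iscsi_mask_new = 0x7 << 0xB;	/* bits 11-13: no overlap */

	printf("fcoe & old iscsi = 0x%x (nonzero means overlap)\n",
	       fcoe_mask & iscsi_mask_old);
	printf("fcoe & new iscsi = 0x%x\n", fcoe_mask & iscsi_mask_new);
	return 0;
}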
......@@ -51,6 +51,7 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_QSFP_B:
case I40E_DEV_ID_QSFP_C:
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
case I40E_DEV_ID_20G_KR2:
case I40E_DEV_ID_20G_KR2_A:
hw->mac.type = I40E_MAC_XL710;
......@@ -1256,7 +1257,7 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
for (cnt = 0; cnt < grst_del + 2; cnt++) {
for (cnt = 0; cnt < grst_del + 10; cnt++) {
reg = rd32(hw, I40E_GLGEN_RSTAT);
if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
break;
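
A standalone sketch (not driver code) of the longer wait above: the loop
bound is the NVM-provided global reset delay plus fixed headroom, and it is
the headroom that grows from 2 to 10 iterations. The roughly 100 ms sleep
per iteration is an assumption taken from the shape of the real wait loop.

#include <stdio.h>

static int demo_wait_for_reset(unsigned int grst_del, unsigned int polls_needed)
{
	unsigned int cnt;

	for (cnt = 0; cnt < grst_del + 10; cnt++) {	/* was grst_del + 2 */
		if (cnt >= polls_needed)	/* stand-in for reading RSTAT */
			return 0;		/* device came out of reset */
		/* the driver sleeps ~100 ms here between polls */
	}
	return -1;				/* timed out */
}

int main(void)
{
	/* a reset needing 6 polls now succeeds; with "+ 2" it would time out */
	printf("%d %d\n", demo_wait_for_reset(3, 6), demo_wait_for_reset(3, 20));
	return 0;
}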
......@@ -2234,27 +2235,28 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
/**
* i40e_get_link_status - get status of the HW network link
* @hw: pointer to the hw struct
* @link_up: pointer to bool (true/false = linkup/linkdown)
*
* Returns true if link is up, false if link is down.
* Variable link_up true if link is up, false if link is down.
* The variable link_up is invalid if returned value of status != 0
*
* Side effect: LinkStatusEvent reporting becomes enabled
**/
bool i40e_get_link_status(struct i40e_hw *hw)
i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
{
i40e_status status = 0;
bool link_status = false;
if (hw->phy.get_link_info) {
status = i40e_aq_get_link_info(hw, true, NULL, NULL);
if (status)
goto i40e_get_link_status_exit;
i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
status);
}
link_status = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
*link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
i40e_get_link_status_exit:
return link_status;
return status;
}
/**
......
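
A standalone sketch (not driver code) of the new calling convention above:
returning only a bool collapsed "link is down" and "the query failed" into
the same value, while a status return plus a link_up out-parameter keeps the
failure reason visible to the caller. The names below are illustrative.

#include <stdio.h>

#define DEMO_ERR_TIMEOUT 1	/* stand-in for an i40e_status error code */

static int demo_get_link_status(int query_ok, int phy_link_up, int *link_up)
{
	if (!query_ok)
		return DEMO_ERR_TIMEOUT;	/* caller now sees the real reason */
	*link_up = phy_link_up;
	return 0;
}

int main(void)
{
	int link_up = 0;
	int status;

	status = demo_get_link_status(1, 1, &link_up);
	printf("status=%d link_up=%d\n", status, link_up);

	status = demo_get_link_status(0, 1, &link_up);
	printf("status=%d (a failed query is no longer reported as link down)\n",
	       status);
	return 0;
}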
......@@ -531,37 +531,55 @@ static void i40e_cee_to_dcb_config(
dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en;
dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >>
I40E_AQC_CEE_APP_STATUS_SHIFT;
i = 0;
status = (tlv_status & I40E_AQC_CEE_FCOE_STATUS_MASK) >>
I40E_AQC_CEE_FCOE_STATUS_SHIFT;
err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
/* Add APPs if Error is False and Oper/Sync is True */
/* Add FCoE APP if Error is False and Oper/Sync is True */
if (!err && sync && oper) {
/* CEE operating configuration supports FCoE/iSCSI/FIP only */
dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS;
/* FCoE APP */
dcbcfg->app[0].priority =
dcbcfg->app[i].priority =
(app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >>
I40E_AQC_CEE_APP_FCOE_SHIFT;
dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FCOE;
i++;
}
status = (tlv_status & I40E_AQC_CEE_ISCSI_STATUS_MASK) >>
I40E_AQC_CEE_ISCSI_STATUS_SHIFT;
err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
/* Add iSCSI APP if Error is False and Oper/Sync is True */
if (!err && sync && oper) {
/* iSCSI APP */
dcbcfg->app[1].priority =
dcbcfg->app[i].priority =
(app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >>
I40E_AQC_CEE_APP_ISCSI_SHIFT;
dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP;
dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI;
dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP;
dcbcfg->app[i].protocolid = I40E_APP_PROTOID_ISCSI;
i++;
}
status = (tlv_status & I40E_AQC_CEE_FIP_STATUS_MASK) >>
I40E_AQC_CEE_FIP_STATUS_SHIFT;
err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
/* Add FIP APP if Error is False and Oper/Sync is True */
if (!err && sync && oper) {
/* FIP APP */
dcbcfg->app[2].priority =
dcbcfg->app[i].priority =
(app_prio & I40E_AQC_CEE_APP_FIP_MASK) >>
I40E_AQC_CEE_APP_FIP_SHIFT;
dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE;
dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP;
dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FIP;
i++;
}
dcbcfg->numapps = i;
}
/**
......
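
A standalone sketch (not driver code) of the per-APP handling above: each
APP's 3-bit TLV status is decoded into err/sync/oper flags, and the table
index only advances for entries that pass, so numapps ends up matching what
was actually stored instead of being fixed at three. The status bit
positions below are illustrative stand-ins.

#include <stdio.h>

#define DEMO_TLV_STATUS_OPER 0x1
#define DEMO_TLV_STATUS_SYNC 0x2
#define DEMO_TLV_STATUS_ERR  0x4

static int demo_count_apps(const unsigned int *status, int nstatus)
{
	int i = 0, n;

	for (n = 0; n < nstatus; n++) {
		int err  = (status[n] & DEMO_TLV_STATUS_ERR)  ? 1 : 0;
		int sync = (status[n] & DEMO_TLV_STATUS_SYNC) ? 1 : 0;
		int oper = (status[n] & DEMO_TLV_STATUS_OPER) ? 1 : 0;

		if (!err && sync && oper)
			i++;		/* app[i] would be filled in here */
	}
	return i;			/* becomes dcbcfg->numapps */
}

int main(void)
{
	unsigned int status[3] = { 0x3, 0x4, 0x3 };	/* FCoE ok, iSCSI error, FIP ok */

	printf("numapps=%d\n", demo_count_apps(status, 3));
	return 0;
}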
......@@ -240,9 +240,8 @@ static void i40e_dcbnl_del_app(struct i40e_pf *pf,
if (pf->vsi[v] && pf->vsi[v]->netdev) {
err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app);
if (err)
dev_info(&pf->pdev->dev, "%s: Failed deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n",
__func__, pf->vsi[v]->seid,
err, app->selector,
dev_info(&pf->pdev->dev, "Failed deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n",
pf->vsi[v]->seid, err, app->selector,
app->protocolid, app->priority);
}
}
......
......@@ -87,6 +87,7 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
I40E_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
I40E_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
I40E_VSI_STAT("tx_linearize", tx_linearize),
};
static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
......@@ -425,6 +426,7 @@ static void i40e_get_settings_link_down(struct i40e_hw *hw,
ecmd->advertising = ADVERTISED_10000baseKR_Full;
break;
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
ecmd->supported = SUPPORTED_10000baseT_Full |
SUPPORTED_1000baseT_Full |
SUPPORTED_100baseT_Full;
......@@ -665,6 +667,13 @@ static int i40e_set_settings(struct net_device *netdev,
advertise & ADVERTISED_40000baseLR4_Full)
config.link_speed |= I40E_LINK_SPEED_40GB;
/* If speed didn't get set, set it to what it currently is.
* This is needed because if advertise is 0 (as it is when autoneg
* is disabled) then speed won't get set.
*/
if (!config.link_speed)
config.link_speed = abilities.link_speed;
if (change || (abilities.link_speed != config.link_speed)) {
/* copy over the rest of the abilities */
config.phy_type = abilities.phy_type;
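
A standalone sketch (not driver code) of the fallback just added: with
autoneg disabled the advertised mask is 0, so no speed bit gets set and the
firmware would otherwise be asked for a link speed of 0; copying the current
ability keeps the configured speed. The bit values are illustrative.

#include <stdio.h>

#define DEMO_ADVERTISED_10G	0x1	/* illustrative advertise bit */
#define DEMO_LINK_SPEED_10GB	0x8	/* illustrative speed bit */

static unsigned int demo_pick_link_speed(unsigned int advertise,
					 unsigned int current_speed)
{
	unsigned int link_speed = 0;

	if (advertise & DEMO_ADVERTISED_10G)
		link_speed |= DEMO_LINK_SPEED_10GB;

	/* autoneg off: advertise == 0, so fall back to what is set now */
	if (!link_speed)
		link_speed = current_speed;
	return link_speed;
}

int main(void)
{
	printf("0x%x 0x%x\n",
	       demo_pick_link_speed(DEMO_ADVERTISED_10G, DEMO_LINK_SPEED_10GB),
	       demo_pick_link_speed(0, DEMO_LINK_SPEED_10GB));
	return 0;
}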
......@@ -1508,9 +1517,18 @@ static int i40e_link_test(struct net_device *netdev, u64 *data)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
i40e_status status;
bool link_up = false;
netif_info(pf, hw, netdev, "link test\n");
if (i40e_get_link_status(&pf->hw))
status = i40e_get_link_status(&pf->hw, &link_up);
if (status) {
netif_err(pf, drv, netdev, "link query timed out, please retry test\n");
*data = 1;
return *data;
}
if (link_up)
*data = 0;
else
*data = 1;
......
......@@ -272,10 +272,8 @@ static int i40e_fcoe_ddp_put(struct net_device *netdev, u16 xid)
/**
* i40e_fcoe_sw_init - sets up the HW for FCoE
* @pf: pointer to PF
*
* Returns 0 if FCoE is supported otherwise the error code
**/
int i40e_init_pf_fcoe(struct i40e_pf *pf)
void i40e_init_pf_fcoe(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
u32 val;
......@@ -287,13 +285,13 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf)
if (!pf->hw.func_caps.fcoe) {
dev_info(&pf->pdev->dev, "FCoE capability is disabled\n");
return 0;
return;
}
if (!pf->hw.func_caps.dcb) {
dev_warn(&pf->pdev->dev,
"Hardware is not DCB capable not enabling FCoE.\n");
return 0;
return;
}
/* enable FCoE hash filter */
......@@ -326,7 +324,7 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf)
wr32(hw, I40E_GLFCOE_RCTL, val);
dev_info(&pf->pdev->dev, "FCoE is supported.\n");
return 0;
return;
}
/**
......
......@@ -75,6 +75,7 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
......@@ -878,6 +879,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
u32 rx_page, rx_buf;
u64 bytes, packets;
unsigned int start;
u64 tx_linearize;
u64 rx_p, rx_b;
u64 tx_p, tx_b;
u16 q;
......@@ -896,7 +898,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
*/
rx_b = rx_p = 0;
tx_b = tx_p = 0;
tx_restart = tx_busy = 0;
tx_restart = tx_busy = tx_linearize = 0;
rx_page = 0;
rx_buf = 0;
rcu_read_lock();
......@@ -913,6 +915,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
tx_p += packets;
tx_restart += p->tx_stats.restart_queue;
tx_busy += p->tx_stats.tx_busy;
tx_linearize += p->tx_stats.tx_linearize;
/* Rx queue is part of the same block as Tx queue */
p = &p[1];
......@@ -929,6 +932,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
rcu_read_unlock();
vsi->tx_restart = tx_restart;
vsi->tx_busy = tx_busy;
vsi->tx_linearize = tx_linearize;
vsi->rx_page_failed = rx_page;
vsi->rx_buf_failed = rx_buf;
......@@ -1725,36 +1729,27 @@ static void i40e_set_rx_mode(struct net_device *netdev)
/* remove filter if not in netdev list */
list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
bool found = false;
if (!f->is_netdev)
continue;
if (is_multicast_ether_addr(f->macaddr)) {
netdev_for_each_mc_addr(mca, netdev) {
if (ether_addr_equal(mca->addr, f->macaddr)) {
found = true;
break;
}
}
} else {
netdev_for_each_uc_addr(uca, netdev) {
if (ether_addr_equal(uca->addr, f->macaddr)) {
found = true;
break;
}
}
netdev_for_each_mc_addr(mca, netdev)
if (ether_addr_equal(mca->addr, f->macaddr))
goto bottom_of_search_loop;
for_each_dev_addr(netdev, ha) {
if (ether_addr_equal(ha->addr, f->macaddr)) {
found = true;
break;
}
}
}
if (!found)
i40e_del_filter(
vsi, f->macaddr, I40E_VLAN_ANY, false, true);
netdev_for_each_uc_addr(uca, netdev)
if (ether_addr_equal(uca->addr, f->macaddr))
goto bottom_of_search_loop;
for_each_dev_addr(netdev, ha)
if (ether_addr_equal(ha->addr, f->macaddr))
goto bottom_of_search_loop;
/* f->macaddr wasn't found in uc, mc, or ha list so delete it */
i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, false, true);
bottom_of_search_loop:
continue;
}
/* check for other flag changes */
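
A standalone sketch (not driver code) of the consolidated search above: a
filter is kept if its address shows up in any of the netdev's lists and
deleted only when none of them matches, instead of branching on unicast
versus multicast before searching. Plain string arrays stand in for the
netdev address lists.

#include <stdio.h>
#include <string.h>

static int addr_in_list(const char *addr, const char *const *list, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (!strcmp(addr, list[i]))
			return 1;
	return 0;
}

int main(void)
{
	const char *mc[] = { "01:00:5e:00:00:01" };
	const char *uc[] = { "02:aa:bb:cc:dd:ee" };
	const char *filters[] = { "02:aa:bb:cc:dd:ee", "02:11:22:33:44:55" };
	int i;

	for (i = 0; i < 2; i++) {
		if (addr_in_list(filters[i], mc, 1) ||
		    addr_in_list(filters[i], uc, 1))
			continue;	/* still in a netdev list: keep it */
		printf("delete %s\n", filters[i]);	/* i40e_del_filter() here */
	}
	return 0;
}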
......@@ -3155,8 +3150,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
q_vector);
if (err) {
dev_info(&pf->pdev->dev,
"%s: request_irq failed, error: %d\n",
__func__, err);
"MSIX request_irq failed, error: %d\n", err);
goto free_queue_irqs;
}
/* assign the mask for this irq */
......@@ -3680,9 +3674,8 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
ret = i40e_pf_txq_wait(pf, pf_q, enable);
if (ret) {
dev_info(&pf->pdev->dev,
"%s: VSI seid %d Tx ring %d %sable timeout\n",
__func__, vsi->seid, pf_q,
(enable ? "en" : "dis"));
"VSI seid %d Tx ring %d %sable timeout\n",
vsi->seid, pf_q, (enable ? "en" : "dis"));
break;
}
}
......@@ -3758,9 +3751,8 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
ret = i40e_pf_rxq_wait(pf, pf_q, enable);
if (ret) {
dev_info(&pf->pdev->dev,
"%s: VSI seid %d Rx ring %d %sable timeout\n",
__func__, vsi->seid, pf_q,
(enable ? "en" : "dis"));
"VSI seid %d Rx ring %d %sable timeout\n",
vsi->seid, pf_q, (enable ? "en" : "dis"));
break;
}
}
......@@ -4055,8 +4047,7 @@ static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
vsi->type == I40E_VSI_FCOE) {
dev_dbg(&vsi->back->pdev->dev,
"%s: VSI seid %d skipping FCoE VSI disable\n",
__func__, vsi->seid);
"VSI seid %d skipping FCoE VSI disable\n", vsi->seid);
return;
}
......@@ -4130,8 +4121,8 @@ static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi)
ret = i40e_pf_txq_wait(pf, pf_q, false);
if (ret) {
dev_info(&pf->pdev->dev,
"%s: VSI seid %d Tx ring %d disable timeout\n",
__func__, vsi->seid, pf_q);
"VSI seid %d Tx ring %d disable timeout\n",
vsi->seid, pf_q);
return ret;
}
}
......@@ -5422,8 +5413,7 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
}
dev_dbg(&pf->pdev->dev, "%s: need_reconfig=%d\n", __func__,
need_reconfig);
dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
return need_reconfig;
}
......@@ -5450,16 +5440,14 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
/* Ignore if event is not for Nearest Bridge */
type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
dev_dbg(&pf->pdev->dev,
"%s: LLDP event mib bridge type 0x%x\n", __func__, type);
dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
return ret;
/* Check MIB Type and return if event for Remote MIB update */
type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
dev_dbg(&pf->pdev->dev,
"%s: LLDP event mib type %s\n", __func__,
type ? "remote" : "local");
"LLDP event mib type %s\n", type ? "remote" : "local");
if (type == I40E_AQ_LLDP_MIB_REMOTE) {
/* Update the remote cached instance and return */
ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
......@@ -5842,15 +5830,23 @@ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
**/
static void i40e_link_event(struct i40e_pf *pf)
{
bool new_link, old_link;
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
u8 new_link_speed, old_link_speed;
i40e_status status;
bool new_link, old_link;
/* set this to force the get_link_status call to refresh state */
pf->hw.phy.get_link_info = true;
old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
new_link = i40e_get_link_status(&pf->hw);
status = i40e_get_link_status(&pf->hw, &new_link);
if (status) {
dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
status);
return;
}
old_link_speed = pf->hw.phy.link_info_old.link_speed;
new_link_speed = pf->hw.phy.link_info.link_speed;
......@@ -6572,9 +6568,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
}
#endif /* CONFIG_I40E_DCB */
#ifdef I40E_FCOE
ret = i40e_init_pf_fcoe(pf);
if (ret)
dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", ret);
i40e_init_pf_fcoe(pf);
#endif
/* do basic switch setup */
......@@ -7975,9 +7969,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
}
#ifdef I40E_FCOE
err = i40e_init_pf_fcoe(pf);
if (err)
dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", err);
i40e_init_pf_fcoe(pf);
#endif /* I40E_FCOE */
#ifdef CONFIG_PCI_IOV
......@@ -9057,8 +9049,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
if (veb) {
if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
dev_info(&vsi->back->pdev->dev,
"%s: New VSI creation error, uplink seid of LAN VSI expected.\n",
__func__);
"New VSI creation error, uplink seid of LAN VSI expected.\n");
return NULL;
}
/* We come up by default in VEPA mode if SRIOV is not
......@@ -9947,6 +9938,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct i40e_pf *pf;
struct i40e_hw *hw;
static u16 pfs_found;
u16 wol_nvm_bits;
u16 link_status;
int err = 0;
u32 len;
......@@ -10163,8 +10155,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
clear_bit(__I40E_SERVICE_SCHED, &pf->state);
pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
/* WoL defaults to disabled */
pf->wol_en = false;
/* NVM bit on means WoL disabled for the port */
i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1)
pf->wol_en = false;
else
pf->wol_en = true;
device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
/* set up the main switch operations */
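
A standalone sketch (not driver code) of the new WoL default above: a set
bit for this port in the NVM wake-on-LAN word means WoL is disabled for the
port, and only partition 1 may use it; otherwise it now defaults to enabled
instead of always off.

#include <stdio.h>
#include <stdint.h>

static int demo_wol_default(uint16_t wol_nvm_bits, unsigned int port,
			    unsigned int partition_id)
{
	/* NVM bit on means WoL disabled for the port */
	if (((1u << port) & wol_nvm_bits) || partition_id != 1)
		return 0;	/* wol_en = false */
	return 1;		/* wol_en = true */
}

int main(void)
{
	printf("%d %d\n", demo_wol_default(0x0002, 1, 1),	/* bit set: off */
	       demo_wol_default(0x0000, 1, 1));			/* clear: on  */
	return 0;
}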
......@@ -10496,7 +10492,7 @@ static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
int err;
u32 reg;
dev_info(&pdev->dev, "%s\n", __func__);
dev_dbg(&pdev->dev, "%s\n", __func__);
if (pci_enable_device_mem(pdev)) {
dev_info(&pdev->dev,
"Cannot re-enable PCI device after reset.\n");
......@@ -10536,7 +10532,7 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
dev_info(&pdev->dev, "%s\n", __func__);
dev_dbg(&pdev->dev, "%s\n", __func__);
if (test_bit(__I40E_SUSPENDED, &pf->state))
return;
......@@ -10628,9 +10624,7 @@ static int i40e_resume(struct pci_dev *pdev)
err = pci_enable_device_mem(pdev);
if (err) {
dev_err(&pdev->dev,
"%s: Cannot enable PCI device from suspend\n",
__func__);
dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
return err;
}
pci_set_master(pdev);
......
......@@ -547,11 +547,13 @@ i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
{
i40e_status ret_code = 0;
u16 checksum;
__le16 le_sum;
ret_code = i40e_calc_nvm_checksum(hw, &checksum);
le_sum = cpu_to_le16(checksum);
if (!ret_code)
ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
1, &checksum, true);
1, &le_sum, true);
return ret_code;
}
......
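
A userspace sketch (not driver code) of the endian fix above: the checksum
word must be stored little-endian regardless of the host byte order, so it
is converted before being written to the NVM. This uses glibc's htole16()
where the driver uses cpu_to_le16().

#include <stdio.h>
#include <stdint.h>
#include <endian.h>

int main(void)
{
	uint16_t checksum = 0xBABA;		/* computed in CPU byte order */
	uint16_t le_sum = htole16(checksum);	/* what actually goes into the NVM */

	/* on little endian the two match; on big endian le_sum is byte-swapped */
	printf("cpu=0x%04x le=0x%04x\n", checksum, le_sum);
	return 0;
}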
......@@ -258,7 +258,7 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw);
i40e_status i40e_pf_reset(struct i40e_hw *hw);
void i40e_clear_hw(struct i40e_hw *hw);
void i40e_clear_pxe_mode(struct i40e_hw *hw);
bool i40e_get_link_status(struct i40e_hw *hw);
i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up);
i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
u32 *max_bw, u32 *min_bw, bool *min_valid,
......
......@@ -2771,10 +2771,11 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
if (tsyn)
tx_flags |= I40E_TX_FLAGS_TSYN;
if (i40e_chk_linearize(skb, tx_flags))
if (i40e_chk_linearize(skb, tx_flags)) {
if (skb_linearize(skb))
goto out_drop;
tx_ring->tx_stats.tx_linearize++;
}
skb_tx_timestamp(skb);
/* always enable CRC insertion offload */
......
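
A standalone sketch (not driver code) of the counter being wired up above:
when a packet carries more fragments than the hardware can take per segment,
the stack is asked to linearize it and tx_linearize records how often that
happens. The limit of 8 fragments comes from the cover letter; the real
check (i40e_chk_linearize) is TSO-aware.

#include <stdio.h>

#define DEMO_MAX_FRAGS_PER_SEGMENT 8

struct demo_tx_stats {
	unsigned long tx_linearize;
};

static int demo_xmit(int nr_frags, struct demo_tx_stats *stats)
{
	if (nr_frags > DEMO_MAX_FRAGS_PER_SEGMENT) {
		/* skb_linearize(skb) would run here; count the event */
		stats->tx_linearize++;
	}
	return 0;
}

int main(void)
{
	struct demo_tx_stats stats = { 0 };

	demo_xmit(3, &stats);
	demo_xmit(12, &stats);
	printf("tx_linearize=%lu\n", stats.tx_linearize);
	return 0;
}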
......@@ -188,6 +188,7 @@ struct i40e_tx_queue_stats {
u64 restart_queue;
u64 tx_busy;
u64 tx_done_old;
u64 tx_linearize;
};
struct i40e_rx_queue_stats {
......
......@@ -46,6 +46,7 @@
#define I40E_DEV_ID_10G_BASE_T 0x1586
#define I40E_DEV_ID_20G_KR2 0x1587
#define I40E_DEV_ID_20G_KR2_A 0x1588
#define I40E_DEV_ID_10G_BASE_T4 0x1589
#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571
#define I40E_DEV_ID_SFP_X722 0x37D0
......
......@@ -81,7 +81,6 @@ enum i40e_virtchnl_ops {
I40E_VIRTCHNL_OP_GET_STATS = 15,
I40E_VIRTCHNL_OP_FCOE = 16,
I40E_VIRTCHNL_OP_EVENT = 17,
I40E_VIRTCHNL_OP_CONFIG_RSS = 18,
};
/* Virtual channel message descriptor. This overlays the admin queue
......
......@@ -872,6 +872,11 @@ void i40e_free_vfs(struct i40e_pf *pf)
i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
false);
for (i = 0; i < pf->num_alloc_vfs; i++)
if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
false);
/* Disable IOV before freeing resources. This lets any VF drivers
* running in the host get themselves cleaned up before we yank
* the carpet out from underneath their feet.
......
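
A standalone sketch (not driver code) of the ordering the hunk above
enforces: every VF's rings are stopped first, so the hardware can no longer
DMA into the ring memory by the time the per-VF resources are freed. The
structures here are simplified stand-ins.

#include <stdio.h>
#include <stdlib.h>

struct demo_vf {
	int rings_enabled;
	void *ring_mem;
};

int main(void)
{
	int i, num_vfs = 2;
	struct demo_vf *vfs = calloc(num_vfs, sizeof(*vfs));

	for (i = 0; i < num_vfs; i++) {
		vfs[i].rings_enabled = 1;
		vfs[i].ring_mem = malloc(64);
	}

	/* 1) stop all rings first (i40e_vsi_control_rings(..., false)) */
	for (i = 0; i < num_vfs; i++)
		vfs[i].rings_enabled = 0;

	/* 2) only then release the memory the hardware was using */
	for (i = 0; i < num_vfs; i++)
		free(vfs[i].ring_mem);

	printf("freed rings for %d VFs after stopping them\n", num_vfs);
	free(vfs);
	return 0;
}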
......@@ -696,6 +696,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
goto asq_send_command_error;
}
hw->aq.asq_last_status = I40E_AQ_RC_OK;
val = rd32(hw, hw->aq.asq.head);
if (val >= hw->aq.num_asq_entries) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
......
......@@ -144,8 +144,7 @@ static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT)
return -EAGAIN;
if (aq_rc >= (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0])) ||
aq_rc < 0)
if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0]))))
return -ERANGE;
return aq_to_posix[aq_rc];
......
......@@ -51,6 +51,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_QSFP_B:
case I40E_DEV_ID_QSFP_C:
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
case I40E_DEV_ID_20G_KR2:
case I40E_DEV_ID_20G_KR2_A:
hw->mac.type = I40E_MAC_XL710;
......
......@@ -1927,10 +1927,11 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
else if (tso)
tx_flags |= I40E_TX_FLAGS_TSO;
if (i40e_chk_linearize(skb, tx_flags))
if (i40e_chk_linearize(skb, tx_flags)) {
if (skb_linearize(skb))
goto out_drop;
tx_ring->tx_stats.tx_linearize++;
}
skb_tx_timestamp(skb);
/* always enable CRC insertion offload */
......
......@@ -187,6 +187,7 @@ struct i40e_tx_queue_stats {
u64 restart_queue;
u64 tx_busy;
u64 tx_done_old;
u64 tx_linearize;
};
struct i40e_rx_queue_stats {
......
......@@ -46,6 +46,7 @@
#define I40E_DEV_ID_10G_BASE_T 0x1586
#define I40E_DEV_ID_20G_KR2 0x1587
#define I40E_DEV_ID_20G_KR2_A 0x1588
#define I40E_DEV_ID_10G_BASE_T4 0x1589
#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571
#define I40E_DEV_ID_SFP_X722 0x37D0
......
......@@ -81,7 +81,6 @@ enum i40e_virtchnl_ops {
I40E_VIRTCHNL_OP_GET_STATS = 15,
I40E_VIRTCHNL_OP_FCOE = 16,
I40E_VIRTCHNL_OP_EVENT = 17,
I40E_VIRTCHNL_OP_CONFIG_RSS = 18,
};
/* Virtual channel message descriptor. This overlays the admin queue
......
......@@ -489,8 +489,7 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
q_vector);
if (err) {
dev_info(&adapter->pdev->dev,
"%s: request_irq failed, error: %d\n",
__func__, err);
"Request_irq failed, error: %d\n", err);
goto free_queue_irqs;
}
/* assign the mask for this irq */
......@@ -856,6 +855,7 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
struct i40evf_mac_filter *f, *ftmp;
struct netdev_hw_addr *uca;
struct netdev_hw_addr *mca;
struct netdev_hw_addr *ha;
int count = 50;
/* add addr if not already in the filter list */
......@@ -877,29 +877,27 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
}
/* remove filter if not in netdev list */
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
bool found = false;
if (is_multicast_ether_addr(f->macaddr)) {
netdev_for_each_mc_addr(mca, netdev) {
if (ether_addr_equal(mca->addr, f->macaddr)) {
found = true;
break;
}
}
} else {
netdev_for_each_uc_addr(uca, netdev) {
if (ether_addr_equal(uca->addr, f->macaddr)) {
found = true;
break;
}
}
if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr))
found = true;
}
if (!found) {
f->remove = true;
adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
}
netdev_for_each_mc_addr(mca, netdev)
if (ether_addr_equal(mca->addr, f->macaddr))
goto bottom_of_search_loop;
netdev_for_each_uc_addr(uca, netdev)
if (ether_addr_equal(uca->addr, f->macaddr))
goto bottom_of_search_loop;
for_each_dev_addr(netdev, ha)
if (ether_addr_equal(ha->addr, f->macaddr))
goto bottom_of_search_loop;
if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr))
goto bottom_of_search_loop;
/* f->macaddr wasn't found in uc, mc, or ha list so delete it */
f->remove = true;
adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
bottom_of_search_loop:
continue;
}
clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
}
......@@ -1165,7 +1163,7 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
for (vector = 0; vector < v_budget; vector++)
adapter->msix_entries[vector].entry = vector;
i40evf_acquire_msix_vectors(adapter, v_budget);
err = i40evf_acquire_msix_vectors(adapter, v_budget);
out:
adapter->netdev->real_num_tx_queues = pairs;
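
A standalone sketch (not driver code) of the propagation fix above: the
return value of the vector-acquisition helper is captured and handed back
instead of being dropped, so the caller can tell that MSI-X setup failed
rather than carrying on and crashing later. The names are illustrative.

#include <stdio.h>
#include <errno.h>

static int demo_acquire_msix_vectors(int want, int have)
{
	if (have < want)
		return -ENOSPC;		/* not enough vectors available */
	return 0;
}

static int demo_set_interrupt_capability(int want, int have)
{
	int err;

	err = demo_acquire_msix_vectors(want, have);	/* was: return value ignored */
	return err;					/* now bubbled up to the caller */
}

int main(void)
{
	printf("%d %d\n", demo_set_interrupt_capability(4, 8),
	       demo_set_interrupt_capability(4, 2));
	return 0;
}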
......@@ -1853,8 +1851,7 @@ static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter)
if (!err)
continue;
dev_err(&adapter->pdev->dev,
"%s: Allocation for Tx Queue %u failed\n",
__func__, i);
"Allocation for Tx Queue %u failed\n", i);
break;
}
......@@ -1881,8 +1878,7 @@ static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter)
if (!err)
continue;
dev_err(&adapter->pdev->dev,
"%s: Allocation for Rx Queue %u failed\n",
__func__, i);
"Allocation for Rx Queue %u failed\n", i);
break;
}
return err;
......
......@@ -234,8 +234,8 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
__func__, adapter->current_op);
dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
adapter->current_op);
return;
}
adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
......@@ -288,8 +288,8 @@ void i40evf_enable_queues(struct i40evf_adapter *adapter)
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
__func__, adapter->current_op);
dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n",
adapter->current_op);
return;
}
adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
......@@ -313,8 +313,8 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter)
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
__func__, adapter->current_op);
dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n",
adapter->current_op);
return;
}
adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
......@@ -341,8 +341,8 @@ void i40evf_map_queues(struct i40evf_adapter *adapter)
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
__func__, adapter->current_op);
dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n",
adapter->current_op);
return;
}
adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
......@@ -393,8 +393,8 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
__func__, adapter->current_op);
dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n",
adapter->current_op);
return;
}
list_for_each_entry(f, &adapter->mac_filter_list, list) {
......@@ -410,8 +410,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
len = sizeof(struct i40e_virtchnl_ether_addr_list) +
(count * sizeof(struct i40e_virtchnl_ether_addr));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
__func__);
dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_ether_addr_list)) /
sizeof(struct i40e_virtchnl_ether_addr);
......@@ -453,8 +452,8 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
__func__, adapter->current_op);
dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n",
adapter->current_op);
return;
}
list_for_each_entry(f, &adapter->mac_filter_list, list) {
......@@ -470,8 +469,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
len = sizeof(struct i40e_virtchnl_ether_addr_list) +
(count * sizeof(struct i40e_virtchnl_ether_addr));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
__func__);
dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_ether_addr_list)) /
sizeof(struct i40e_virtchnl_ether_addr);
......@@ -513,8 +511,8 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
__func__, adapter->current_op);
dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n",
adapter->current_op);
return;
}
......@@ -531,8 +529,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
(count * sizeof(u16));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
__func__);
dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_vlan_filter_list)) /
sizeof(u16);
......@@ -572,8 +569,8 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
__func__, adapter->current_op);
dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n",
adapter->current_op);
return;
}
......@@ -590,8 +587,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
(count * sizeof(u16));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
__func__);
dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_vlan_filter_list)) /
sizeof(u16);
......@@ -629,8 +625,8 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
__func__, adapter->current_op);
dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n",
adapter->current_op);
return;
}
adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
......@@ -720,17 +716,16 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
}
break;
default:
dev_err(&adapter->pdev->dev,
"%s: Unknown event %d from pf\n",
__func__, vpe->event);
dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n",
vpe->event);
break;
}
return;
}
if (v_retval) {
dev_err(&adapter->pdev->dev, "%s: PF returned error %d (%s) to our request %d\n",
__func__, v_retval,
i40evf_stat_str(&adapter->hw, v_retval), v_opcode);
dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
v_retval, i40evf_stat_str(&adapter->hw, v_retval),
v_opcode);
}
switch (v_opcode) {
case I40E_VIRTCHNL_OP_GET_STATS: {
......