Commit 21396689 authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates

This series contains updates to igb, i40e and i40evf.

I provide a code comment fix which David Miller noticed in the last
series of patches I submitted.

Shannon provides a patch to clean up the NAPI structs when deleting the
netdev.
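
The ordering matters because the NAPI structs are embedded in the VSI's
q_vectors while they are registered on the netdev.  A minimal sketch of the
resulting teardown order (the helper name example_vsi_teardown is hypothetical;
the i40e calls are the ones visible in the diff below, and the assumption is
that netif_napi_del() runs from the q_vector teardown path as in i40e):

	/* condensed, hypothetical teardown order based on i40e_vsi_release() */
	static void example_vsi_teardown(struct i40e_vsi *vsi)
	{
		if (vsi->netdev)
			unregister_netdev(vsi->netdev);	/* detach from the stack first */

		i40e_vsi_free_q_vectors(vsi);	/* q_vectors own the napi structs; netif_napi_del() happens here */

		if (vsi->netdev) {
			free_netdev(vsi->netdev);	/* only now release the netdev memory */
			vsi->netdev = NULL;
		}
	}

Accordingly, the i40e_vsi_release() hunks below move free_netdev() from right
after unregister_netdev() to after i40e_vsi_free_q_vectors().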

Anjali provides several patches for i40e.  The first fixes a bug in the update
filter logic which was causing a kernel panic, and the next renames an error
bit so that it correctly indicates the error.  Another adds a definition for a
new state variable to keep track of features automatically disabled due to
hardware resource limitations, as opposed to features disabled by the user.
Anjali also provides a patch to add code to handle a filter programming error
caused by a full filter table, which additionally resolves a previous compile
warning about an unused "*pf" variable introduced in the last i40e series
submission.
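
As a condensed sketch of that flow (flag and helper names are the ones added
in the diff below; the real code also handles the sideband/ntuple flag and
uses a smaller headroom before re-enabling it than for ATR), the
programming-status handler auto-disables the feature when the table is nearly
full and the service task re-enables it once there is room again:

	u32 fcnt_prog, fcnt_avail;

	/* on a "filter table full" programming error */
	fcnt_prog  = i40e_get_current_fd_count(pf);
	fcnt_avail = pf->hw.fdir_shared_filter_count + pf->fdir_pf_filter_count;
	if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN) &&
	    (pf->flags & I40E_FLAG_FD_ATR_ENABLED)) {
		pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
		pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
		pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
	}

	/* later, from the FDIR service subtask, once there is headroom again */
	if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2) &&
	    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
		pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;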

Jesse provides three i40e patches to clean up strings, making them more
consistent and aligned with other Intel drivers.

Akeem cleans up a misleading function header comment for i40e.

Mitch provides a fix for i40e/i40evf to use the correctly reported number
of MSI-X vectors in the PF and VF.  He then provides a patch to use
dma_set_mask_and_coherent(), which was introduced in v3.13 and simplifies
the DMA mapping code a bit.
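
For reference, the resulting probe-time pattern (taken directly from the
i40e/i40evf hunks below) is:

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
		goto err_dma;
	}

which replaces the open-coded dma_set_mask()/dma_set_coherent_mask() fallback
chain.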

v2:
- dropped the 2 ixgbe patches from Emil based on feedback from David Miller,
  since the 2 fixes should be handled in the net core to fix all drivers
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -152,7 +152,10 @@ struct i40e_lump_tracking {
};
#define I40E_DEFAULT_ATR_SAMPLE_RATE 20
#define I40E_FDIR_MAX_RAW_PACKET_SIZE 512
#define I40E_FDIR_BUFFER_FULL_MARGIN 10
#define I40E_FDIR_BUFFER_HEAD_ROOM 200
struct i40e_fdir_filter {
struct hlist_node fdir_node;
/* filter input set */
@@ -263,6 +266,9 @@ struct i40e_pf {
#define I40E_FLAG_VXLAN_FILTER_SYNC (u64)(1 << 27)
#endif
/* tracks features that get auto disabled by errors */
u64 auto_disable_flags;
bool stat_offsets_loaded;
struct i40e_hw_port_stats stats;
struct i40e_hw_port_stats stats_offsets;
@@ -550,6 +556,8 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
struct i40e_pf *pf, bool add);
int i40e_add_del_fdir(struct i40e_vsi *vsi,
struct i40e_fdir_filter *input, bool add);
void i40e_fdir_check_and_reenable(struct i40e_pf *pf);
int i40e_get_current_fd_count(struct i40e_pf *pf);
void i40e_set_ethtool_ops(struct net_device *netdev);
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
u8 *macaddr, s16 vlan,
@@ -1011,10 +1011,12 @@ static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
**/
static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable)
{
if (enable)
if (enable) {
pf->flags |= flag;
else
} else {
pf->flags &= ~flag;
pf->auto_disable_flags |= flag;
}
dev_info(&pf->pdev->dev, "requesting a pf reset\n");
i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
}
@@ -1467,19 +1469,19 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
pf->msg_enable);
}
} else if (strncmp(cmd_buf, "pfr", 3) == 0) {
dev_info(&pf->pdev->dev, "forcing PFR\n");
dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n");
i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "corer", 5) == 0) {
dev_info(&pf->pdev->dev, "forcing CoreR\n");
dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n");
i40e_do_reset_safe(pf, (1 << __I40E_CORE_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "globr", 5) == 0) {
dev_info(&pf->pdev->dev, "forcing GlobR\n");
dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n");
i40e_do_reset_safe(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "empr", 4) == 0) {
dev_info(&pf->pdev->dev, "forcing EMPR\n");
dev_info(&pf->pdev->dev, "debugfs: forcing EMPR\n");
i40e_do_reset_safe(pf, (1 << __I40E_EMP_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "read", 4) == 0) {
@@ -1670,6 +1672,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
bool add = false;
int ret;
if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
goto command_write_done;
if (strncmp(cmd_buf, "add", 3) == 0)
add = true;
if (add && (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
goto command_write_done;
asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE,
GFP_KERNEL);
if (!asc_packet)
@@ -1684,8 +1695,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
goto command_write_done;
}
if (strncmp(cmd_buf, "add", 3) == 0)
add = true;
cnt = sscanf(&cmd_buf[13],
"%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %511s",
&fd_data.q_index,
@@ -1356,6 +1356,24 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
return 0;
}
/**
* i40e_match_fdir_input_set - Match a new filter against an existing one
* @rule: The filter already added
* @input: The new filter to compare against
*
* Returns true if the two input sets match
**/
static bool i40e_match_fdir_input_set(struct i40e_fdir_filter *rule,
struct i40e_fdir_filter *input)
{
if ((rule->dst_ip[0] != input->dst_ip[0]) ||
(rule->src_ip[0] != input->src_ip[0]) ||
(rule->dst_port != input->dst_port) ||
(rule->src_port != input->src_port))
return false;
return true;
}
/**
* i40e_update_ethtool_fdir_entry - Updates the fdir filter entry
* @vsi: Pointer to the targeted VSI
@@ -1391,11 +1409,10 @@ static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi,
/* if there is an old rule occupying our place remove it */
if (rule && (rule->fd_id == sw_idx)) {
if (!input || (rule->fd_id != input->fd_id)) {
cmd->fs.flow_type = rule->flow_type;
err = i40e_add_del_fdir_ethtool(vsi, cmd, false);
}
if (input && !i40e_match_fdir_input_set(rule, input))
err = i40e_add_del_fdir(vsi, rule, false);
else if (!input)
err = i40e_add_del_fdir(vsi, rule, false);
hlist_del(&rule->fdir_node);
kfree(rule);
pf->fdir_pf_active_filters--;
@@ -1443,6 +1460,7 @@ static int i40e_del_fdir_entry(struct i40e_vsi *vsi,
ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd);
i40e_fdir_check_and_reenable(pf);
return ret;
}
@@ -1466,9 +1484,16 @@ static int i40e_add_del_fdir_ethtool(struct i40e_vsi *vsi,
if (!vsi)
return -EINVAL;
fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
pf = vsi->back;
if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
return -EOPNOTSUPP;
if (add && (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
return -ENOSPC;
fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort +
pf->hw.func_caps.fd_filters_guaranteed)) {
return -EINVAL;
@@ -1985,7 +1985,7 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev,
* @netdev: network interface to be adjusted
* @vid: vlan id to be removed
*
* net_device_ops implementation for adding vlan ids
* net_device_ops implementation for removing vlan ids
**/
static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid)
@@ -2436,6 +2436,9 @@ static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
struct i40e_pf *pf = vsi->back;
struct hlist_node *node;
if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
return;
hlist_for_each_entry_safe(filter, node,
&pf->fdir_filter_list, fdir_node) {
i40e_add_del_fdir(vsi, filter, true);
@@ -2453,8 +2456,6 @@ static int i40e_vsi_configure(struct i40e_vsi *vsi)
i40e_set_vsi_rx_mode(vsi);
i40e_restore_vlan(vsi);
i40e_vsi_config_dcb_rings(vsi);
if (vsi->type == I40E_VSI_FDIR)
i40e_fdir_filter_restore(vsi);
err = i40e_vsi_configure_tx(vsi);
if (!err)
err = i40e_vsi_configure_rx(vsi);
@@ -2582,7 +2583,7 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
wr32(hw, I40E_PFINT_LNKLST0, 0);
/* Associate the queue pair to the vector and enable the q int */
/* Associate the queue pair to the vector and enable the queue int */
val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
(I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
(I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
@@ -2891,8 +2892,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
icr0_remaining);
if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
(icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
(icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK) ||
(icr0_remaining & I40E_PFINT_ICR0_MAL_DETECT_MASK)) {
(icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
dev_info(&pf->pdev->dev, "device will be reset\n");
set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
i40e_service_event_schedule(pf);
@@ -3755,8 +3755,8 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
NULL);
if (aq_ret) {
dev_info(&vsi->back->pdev->dev,
"%s: AQ command Config VSI BW allocation per TC failed = %d\n",
__func__, vsi->back->hw.aq.asq_last_status);
"AQ command Config VSI BW allocation per TC failed = %d\n",
vsi->back->hw.aq.asq_last_status);
return -EINVAL;
}
@@ -4085,6 +4085,10 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
} else if (vsi->netdev) {
netdev_info(vsi->netdev, "NIC Link is Down\n");
}
/* replay FDIR SB filters */
if (vsi->type == I40E_VSI_FDIR)
i40e_fdir_filter_restore(vsi);
i40e_service_event_schedule(pf);
return 0;
@@ -4364,7 +4368,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
* for the warning interrupt will deal with the shutdown
* and recovery of the switch setup.
*/
dev_info(&pf->pdev->dev, "GlobalR requested\n");
dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
@@ -4375,7 +4379,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
*
* Same as Global Reset, except does *not* include the MAC/PHY
*/
dev_info(&pf->pdev->dev, "CoreR requested\n");
dev_dbg(&pf->pdev->dev, "CoreR requested\n");
val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
val |= I40E_GLGEN_RTRIG_CORER_MASK;
wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
@@ -4409,7 +4413,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
* the switch, since we need to do all the recovery as
* for the Core Reset.
*/
dev_info(&pf->pdev->dev, "PFR requested\n");
dev_dbg(&pf->pdev->dev, "PFR requested\n");
i40e_handle_reset_warning(pf);
} else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
@@ -4458,18 +4462,18 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
&old_cfg->etscfg.prioritytable,
sizeof(new_cfg->etscfg.prioritytable))) {
need_reconfig = true;
dev_info(&pf->pdev->dev, "ETS UP2TC changed.\n");
dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
}
if (memcmp(&new_cfg->etscfg.tcbwtable,
&old_cfg->etscfg.tcbwtable,
sizeof(new_cfg->etscfg.tcbwtable)))
dev_info(&pf->pdev->dev, "ETS TC BW Table changed.\n");
dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
if (memcmp(&new_cfg->etscfg.tsatable,
&old_cfg->etscfg.tsatable,
sizeof(new_cfg->etscfg.tsatable)))
dev_info(&pf->pdev->dev, "ETS TSA Table changed.\n");
dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
}
/* Check if PFC configuration has changed */
@@ -4477,7 +4481,7 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
&old_cfg->pfc,
sizeof(new_cfg->pfc))) {
need_reconfig = true;
dev_info(&pf->pdev->dev, "PFC config change detected.\n");
dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
}
/* Check if APP Table has changed */
@@ -4485,7 +4489,7 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
&old_cfg->app,
sizeof(new_cfg->app))) {
need_reconfig = true;
dev_info(&pf->pdev->dev, "APP Table change detected.\n");
dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
}
return need_reconfig;
@@ -4535,7 +4539,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
/* No change detected in DCBX configs */
if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
dev_info(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
goto exit;
}
@@ -4593,8 +4597,8 @@ static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
struct i40e_vf *vf;
u16 vf_id;
dev_info(&pf->pdev->dev, "%s: Rx Queue Number = %d QTX_CTL=0x%08x\n",
__func__, queue, qtx_ctl);
dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
queue, qtx_ctl);
/* Queue belongs to VF, find the VF and issue VF reset */
if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
@@ -4623,6 +4627,54 @@ static void i40e_service_event_complete(struct i40e_pf *pf)
clear_bit(__I40E_SERVICE_SCHED, &pf->state);
}
/**
* i40e_get_current_fd_count - Get the count of FD filters programmed in the HW
* @pf: board private structure
**/
int i40e_get_current_fd_count(struct i40e_pf *pf)
{
int val, fcnt_prog;
val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
return fcnt_prog;
}
/**
* i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
* @pf: board private structure
**/
void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
{
u32 fcnt_prog, fcnt_avail;
/* Check if FD SB or ATR was auto disabled and if there is enough room
* to re-enable
*/
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
(pf->flags & I40E_FLAG_FD_SB_ENABLED))
return;
fcnt_prog = i40e_get_current_fd_count(pf);
fcnt_avail = pf->hw.fdir_shared_filter_count +
pf->fdir_pf_filter_count;
if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) {
if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
}
}
/* Wait for some more space to be available to turn on ATR */
if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
}
}
}
/**
* i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
* @pf: board private structure
@@ -4632,11 +4684,14 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT))
return;
pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT;
/* if interface is down do nothing */
if (test_bit(__I40E_DOWN, &pf->state))
return;
i40e_fdir_check_and_reenable(pf);
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
(pf->flags & I40E_FLAG_FD_SB_ENABLED))
pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT;
}
/**
@@ -4946,7 +5001,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
event.msg_size);
break;
case i40e_aqc_opc_lldp_update_mib:
dev_info(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
#ifdef CONFIG_I40E_DCB
rtnl_lock();
ret = i40e_handle_lldp_event(pf, &event);
@@ -4954,7 +5009,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
#endif /* CONFIG_I40E_DCB */
break;
case i40e_aqc_opc_event_lan_overflow:
dev_info(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
i40e_handle_lan_overflow_event(pf, &event);
break;
case i40e_aqc_opc_send_msg_to_peer:
@@ -5231,7 +5286,7 @@ static int i40e_prep_for_reset(struct i40e_pf *pf)
if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
return 0;
dev_info(&pf->pdev->dev, "Tearing down internal switch for reset\n");
dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
if (i40e_check_asq_alive(hw))
i40e_vc_notify_reset(pf);
@@ -5278,7 +5333,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
if (test_bit(__I40E_DOWN, &pf->state))
goto end_core_reset;
dev_info(&pf->pdev->dev, "Rebuilding internal switch\n");
dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
ret = i40e_init_adminq(&pf->hw);
@@ -5328,7 +5383,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
* try to recover minimal use by getting the basic PF VSI working.
*/
if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
dev_info(&pf->pdev->dev, "attempting to rebuild switch\n");
dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
/* find the one VEB connected to the MAC, and find orphans */
for (v = 0; v < I40E_MAX_VEB; v++) {
if (!pf->veb[v])
@@ -5393,7 +5448,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
dv.subbuild_version = 0;
i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
dev_info(&pf->pdev->dev, "PF reset done\n");
dev_info(&pf->pdev->dev, "reset complete\n");
end_core_reset:
clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
@@ -5442,7 +5497,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK)
>> I40E_GL_MDET_TX_QUEUE_SHIFT;
dev_info(&pf->pdev->dev,
"Malicious Driver Detection TX event 0x%02x on q %d of function 0x%02x\n",
"Malicious Driver Detection event 0x%02x on TX queue %d of function 0x%02x\n",
event, queue, func);
wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
mdd_detected = true;
@@ -5456,7 +5511,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK)
>> I40E_GL_MDET_RX_QUEUE_SHIFT;
dev_info(&pf->pdev->dev,
"Malicious Driver Detection RX event 0x%02x on q %d of function 0x%02x\n",
"Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
event, queue, func);
wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
mdd_detected = true;
@@ -6293,12 +6348,8 @@ static int i40e_sw_init(struct i40e_pf *pf)
(pf->hw.func_caps.fd_filters_best_effort > 0)) {
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
dev_info(&pf->pdev->dev,
"Flow Director ATR mode Enabled\n");
if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
pf->flags |= I40E_FLAG_FD_SB_ENABLED;
dev_info(&pf->pdev->dev,
"Flow Director Side Band mode Enabled\n");
} else {
dev_info(&pf->pdev->dev,
"Flow Director Side Band mode Disabled in MFP mode\n");
@@ -6322,9 +6373,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->num_req_vfs = min_t(int,
pf->hw.func_caps.num_vfs,
I40E_MAX_VF_COUNT);
dev_info(&pf->pdev->dev,
"Number of VFs being requested for PF[%d] = %d\n",
pf->hw.pf_id, pf->num_req_vfs);
}
#endif /* CONFIG_PCI_IOV */
pf->eeprom_version = 0xDEAD;
@@ -6806,8 +6854,6 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
if (vsi->netdev) {
/* results in a call to i40e_close() */
unregister_netdev(vsi->netdev);
free_netdev(vsi->netdev);
vsi->netdev = NULL;
}
} else {
if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
@@ -6826,6 +6872,10 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
i40e_vsi_delete(vsi);
i40e_vsi_free_q_vectors(vsi);
if (vsi->netdev) {
free_netdev(vsi->netdev);
vsi->netdev = NULL;
}
i40e_vsi_clear_rings(vsi);
i40e_vsi_clear(vsi);
@@ -6880,8 +6930,7 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
}
if (vsi->base_vector) {
dev_info(&pf->pdev->dev,
"VSI %d has non-zero base vector %d\n",
dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
vsi->seid, vsi->base_vector);
return -EEXIST;
}
@@ -6900,7 +6949,7 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
vsi->num_q_vectors, vsi->idx);
if (vsi->base_vector < 0) {
dev_info(&pf->pdev->dev,
"failed to get q tracking for VSI %d, err=%d\n",
"failed to get queue tracking for VSI %d, err=%d\n",
vsi->seid, vsi->base_vector);
i40e_vsi_free_q_vectors(vsi);
ret = -ENOENT;
@@ -7857,6 +7906,44 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
return 0;
}
#define INFO_STRING_LEN 255
static void i40e_print_features(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
char *buf, *string;
string = kzalloc(INFO_STRING_LEN, GFP_KERNEL);
if (!string) {
dev_err(&pf->pdev->dev, "Features string allocation failed\n");
return;
}
buf = string;
buf += sprintf(string, "Features: PF-id[%d] ", hw->pf_id);
#ifdef CONFIG_PCI_IOV
buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs);
#endif
buf += sprintf(buf, "VSIs: %d QP: %d ", pf->hw.func_caps.num_vsis,
pf->vsi[pf->lan_vsi]->num_queue_pairs);
if (pf->flags & I40E_FLAG_RSS_ENABLED)
buf += sprintf(buf, "RSS ");
buf += sprintf(buf, "FDir ");
if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
buf += sprintf(buf, "ATR ");
if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
buf += sprintf(buf, "NTUPLE ");
if (pf->flags & I40E_FLAG_DCB_ENABLED)
buf += sprintf(buf, "DCB ");
if (pf->flags & I40E_FLAG_PTP)
buf += sprintf(buf, "PTP ");
BUG_ON(buf > (string + INFO_STRING_LEN));
dev_info(&pf->pdev->dev, "%s\n", string);
kfree(string);
}
/**
* i40e_probe - Device initialization routine
* @pdev: PCI device information struct
@@ -7883,16 +7970,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
/* set up for high or low dma */
if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
/* coherent mask for the same size will always succeed if
* dma_set_mask does
*/
dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
} else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
} else {
dev_err(&pdev->dev, "DMA configuration failed: %d\n", err);
err = -EIO;
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err)
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev,
"DMA configuration failed: 0x%x\n", err);
goto err_dma;
}
@@ -8130,7 +8213,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_set_pci_config_data(hw, link_status);
dev_info(&pdev->dev, "PCI Express: %s %s\n",
dev_info(&pdev->dev, "PCI-Express: %s %s\n",
(hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" :
hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" :
hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" :
@@ -8147,6 +8230,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
}
/* print a string summarizing features */
i40e_print_features(pf);
return 0;
/* Unwind what we've done if something failed in the setup */
@@ -430,23 +430,61 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
/**
* i40e_fd_handle_status - check the Programming Status for FD
* @rx_ring: the Rx ring for this descriptor
* @qw: the descriptor data
* @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
* @prog_id: the id originally used for programming
*
* This is used to verify if the FD programming or invalidation
* requested by SW to the HW is successful or not and take actions accordingly.
**/
static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u32 qw, u8 prog_id)
static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
union i40e_rx_desc *rx_desc, u8 prog_id)
{
struct pci_dev *pdev = rx_ring->vsi->back->pdev;
struct i40e_pf *pf = rx_ring->vsi->back;
struct pci_dev *pdev = pf->pdev;
u32 fcnt_prog, fcnt_avail;
u32 error;
u64 qw;
qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
/* for now just print the Status */
dev_info(&pdev->dev, "FD programming id %02x, Status %08x\n",
prog_id, error);
if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
rx_desc->wb.qword0.hi_dword.fd_id);
/* filter programming failed most likely due to table full */
fcnt_prog = i40e_get_current_fd_count(pf);
fcnt_avail = pf->hw.fdir_shared_filter_count +
pf->fdir_pf_filter_count;
/* If ATR is running fcnt_prog can quickly change,
* if we are very close to full, it makes sense to disable
* FD ATR/SB and then re-enable it when there is room.
*/
if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
/* Turn off ATR first */
if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
dev_warn(&pdev->dev, "FD filter space full, ATR for further flows will be turned off\n");
pf->auto_disable_flags |=
I40E_FLAG_FD_ATR_ENABLED;
pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
} else if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
pf->auto_disable_flags |=
I40E_FLAG_FD_SB_ENABLED;
pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
}
} else {
dev_info(&pdev->dev, "FD filter programming error\n");
}
} else if (error ==
(0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
netdev_info(rx_ring->vsi->netdev, "ntuple filter loc = %d, could not be removed\n",
rx_desc->wb.qword0.hi_dword.fd_id);
}
}
/**
@@ -843,7 +881,7 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
i40e_fd_handle_status(rx_ring, qw, id);
i40e_fd_handle_status(rx_ring, rx_desc, id);
}
/**
@@ -1536,8 +1574,6 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
if (!tx_ring->atr_sample_rate)
return;
tx_ring->atr_count++;
/* snag network header to get L4 type and address */
hdr.network = skb_network_header(skb);
@@ -1559,6 +1595,12 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
th = (struct tcphdr *)(hdr.network + hlen);
/* Due to lack of space, no more new filters can be programmed */
if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
return;
tx_ring->atr_count++;
/* sample on all syn/fin packets or once every atr sample rate */
if (!th->fin && !th->syn && (tx_ring->atr_count < tx_ring->atr_sample_rate))
return;
@@ -458,6 +458,10 @@ union i40e_32byte_rx_desc {
union {
__le32 rss; /* RSS Hash */
__le32 fcoe_param; /* FCoE DDP Context id */
/* Flow director filter id in case of
* Programming status desc WB
*/
__le32 fd_id;
} hi_dword;
} qword0;
struct {
@@ -698,7 +702,7 @@ enum i40e_rx_prog_status_desc_prog_id_masks {
enum i40e_rx_prog_status_desc_error_bits {
/* Note: These are predefined bit offsets */
I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0,
I40E_RX_PROG_STATUS_DESC_NO_FD_QUOTA_SHIFT = 1,
I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1,
I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2,
I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
};
@@ -69,7 +69,7 @@ static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
{
struct i40e_pf *pf = vf->pf;
return vector_id <= pf->hw.func_caps.num_msix_vectors_vf;
return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}
/***********************vf resource mgmt routines*****************/
@@ -126,8 +126,8 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
else
reg_idx = I40E_VPINT_LNKLSTN(
(pf->hw.func_caps.num_msix_vectors_vf
* vf->vf_id) + (vector_id - 1));
((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
(vector_id - 1));
if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
/* Special case - No queues mapped on this vector */
@@ -506,7 +506,8 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
vf->lan_vsi_index = 0;
vf->lan_vsi_id = 0;
}
msix_vf = pf->hw.func_caps.num_msix_vectors_vf + 1;
msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
/* disable interrupts so the VF starts in a known state */
for (i = 0; i < msix_vf; i++) {
/* format is same for both registers */
@@ -464,6 +464,10 @@ union i40e_32byte_rx_desc {
union {
__le32 rss; /* RSS Hash */
__le32 fcoe_param; /* FCoE DDP Context id */
/* Flow director filter id in case of
* Programming status desc WB
*/
__le32 fd_id;
} hi_dword;
} qword0;
struct {
@@ -704,7 +708,7 @@ enum i40e_rx_prog_status_desc_prog_id_masks {
enum i40e_rx_prog_status_desc_error_bits {
/* Note: These are predefined bit offsets */
I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0,
I40E_RX_PROG_STATUS_DESC_NO_FD_QUOTA_SHIFT = 1,
I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1,
I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2,
I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
};
@@ -1141,7 +1141,7 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
* (roughly) twice the number of vectors as there are CPUs.
*/
v_budget = min(pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS;
v_budget = min(v_budget, (int)adapter->vf_res->max_vectors + 1);
v_budget = min(v_budget, (int)adapter->vf_res->max_vectors);
/* A failure in MSI-X entry allocation isn't fatal, but it does
* mean we disable MSI-X capabilities of the adapter.
@@ -2182,17 +2182,12 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
/* coherent mask for the same size will always succeed if
* dma_set_mask does
*/
dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
} else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
} else {
dev_err(&pdev->dev, "%s: DMA configuration failed: %d\n",
__func__, err);
err = -EIO;
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err)
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev,
"DMA configuration failed: 0x%x\n", err);
goto err_dma;
}
@@ -1978,7 +1978,7 @@ void igb_reset(struct igb_adapter *adapter)
}
}
#endif
/*Re-establish EEE setting */
/* Re-establish EEE setting */
if (hw->phy.media_type == e1000_media_type_copper) {
switch (mac->type) {
case e1000_i350: