Commit 27fa589d authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2014-06-09

This series contains more updates to i40e and i40evf.

Shannon adds checks for error status bits on the admin event queue and
provides notification if they are seen.  Cleans up an unused variable and
a memory allocation that were used earlier in driver development and are
no longer needed.  Also fixes the driver to not complain about removing
non-existent MAC addresses.  Bumps the driver versions for both i40e
and i40evf.
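
The admin queue error handling added in i40e_clean_adminq_subtask() (see the
hunk at @@ -5132 below) follows a read, test-and-clear, conditional write-back
pattern for each error bit.  A minimal standalone sketch of that flow, with
stub rd32()/wr32() accessors and made-up mask values standing in for the real
I40E_PF_ARQLEN_* definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* illustrative stand-ins for the real I40E_PF_ARQLEN_* masks */
    #define ARQ_VF_ERROR_MASK (1u << 28)
    #define ARQ_OVERFLOW_MASK (1u << 29)
    #define ARQ_CRITICAL_MASK (1u << 30)

    static uint32_t fake_arqlen_reg = ARQ_OVERFLOW_MASK | 64; /* stub register */

    static uint32_t rd32(void) { return fake_arqlen_reg; } /* stub MMIO read */
    static void wr32(uint32_t v) { fake_arqlen_reg = v; }  /* stub MMIO write */

    static void check_adminq_errors(void)
    {
        uint32_t val = rd32();
        uint32_t oldval = val;

        if (val & ARQ_VF_ERROR_MASK) {
            printf("ARQ VF Error detected\n");
            val &= ~ARQ_VF_ERROR_MASK;
        }
        if (val & ARQ_OVERFLOW_MASK) {
            printf("ARQ Overflow Error detected\n");
            val &= ~ARQ_OVERFLOW_MASK;
        }
        if (val & ARQ_CRITICAL_MASK) {
            printf("ARQ Critical Error detected\n");
            val &= ~ARQ_CRITICAL_MASK;
        }
        /* write back only if we actually cleared something */
        if (oldval != val)
            wr32(val);
    }

    int main(void)
    {
        check_adminq_errors();
        printf("register now 0x%08x\n", (unsigned)fake_arqlen_reg);
        return 0;
    }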

Catherine fixes a function header comment to make sure the comment correctly
reflects the function name.

Mitch adds code to allow for additional VSIs, since the number of VSIs that
the firmware reports to us is a guaranteed minimum, not an absolute
maximum.  The hardware actually supports far more than the reported value,
which we often need.  Implements anti-spoofing for VFs for both MAC
addresses and VLANs, and enables this feature by default for all VFs.
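
The VSI sizing rule treats the firmware-reported count as a floor, not a
ceiling (see I40E_MIN_VSI_ALLOC and the i40e_probe() changes below).  A
hedged sketch of that sizing decision in plain C, with the 51-slot minimum
taken from the patch and a hypothetical firmware answer:

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MIN_VSI_ALLOC 51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ (from the patch) */

    /* size the VSI tracking array from the guaranteed floor, not the FW report */
    static size_t num_alloc_vsi(size_t fw_reported)
    {
        return fw_reported < MIN_VSI_ALLOC ? MIN_VSI_ALLOC : fw_reported;
    }

    int main(void)
    {
        size_t fw_reported = 34; /* hypothetical firmware answer */
        size_t n = num_alloc_vsi(fw_reported);
        /* mirrors pf->vsi = kzalloc(len, GFP_KERNEL) in i40e_probe() */
        void **vsi = calloc(n, sizeof(*vsi));

        if (!vsi)
            return 1;
        printf("FW guarantees %zu VSIs, allocating %zu slots\n", fw_reported, n);
        free(vsi);
        return 0;
    }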

Anjali changes the interrupt distribution policy to change the way
resources for special features are handled.  Fixes the driver to not fall
back to one queue if the only feature enabled is ATR, since FD_SB
and FD_ATR need to be checked independently in order to decide whether we
will support multiple queues or not.  Allows the RSS table entry range
and the number of QPs (queue pairs) to be any number, not necessarily a
power of 2, because the hardware does not restrict us to a power-of-2 QP
count in the case of RSS as long as we are not sharing the RSS table with
another VSI (VMDq).
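
In code terms the series simply drops the rounddown_pow_of_two() step from
i40e_sw_init() and i40e_reconfig_rss_queues() and keeps the clamping, so
e.g. a 12-CPU system can use 12 RSS queues instead of 8.  A small userspace
sketch of the before/after sizing, with a hand-rolled stand-in for the
kernel's rounddown_pow_of_two():

    #include <stdio.h>

    /* userspace stand-in for the kernel's rounddown_pow_of_two() */
    static unsigned int rounddown_pow_of_two(unsigned int n)
    {
        unsigned int p = 1;

        while (p * 2 <= n)
            p *= 2;
        return p;
    }

    static unsigned int min_u(unsigned int a, unsigned int b)
    {
        return a < b ? a : b;
    }

    int main(void)
    {
        unsigned int rss_size_max = 64; /* hypothetical HW limit */
        unsigned int online_cpus = 12;

        unsigned int old_rss = rounddown_pow_of_two(min_u(rss_size_max, online_cpus));
        unsigned int new_rss = min_u(rss_size_max, online_cpus);

        printf("old policy: %u queues, new policy: %u queues\n",
               old_rss, new_rss); /* prints 8 vs 12 */
        return 0;
    }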

Frank modifies the driver to keep SR-IOV enabled in the case that RSS,
VMDq, FD_SB and DCB are disabled, so that SR-IOV does not get turned off
unnecessarily.
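
The effect is visible in i40e_determine_queue_usage() below: a "no special
features" case is split out that trims queues but leaves SR-IOV alone.  A
compilable sketch of that flag logic, using invented flag bits rather than
the real I40E_FLAG_* values:

    #include <stdio.h>

    /* invented flag bits; the driver uses the I40E_FLAG_* equivalents */
    #define FLAG_MSIX   (1u << 0)
    #define FLAG_RSS    (1u << 1)
    #define FLAG_FD_SB  (1u << 2)
    #define FLAG_FD_ATR (1u << 3)
    #define FLAG_DCB    (1u << 4)
    #define FLAG_SRIOV  (1u << 5)
    #define FLAG_VMDQ   (1u << 6)

    static unsigned int determine_queue_usage(unsigned int flags)
    {
        if (!(flags & FLAG_MSIX)) {
            /* no MSI-X: one queue and no features at all, SR-IOV included */
            flags &= ~(FLAG_RSS | FLAG_FD_SB | FLAG_FD_ATR |
                       FLAG_DCB | FLAG_SRIOV | FLAG_VMDQ);
        } else if (!(flags & (FLAG_RSS | FLAG_FD_SB |
                              FLAG_FD_ATR | FLAG_DCB))) {
            /* no special features: one LAN queue, but SR-IOV survives */
            flags &= ~(FLAG_RSS | FLAG_FD_SB | FLAG_FD_ATR |
                       FLAG_DCB | FLAG_VMDQ);
        }
        return flags;
    }

    int main(void)
    {
        unsigned int flags = determine_queue_usage(FLAG_MSIX | FLAG_SRIOV);

        printf("SR-IOV still enabled: %s\n",
               (flags & FLAG_SRIOV) ? "yes" : "no");
        return 0;
    }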

Jesse fixes a bug in receive checksum where the driver was not marking
packets with bad checksums correctly, especially IPv6 packets with a bad
checksum.  To do this correctly, we need a define for an error bit (PPRS)
that may be set by hardware in rare cases.
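
The reworked i40e_rx_checksum() (hunks below) funnels every hard failure
through a single checksum_fail label so the hw_csum_rx_error counter is
bumped in exactly one place, while soft cases such as the new PPRS bit
return early and leave the checksum to the stack.  A condensed, compilable
sketch of that control flow with made-up error flags:

    #include <stdbool.h>
    #include <stdio.h>

    /* made-up error flags standing in for the Rx descriptor error bits */
    #define ERR_L4E  (1u << 0) /* L4 checksum error reported by HW */
    #define ERR_PPRS (1u << 1) /* HW could not checksum (rare) */

    static unsigned int hw_csum_rx_error; /* like vsi->back->hw_csum_rx_error */

    static void rx_checksum(unsigned int rx_error, bool *csum_ok)
    {
        *csum_ok = false;

        /* hard L4 error: count it, then let the stack see the packet */
        if (rx_error & ERR_L4E)
            goto checksum_fail;

        /* HW gave up without deciding: not an error, stack computes csum */
        if (rx_error & ERR_PPRS)
            return;

        *csum_ok = true; /* CHECKSUM_UNNECESSARY in the real driver */
        return;

    checksum_fail:
        hw_csum_rx_error++;
    }

    int main(void)
    {
        bool ok;

        rx_checksum(ERR_L4E, &ok);
        rx_checksum(ERR_PPRS, &ok);
        rx_checksum(0, &ok);
        printf("bad-csum packets counted: %u (expect 1)\n", hw_csum_rx_error);
        return 0;
    }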

Greg fixes the driver to delete all the old and stale MAC filters for the
VF VSI when the host administrator changes the VF MAC address from under
its feet.
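
Rather than swapping one unicast filter, the fix in i40e_ndo_set_vf_mac()
walks the VSI's entire MAC filter list and deletes every entry before
programming the new address.  A toy sketch of that purge-then-add flow over
a plain singly linked list (the driver itself uses list_for_each_entry() on
vsi->mac_filter_list):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct mac_filter {
        unsigned char addr[6];
        struct mac_filter *next;
    };

    static void add_filter(struct mac_filter **head, const unsigned char *mac)
    {
        struct mac_filter *f = malloc(sizeof(*f));

        if (!f)
            return;
        memcpy(f->addr, mac, 6);
        f->next = *head;
        *head = f;
    }

    /* drop every old/stale filter; the old address set no longer applies */
    static void purge_filters(struct mac_filter **head)
    {
        while (*head) {
            struct mac_filter *f = *head;

            *head = f->next;
            free(f);
        }
    }

    int main(void)
    {
        struct mac_filter *list = NULL;
        const unsigned char stale[6] = { 2, 0, 0, 0, 0, 1 };
        const unsigned char fresh[6] = { 2, 0, 0, 0, 0, 9 };

        add_filter(&list, stale);
        purge_filters(&list);     /* stale entries gone */
        add_filter(&list, fresh); /* program the admin-chosen MAC */
        if (list)
            printf("filters now start at %02x:..:%02x\n",
                   list->addr[0], list->addr[5]);
        purge_filters(&list);
        return 0;
    }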
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
......@@ -72,6 +72,7 @@
#define I40E_MIN_NUM_DESCRIPTORS 64
#define I40E_MIN_MSIX 2
#define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */
#define I40E_MIN_VSI_ALLOC 51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ */
#define I40E_DEFAULT_QUEUES_PER_VMDQ 2 /* max 16 qps */
#define I40E_DEFAULT_QUEUES_PER_VF 4
#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */
......@@ -215,6 +216,7 @@ struct i40e_pf {
u16 rss_size; /* num queues in the RSS array */
u16 rss_size_max; /* HW defined max RSS queues */
u16 fdir_pf_filter_count; /* num of guaranteed filters for this PF */
u16 num_alloc_vsi; /* num VSIs this driver supports */
u8 atr_sample_rate;
bool wol_en;
......@@ -295,7 +297,6 @@ struct i40e_pf {
u16 pf_seid;
u16 main_vsi_seid;
u16 mac_seid;
- struct i40e_aqc_get_switch_config_data *sw_config;
struct kobject *switch_kobj;
#ifdef CONFIG_DEBUG_FS
struct dentry *i40e_dbg_pf;
......
......@@ -232,7 +232,7 @@ static void i40e_dcbnl_del_app(struct i40e_pf *pf,
struct i40e_ieee_app_priority_table *app)
{
int v, err;
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v] && pf->vsi[v]->netdev) {
err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app);
if (err)
......
......@@ -45,7 +45,7 @@ static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
if (seid < 0)
dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
else
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i] && (pf->vsi[i]->seid == seid))
return pf->vsi[i];
......@@ -843,7 +843,7 @@ static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf)
{
int i;
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i])
dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n",
i, pf->vsi[i]->seid);
......@@ -1526,7 +1526,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid);
if (cnt == 0) {
int i;
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ for (i = 0; i < pf->num_alloc_vsi; i++)
i40e_vsi_reset_stats(pf->vsi[i]);
dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
} else if (cnt == 1) {
......
......@@ -119,6 +119,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
I40E_PF_STAT("tx_timeout", tx_timeout_count),
I40E_PF_STAT("rx_csum_bad", hw_csum_rx_error),
I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),
I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),
I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
......
......@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 4
- #define DRV_VERSION_BUILD 5
+ #define DRV_VERSION_BUILD 7
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
......@@ -652,7 +652,7 @@ static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
return;
/* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
struct i40e_vsi *vsi = pf->vsi[v];
if (!vsi || !vsi->tx_rings[0])
......@@ -706,7 +706,7 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
}
/* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
struct i40e_vsi *vsi = pf->vsi[v];
if (!vsi || !vsi->tx_rings[0])
......@@ -1366,7 +1366,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
vsi->tc_config.numtc = numtc;
vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
/* Number of queues per enabled TC */
- num_tc_qps = rounddown_pow_of_two(vsi->alloc_queue_pairs/numtc);
+ num_tc_qps = vsi->alloc_queue_pairs/numtc;
num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
/* Setup queue offset/count for all TCs for given VSI */
......@@ -1595,7 +1595,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
num_del = 0;
memset(del_list, 0, sizeof(*del_list));
- if (aq_ret)
+ if (aq_ret &&
+     pf->hw.aq.asq_last_status !=
+     I40E_AQ_RC_ENOENT)
dev_info(&pf->pdev->dev,
"ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
aq_ret,
......@@ -1607,7 +1609,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
del_list, num_del, NULL);
num_del = 0;
- if (aq_ret)
+ if (aq_ret &&
+     pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT)
dev_info(&pf->pdev->dev,
"ignoring delete macvlan error, err %d, aq_err %d\n",
aq_ret, pf->hw.aq.asq_last_status);
......@@ -1734,7 +1737,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
return;
pf->flags &= ~I40E_FLAG_FILTER_SYNC;
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v] &&
(pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
i40e_sync_vsi_filters(pf->vsi[v]);
......@@ -3524,7 +3527,7 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
int i;
i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i])
i40e_vsi_free_q_vectors(pf->vsi[i]);
i40e_reset_interrupt_capability(pf);
......@@ -3614,7 +3617,7 @@ static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
{
int v;
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v])
i40e_quiesce_vsi(pf->vsi[v]);
}
......@@ -3628,7 +3631,7 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
{
int v;
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v])
i40e_unquiesce_vsi(pf->vsi[v]);
}
......@@ -4069,7 +4072,7 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
}
/* Update each VSI */
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
if (!pf->vsi[v])
continue;
......@@ -4592,7 +4595,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
/* Find the VSI(s) that requested a re-init */
dev_info(&pf->pdev->dev,
"VSI reinit requested\n");
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
struct i40e_vsi *vsi = pf->vsi[v];
if (vsi != NULL &&
test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
......@@ -4919,7 +4922,7 @@ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
i40e_veb_link_event(pf->veb[i], link_up);
/* ... now the local VSIs */
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
i40e_vsi_link_event(pf->vsi[i], link_up);
}
......@@ -4976,7 +4979,7 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
* for each q_vector
* force an interrupt
*/
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
struct i40e_vsi *vsi = pf->vsi[v];
int armed = 0;
......@@ -5026,7 +5029,7 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
/* Update the stats for active netdevs so the network stack
* can look at updated numbers whenever it cares to
*/
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i] && pf->vsi[i]->netdev)
i40e_update_stats(pf->vsi[i]);
......@@ -5132,11 +5135,47 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
u16 pending, i = 0;
i40e_status ret;
u16 opcode;
u32 oldval;
u32 val;
if (!test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state))
return;
/* check for error indications */
val = rd32(&pf->hw, pf->hw.aq.arq.len);
oldval = val;
if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
}
if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
}
if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
}
if (oldval != val)
wr32(&pf->hw, pf->hw.aq.arq.len, val);
val = rd32(&pf->hw, pf->hw.aq.asq.len);
oldval = val;
if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
}
if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
}
if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
}
if (oldval != val)
wr32(&pf->hw, pf->hw.aq.asq.len, val);
event.msg_size = I40E_MAX_AQ_BUF_SIZE;
event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
if (!event.msg_buf)
......@@ -5242,7 +5281,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
int ret;
/* build VSI that owns this VEB, temporarily attached to base VEB */
- for (v = 0; v < pf->hw.func_caps.num_vsis && !ctl_vsi; v++) {
+ for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
if (pf->vsi[v] &&
pf->vsi[v]->veb_idx == veb->idx &&
pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
......@@ -5272,7 +5311,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
goto end_reconstitute;
/* create the remaining VSIs attached to this VEB */
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
continue;
......@@ -5385,7 +5424,7 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
/* find existing VSI and see if it needs configuring */
vsi = NULL;
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
vsi = pf->vsi[i];
break;
......@@ -5415,7 +5454,7 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
int i;
i40e_fdir_filter_exit(pf);
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
i40e_vsi_release(pf->vsi[i]);
break;
......@@ -5444,7 +5483,7 @@ static int i40e_prep_for_reset(struct i40e_pf *pf)
/* quiesce the VSIs and their queues that are not already DOWN */
i40e_pf_quiesce_all_vsi(pf);
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v])
pf->vsi[v]->seid = 0;
}
......@@ -5924,15 +5963,15 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
* find next empty vsi slot, looping back around if necessary
*/
i = pf->next_vsi;
- while (i < pf->hw.func_caps.num_vsis && pf->vsi[i])
+ while (i < pf->num_alloc_vsi && pf->vsi[i])
i++;
- if (i >= pf->hw.func_caps.num_vsis) {
+ if (i >= pf->num_alloc_vsi) {
i = 0;
while (i < pf->next_vsi && pf->vsi[i])
i++;
}
- if (i < pf->hw.func_caps.num_vsis && !pf->vsi[i]) {
+ if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
vsi_idx = i; /* Found one! */
} else {
ret = -ENODEV;
......@@ -6189,6 +6228,16 @@ static int i40e_init_msix(struct i40e_pf *pf)
for (i = 0; i < v_budget; i++)
pf->msix_entries[i].entry = i;
vec = i40e_reserve_msix_vectors(pf, v_budget);
if (vec != v_budget) {
/* If we have limited resources, we will start with no vectors
* for the special features and then allocate vectors to some
* of these features based on the policy and at the end disable
* the features that did not get any vectors.
*/
pf->num_vmdq_msix = 0;
}
if (vec < I40E_MIN_MSIX) {
pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
kfree(pf->msix_entries);
......@@ -6197,27 +6246,25 @@ static int i40e_init_msix(struct i40e_pf *pf)
} else if (vec == I40E_MIN_MSIX) {
/* Adjust for minimal MSIX use */
- dev_info(&pf->pdev->dev, "Features disabled, not enough MSI-X vectors\n");
- pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
pf->num_vmdq_vsis = 0;
pf->num_vmdq_qps = 0;
- pf->num_vmdq_msix = 0;
pf->num_lan_qps = 1;
pf->num_lan_msix = 1;
} else if (vec != v_budget) {
+ /* reserve the misc vector */
+ vec--;
/* Scale vector usage down */
pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
- vec--; /* reserve the misc vector */
+ pf->num_vmdq_vsis = 1;
/* partition out the remaining vectors */
switch (vec) {
case 2:
- pf->num_vmdq_vsis = 1;
pf->num_lan_msix = 1;
break;
case 3:
- pf->num_vmdq_vsis = 1;
pf->num_lan_msix = 2;
break;
default:
......@@ -6229,6 +6276,11 @@ static int i40e_init_msix(struct i40e_pf *pf)
}
}
if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
(pf->num_vmdq_msix == 0)) {
dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
}
return err;
}
......@@ -6446,7 +6498,6 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
return 0;
queue_count = min_t(int, queue_count, pf->rss_size_max);
- queue_count = rounddown_pow_of_two(queue_count);
if (queue_count != pf->rss_size) {
i40e_prep_for_reset(pf);
......@@ -6502,7 +6553,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
if (pf->hw.func_caps.rss) {
pf->flags |= I40E_FLAG_RSS_ENABLED;
pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
- pf->rss_size = rounddown_pow_of_two(pf->rss_size);
} else {
pf->rss_size = 1;
}
......@@ -6848,6 +6898,7 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_set_vf_rate = i40e_ndo_set_vf_bw,
.ndo_get_vf_config = i40e_ndo_get_vf_config,
.ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
.ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofck,
#ifdef CONFIG_I40E_VXLAN
.ndo_add_vxlan_port = i40e_add_vxlan_port,
.ndo_del_vxlan_port = i40e_del_vxlan_port,
......@@ -7082,6 +7133,13 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
if (pf->vf[vsi->vf_id].spoofchk) {
ctxt.info.valid_sections |=
cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
ctxt.info.sec_flags |=
(I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
}
/* Setup the VSI tx/rx queue map for TC0 only for now */
i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
break;
......@@ -7193,7 +7251,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
* the orphan VEBs yet. We'll wait for an explicit remove request
* from up the network stack.
*/
- for (n = 0, i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i] &&
pf->vsi[i]->uplink_seid == uplink_seid &&
(pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
......@@ -7372,7 +7430,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
if (!veb && uplink_seid != pf->mac_seid) {
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
vsi = pf->vsi[i];
break;
......@@ -7615,7 +7673,7 @@ static void i40e_switch_branch_release(struct i40e_veb *branch)
* NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
* the VEB itself, so don't use (*branch) after this loop.
*/
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
if (!pf->vsi[i])
continue;
if (pf->vsi[i]->uplink_seid == branch_seid &&
......@@ -7667,7 +7725,7 @@ void i40e_veb_release(struct i40e_veb *veb)
pf = veb->pf;
/* find the remaining VSI and check for extras */
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
n++;
vsi = pf->vsi[i];
......@@ -7779,10 +7837,10 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
}
/* make sure there is such a vsi and uplink */
- for (vsi_idx = 0; vsi_idx < pf->hw.func_caps.num_vsis; vsi_idx++)
+ for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
break;
- if (vsi_idx >= pf->hw.func_caps.num_vsis && vsi_seid != 0) {
+ if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
vsi_seid);
return NULL;
......@@ -7954,15 +8012,6 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
"header: %d reported %d total\n",
num_reported, num_total);
- if (num_reported) {
- int sz = sizeof(*sw_config) * num_reported;
- kfree(pf->sw_config);
- pf->sw_config = kzalloc(sz, GFP_KERNEL);
- if (pf->sw_config)
- memcpy(pf->sw_config, sw_config, sz);
- }
for (i = 0; i < num_reported; i++) {
struct i40e_aqc_switch_config_element_resp *ele =
&sw_config->element[i];
......@@ -8129,9 +8178,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
queues_left = pf->hw.func_caps.num_tx_qp;
if ((queues_left == 1) ||
- !(pf->flags & I40E_FLAG_MSIX_ENABLED) ||
- !(pf->flags & (I40E_FLAG_RSS_ENABLED | I40E_FLAG_FD_SB_ENABLED |
- I40E_FLAG_DCB_ENABLED))) {
+ !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
/* one qp for PF, no queues for anything else */
queues_left = 0;
pf->rss_size = pf->num_lan_qps = 1;
......@@ -8143,6 +8190,19 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
I40E_FLAG_DCB_ENABLED |
I40E_FLAG_SRIOV_ENABLED |
I40E_FLAG_VMDQ_ENABLED);
} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED |
I40E_FLAG_DCB_ENABLED))) {
/* one qp for PF */
pf->rss_size = pf->num_lan_qps = 1;
queues_left -= pf->num_lan_qps;
pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED |
I40E_FLAG_DCB_ENABLED |
I40E_FLAG_VMDQ_ENABLED);
} else {
/* Not enough queues for all TCs */
if ((pf->flags & I40E_FLAG_DCB_ENABLED) &&
......@@ -8448,10 +8508,18 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_determine_queue_usage(pf);
i40e_init_interrupt_scheme(pf);
- /* Set up the *vsi struct based on the number of VSIs in the HW,
- * and set up our local tracking of the MAIN PF vsi.
+ /* The number of VSIs reported by the FW is the minimum guaranteed
+ * to us; HW supports far more and we share the remaining pool with
+ * the other PFs. We allocate space for more than the guarantee with
+ * the understanding that we might not get them all later.
*/
- len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis;
+ if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
+ pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
+ else
+ pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
+ /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
+ len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi;
pf->vsi = kzalloc(len, GFP_KERNEL);
if (!pf->vsi) {
err = -ENOMEM;
......@@ -8464,7 +8532,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_vsis;
}
/* if FDIR VSI was set up, start it now */
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
i40e_vsi_open(pf->vsi[i]);
break;
......@@ -8659,7 +8727,7 @@ static void i40e_remove(struct pci_dev *pdev)
/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
i40e_clear_interrupt_scheme(pf);
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i]) {
i40e_vsi_clear_rings(pf->vsi[i]);
i40e_vsi_clear(pf->vsi[i]);
......@@ -8674,7 +8742,6 @@ static void i40e_remove(struct pci_dev *pdev)
kfree(pf->qp_pile);
kfree(pf->irq_pile);
- kfree(pf->sw_config);
kfree(pf->vsi);
/* force a PF reset to clean anything leftover */
......
......@@ -62,7 +62,7 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
/* find existing FDIR VSI */
vsi = NULL;
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
vsi = pf->vsi[i];
if (!vsi)
......@@ -1193,10 +1193,12 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
u32 rx_error,
u16 rx_ptype)
{
+ struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
+ bool ipv4 = false, ipv6 = false;
bool ipv4_tunnel, ipv6_tunnel;
__wsum rx_udp_csum;
- __sum16 csum;
struct iphdr *iph;
+ __sum16 csum;
ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
(rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
......@@ -1207,29 +1209,57 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
skb->ip_summed = CHECKSUM_NONE;
/* Rx csum enabled and ip headers found? */
- if (!(vsi->netdev->features & NETIF_F_RXCSUM &&
- rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+ if (!(vsi->netdev->features & NETIF_F_RXCSUM))
return;
+ /* did the hardware decode the packet and checksum? */
+ if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+ return;
+ /* both known and outer_ip must be set for the below code to work */
+ if (!(decoded.known && decoded.outer_ip))
+ return;
+ if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
+ ipv4 = true;
+ else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
+ ipv6 = true;
+ if (ipv4 &&
+ (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
+ (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
+ goto checksum_fail;
/* likely incorrect csum if alternate IP extension headers found */
- if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+ if (ipv6 &&
+ decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP &&
+ rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) &&
+ rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+ /* don't increment checksum err here, non-fatal err */
return;
- /* IP or L4 or outmost IP checksum error */
- if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
- (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) |
- (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))) {
- vsi->back->hw_csum_rx_error++;
+ /* there was some L4 error, count error and punt packet to the stack */
+ if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
+ goto checksum_fail;
+ /* handle packets that were not able to be checksummed due
+ * to arrival speed, in this case the stack can compute
+ * the csum.
+ */
+ if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
return;
- }
+ /* If VXLAN traffic has an outer UDPv4 checksum we need to check
+ * it in the driver, hardware does not do it for us.
+ * Since L3L4P bit was set we assume a valid IHL value (>=5)
+ * so the total length of IPv4 header is IHL*4 bytes
+ * The UDP_0 bit *may* bet set if the *inner* header is UDP
+ */
if (ipv4_tunnel &&
+ (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
!(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
- /* If VXLAN traffic has an outer UDPv4 checksum we need to check
- * it in the driver, hardware does not do it for us.
- * Since L3L4P bit was set we assume a valid IHL value (>=5)
- * so the total length of IPv4 header is IHL*4 bytes
- */
skb->transport_header = skb->mac_header +
sizeof(struct ethhdr) +
(ip_hdr(skb)->ihl * 4);
......@@ -1246,13 +1276,16 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
(skb->len - skb_transport_offset(skb)),
IPPROTO_UDP, rx_udp_csum);
- if (udp_hdr(skb)->check != csum) {
- vsi->back->hw_csum_rx_error++;
- return;
- }
+ if (udp_hdr(skb)->check != csum)
+ goto checksum_fail;
}
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ return;
+ checksum_fail:
+ vsi->back->hw_csum_rx_error++;
}
/**
......@@ -1429,6 +1462,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* ERR_MASK will only have valid bits if EOP set */
if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb);
/* TODO: shouldn't we increment a counter indicating the
* drop?
*/
goto next_desc;
}
......
......@@ -541,7 +541,8 @@ enum i40e_rx_desc_error_bits {
I40E_RX_DESC_ERROR_IPE_SHIFT = 3,
I40E_RX_DESC_ERROR_L4E_SHIFT = 4,
I40E_RX_DESC_ERROR_EIPE_SHIFT = 5,
- I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6
+ I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
+ I40E_RX_DESC_ERROR_PPRS_SHIFT = 7
};
enum i40e_rx_desc_error_l3l4e_fcoe_masks {
......
......@@ -899,6 +899,7 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
ret = -ENOMEM;
goto err_alloc;
}
+ pf->vf = vfs;
/* apply default profile */
for (i = 0; i < num_alloc_vfs; i++) {
......@@ -908,13 +909,13 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
/* assign default capabilities */
set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
+ vfs[i].spoofchk = true;
/* vf resources get allocated during reset */
i40e_reset_vf(&vfs[i], false);
/* enable vf vplan_qtable mappings */
i40e_enable_vf_mappings(&vfs[i]);
}
- pf->vf = vfs;
pf->num_alloc_vfs = num_alloc_vfs;
i40e_enable_pf_switch_lb(pf);
......@@ -2062,14 +2063,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
i40e_del_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id,
true, false);
- /* add the new mac address */
- f = i40e_add_filter(vsi, mac, vf->port_vlan_id, true, false);
- if (!f) {
- dev_err(&pf->pdev->dev,
- "Unable to add VF ucast filter\n");
- ret = -ENOMEM;
- goto error_param;
- }
+ /* Delete all the filters for this VSI - we're going to kill it
+ * anyway.
+ */
+ list_for_each_entry(f, &vsi->mac_filter_list, list)
+ i40e_del_filter(vsi, f->macaddr, f->vlan, true, false);
dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
/* program mac filter */
......@@ -2328,7 +2326,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
else
ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
ivi->spoofchk = vf->spoofchk;
ret = 0;
error_param:
......@@ -2395,3 +2393,50 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
error_out:
return ret;
}
/**
* i40e_ndo_set_vf_spoofchk
* @netdev: network interface device structure
* @vf_id: vf identifier
* @enable: flag to enable or disable feature
*
* Enable or disable VF spoof checking
**/
int i40e_ndo_set_vf_spoofck(struct net_device *netdev, int vf_id, bool enable)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
struct i40e_vsi_context ctxt;
struct i40e_hw *hw = &pf->hw;
struct i40e_vf *vf;
int ret = 0;
/* validate the request */
if (vf_id >= pf->num_alloc_vfs) {
dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
ret = -EINVAL;
goto out;
}
vf = &(pf->vf[vf_id]);
if (enable == vf->spoofchk)
goto out;
vf->spoofchk = enable;
memset(&ctxt, 0, sizeof(ctxt));
ctxt.seid = pf->vsi[vf->lan_vsi_index]->seid;
ctxt.pf_num = pf->hw.pf_id;
ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
if (enable)
ctxt.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
ret);
ret = -EIO;
}
out:
return ret;
}
......@@ -101,6 +101,7 @@ struct i40e_vf {
unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
bool link_forced;
bool link_up; /* only valid if vf link is forced */
bool spoofchk;
};
void i40e_free_vfs(struct i40e_pf *pf);
......@@ -121,6 +122,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
int i40e_ndo_get_vf_config(struct net_device *netdev,
int vf_id, struct ifla_vf_info *ivi);
int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
int i40e_ndo_set_vf_spoofck(struct net_device *netdev, int vf_id, bool enable);
void i40e_vc_notify_link_state(struct i40e_pf *pf);
void i40e_vc_notify_reset(struct i40e_pf *pf);
......
......@@ -728,10 +728,12 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
u32 rx_error,
u16 rx_ptype)
{
+ struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
+ bool ipv4 = false, ipv6 = false;
bool ipv4_tunnel, ipv6_tunnel;
__wsum rx_udp_csum;
- __sum16 csum;
struct iphdr *iph;
+ __sum16 csum;
ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
(rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
......@@ -742,29 +744,57 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
skb->ip_summed = CHECKSUM_NONE;
/* Rx csum enabled and ip headers found? */
- if (!(vsi->netdev->features & NETIF_F_RXCSUM &&
- rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+ if (!(vsi->netdev->features & NETIF_F_RXCSUM))
return;
+ /* did the hardware decode the packet and checksum? */
+ if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+ return;
+ /* both known and outer_ip must be set for the below code to work */
+ if (!(decoded.known && decoded.outer_ip))
+ return;
+ if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
+ ipv4 = true;
+ else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
+ ipv6 = true;
+ if (ipv4 &&
+ (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
+ (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
+ goto checksum_fail;
/* likely incorrect csum if alternate IP extension headers found */
- if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+ if (ipv6 &&
+ decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP &&
+ rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) &&
+ rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+ /* don't increment checksum err here, non-fatal err */
return;
- /* IP or L4 or outmost IP checksum error */
- if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
- (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) |
- (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))) {
- vsi->back->hw_csum_rx_error++;
+ /* there was some L4 error, count error and punt packet to the stack */
+ if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
+ goto checksum_fail;
+ /* handle packets that were not able to be checksummed due
+ * to arrival speed, in this case the stack can compute
+ * the csum.
+ */
+ if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
return;
- }
+ /* If VXLAN traffic has an outer UDPv4 checksum we need to check
+ * it in the driver, hardware does not do it for us.
+ * Since L3L4P bit was set we assume a valid IHL value (>=5)
+ * so the total length of IPv4 header is IHL*4 bytes
+ * The UDP_0 bit *may* bet set if the *inner* header is UDP
+ */
if (ipv4_tunnel &&
+ (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
!(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
- /* If VXLAN traffic has an outer UDPv4 checksum we need to check
- * it in the driver, hardware does not do it for us.
- * Since L3L4P bit was set we assume a valid IHL value (>=5)
- * so the total length of IPv4 header is IHL*4 bytes
- */
skb->transport_header = skb->mac_header +
sizeof(struct ethhdr) +
(ip_hdr(skb)->ihl * 4);
......@@ -781,13 +811,16 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
(skb->len - skb_transport_offset(skb)),
IPPROTO_UDP, rx_udp_csum);
- if (udp_hdr(skb)->check != csum) {
- vsi->back->hw_csum_rx_error++;
- return;
- }
+ if (udp_hdr(skb)->check != csum)
+ goto checksum_fail;
}
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ return;
+ checksum_fail:
+ vsi->back->hw_csum_rx_error++;
}
/**
......@@ -956,6 +989,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* ERR_MASK will only have valid bits if EOP set */
if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb);
/* TODO: shouldn't we increment a counter indicating the
* drop?
*/
goto next_desc;
}
......
......@@ -541,7 +541,8 @@ enum i40e_rx_desc_error_bits {
I40E_RX_DESC_ERROR_IPE_SHIFT = 3,
I40E_RX_DESC_ERROR_L4E_SHIFT = 4,
I40E_RX_DESC_ERROR_EIPE_SHIFT = 5,
- I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6
+ I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
+ I40E_RX_DESC_ERROR_PPRS_SHIFT = 7
};
enum i40e_rx_desc_error_l3l4e_fcoe_masks {
......
......@@ -36,7 +36,7 @@ char i40evf_driver_name[] = "i40evf";
static const char i40evf_driver_string[] =
"Intel(R) XL710 X710 Virtual Function Network Driver";
#define DRV_VERSION "0.9.29"
#define DRV_VERSION "0.9.31"
const char i40evf_driver_version[] = DRV_VERSION;
static const char i40evf_copyright[] =
"Copyright (c) 2013 - 2014 Intel Corporation.";
......@@ -1395,7 +1395,7 @@ static void i40evf_watchdog_task(struct work_struct *work)
}
/**
- * i40evf_configure_rss - increment to next available tx queue
+ * next_queue - increment to next available tx queue
* @adapter: board private structure
* @j: queue counter
*
......