Commit c6271b76 authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-queue

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2015-06-04

This series contains updates to i40e and i40evf.

Anjali provides three fixes.  The first resolves a Tx queue hang when
mixed-size frags are passed to the driver while using TSO; there was a
corner case where we needed to linearize but were not.  The second fixes
a bug in the default configuration that prevented a software bridge
loaded on the PF interface from working correctly, because broadcast
packets were incorrectly looped back.  The last fixes an NPAR bug when
SR-IOV is enabled, where we need to be in VEB mode, not VEPA mode, at
probe.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
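
Editor's note: two of the three fixes hinge on the 802.1Qbg bridge modes the
device's embedded switch can run in. In VEB mode the embedded switch forwards
between local ports itself, and therefore also reflects broadcasts back at
the PF; in VEPA mode traffic is hairpinned through the adjacent external
switch, so a software bridge on the PF sees no surprise loopback. For
reference, the uapi constants the patch toggles between (values as in the
mainline include/uapi/linux/if_bridge.h of that era):

    #define BRIDGE_MODE_VEB     0   /* Default loopback mode */
    #define BRIDGE_MODE_VEPA    1   /* 802.1Qbg defined VEPA mode */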
@@ -317,6 +317,7 @@ struct i40e_pf {
 #endif
 #define I40E_FLAG_PORT_ID_VALID		(u64)(1 << 28)
 #define I40E_FLAG_DCB_CAPABLE		(u64)(1 << 29)
+#define I40E_FLAG_VEB_MODE_ENABLED	BIT_ULL(40)

 	/* tracks features that get auto disabled by errors */
 	u64 auto_disable_flags;
......
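Editor's note: the new flag sits at bit 40 and uses BIT_ULL(40) rather than
the (u64)(1 << nn) pattern of its neighbors. That is deliberate: 1 << 40
would be a shift of a 32-bit int, which overflows, so positions above 31 must
be shifted in 64-bit from the start. A minimal userspace sketch of the idiom,
with BIT_ULL shown via its effective kernel definition:

    #include <stdint.h>
    #include <stdio.h>

    /* Effective definition of the kernel's BIT_ULL() macro */
    #define BIT_ULL(nr) (1ULL << (nr))

    int main(void)
    {
        /* (u64)(1 << 29) is safe: the 32-bit shift fits, then widens. */
        uint64_t dcb_capable = (uint64_t)(1 << 29);
        /* Bit 40 only works as a 64-bit shift. */
        uint64_t veb_mode = BIT_ULL(40);

        printf("%llx %llx\n", (unsigned long long)dcb_capable,
               (unsigned long long)veb_mode);
        return 0;
    }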
@@ -1021,6 +1021,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 			goto command_write_done;
 		}

+		/* By default we are in VEPA mode, if this is the first VF/VMDq
+		 * VSI to be added switch to VEB mode.
+		 */
+		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
+			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+			i40e_do_reset_safe(pf,
+					   BIT_ULL(__I40E_PF_RESET_REQUESTED));
+		}
+
 		vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0);
 		if (vsi)
 			dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n",
......
@@ -6097,6 +6097,10 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
 	if (ret)
 		goto end_reconstitute;

+	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
+		veb->bridge_mode = BRIDGE_MODE_VEB;
+	else
+		veb->bridge_mode = BRIDGE_MODE_VEPA;
 	i40e_config_bridge_mode(veb);

 	/* create the remaining VSIs attached to this VEB */
@@ -8031,7 +8035,12 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
 		} else if (mode != veb->bridge_mode) {
 			/* Existing HW bridge but different mode needs reset */
 			veb->bridge_mode = mode;
-			i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+			/* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
+			if (mode == BRIDGE_MODE_VEB)
+				pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+			else
+				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+			i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
 			break;
 		}
 	}
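
Editor's note: i40e_ndo_bridge_setlink() is reached from userspace through
the iproute2 `bridge` tool, so with this change an administrator flipping the
hardware bridge mode also keeps the VEB flag and the PF reset in sync. A
typical invocation (interface name illustrative):

    bridge link set dev enp2s0f0 hwmode veb    # or: hwmode vepa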
@@ -8343,7 +8352,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
 		ctxt.uplink_seid = vsi->uplink_seid;
 		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
 		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
-		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
+		if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
+		    (i40e_is_vsi_uplink_mode_veb(vsi))) {
 			ctxt.info.valid_sections |=
 				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
 			ctxt.info.switch_id =
@@ -8746,6 +8756,14 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
 				 __func__);
 			return NULL;
 		}
+		/* We come up by default in VEPA mode if SRIOV is not
+		 * already enabled, in which case we can't force VEPA
+		 * mode.
+		 */
+		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
+			veb->bridge_mode = BRIDGE_MODE_VEPA;
+			pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+		}
 		i40e_config_bridge_mode(veb);
 	}
 	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
@@ -9856,6 +9874,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_switch_setup;
 	}

+#ifdef CONFIG_PCI_IOV
+	/* prep for VF support */
+	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
+	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
+	    !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
+		if (pci_num_vf(pdev))
+			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+	}
+#endif
 	err = i40e_setup_pf_switch(pf, false);
 	if (err) {
 		dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
......
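Editor's note: the probe-time check above amounts to one rule: come up in VEB
mode only when SR-IOV and MSI-X are usable, the NVM is good, and VFs already
exist (the NPAR case, where firmware instantiates VFs before the PF driver
loads). A hypothetical userspace distillation of that predicate, purely to
make the rule explicit (not part of the patch):

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical distillation of the probe-time rule above; the
     * patch open-codes it inline in i40e_probe(). */
    static bool want_veb_at_probe(bool sriov_capable, bool msix_enabled,
                                  bool bad_eeprom, int num_vfs)
    {
        return sriov_capable && msix_enabled && !bad_eeprom && num_vfs > 0;
    }

    int main(void)
    {
        /* NPAR case: 2 VFs already exist at probe -> VEB (prints 1) */
        printf("%d\n", want_veb_at_probe(true, true, false, 2));
        /* normal cold boot: no VFs yet -> stay VEPA (prints 0) */
        printf("%d\n", want_veb_at_probe(true, true, false, 0));
        return 0;
    }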
@@ -2410,14 +2410,12 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
  * i40e_chk_linearize - Check if there are more than 8 fragments per packet
  * @skb: send buffer
  * @tx_flags: collected send information
- * @hdr_len: size of the packet header
  *
  * Note: Our HW can't scatter-gather more than 8 fragments to build
  * a packet on the wire and so we need to figure out the cases where we
  * need to linearize the skb.
  **/
-static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
-			       const u8 hdr_len)
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
 {
 	struct skb_frag_struct *frag;
 	bool linearize = false;
@@ -2429,7 +2427,7 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
 	gso_segs = skb_shinfo(skb)->gso_segs;

 	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
-		u16 j = 1;
+		u16 j = 0;

 		if (num_frags < (I40E_MAX_BUFFER_TXD))
 			goto linearize_chk_done;
@@ -2440,22 +2438,19 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
 			goto linearize_chk_done;
 		}
 		frag = &skb_shinfo(skb)->frags[0];
-		size = hdr_len;
 		/* we might still have more fragments per segment */
 		do {
 			size += skb_frag_size(frag);
 			frag++; j++;
+			if ((size >= skb_shinfo(skb)->gso_size) &&
+			    (j < I40E_MAX_BUFFER_TXD)) {
+				size = (size % skb_shinfo(skb)->gso_size);
+				j = (size) ? 1 : 0;
+			}
 			if (j == I40E_MAX_BUFFER_TXD) {
-				if (size < skb_shinfo(skb)->gso_size) {
-					linearize = true;
-					break;
-				}
-				j = 1;
-				size -= skb_shinfo(skb)->gso_size;
-				if (size)
-					j++;
-				size += hdr_len;
+				linearize = true;
+				break;
 			}
 			num_frags--;
 		} while (num_frags);
 	} else {
@@ -2724,7 +2719,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	if (tsyn)
 		tx_flags |= I40E_TX_FLAGS_TSYN;

-	if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+	if (i40e_chk_linearize(skb, tx_flags))
 		if (skb_linearize(skb))
 			goto out_drop;
......
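Editor's note: the heart of the Tx-hang fix is visible in the loop hunk
above. The old accounting seeded `size` with the header length and, when the
descriptor count hit the limit, reset it with a single subtraction, which
could mis-count some layouts of mixed-size frags; the new code instead folds
every completed segment out with a modulo on each step. A standalone
userspace sketch of the fixed accounting, with skb plumbing replaced by a
plain array and the DIV_ROUND_UP fast-path check omitted (I40E_MAX_BUFFER_TXD
is 8 for this hardware):

    #include <stdbool.h>
    #include <stdio.h>

    #define I40E_MAX_BUFFER_TXD 8

    /* Walk the frags; once a full gso_size worth of payload has been
     * consumed, carry the remainder into the next segment and restart
     * the per-segment descriptor count. If a single segment ever needs
     * I40E_MAX_BUFFER_TXD descriptors, the skb must be linearized. */
    static bool chk_linearize(const unsigned int *frag_size, int num_frags,
                              unsigned int gso_size)
    {
        unsigned int size = 0;
        int i, j = 0;

        if (num_frags < I40E_MAX_BUFFER_TXD)
            return false;

        for (i = 0; i < num_frags; i++) {
            size += frag_size[i];
            j++;
            if (size >= gso_size && j < I40E_MAX_BUFFER_TXD) {
                size %= gso_size;
                j = size ? 1 : 0;
            }
            if (j == I40E_MAX_BUFFER_TXD)
                return true;
        }
        return false;
    }

    int main(void)
    {
        /* Sixteen 512-byte frags under gso_size 9000: no segment
         * completes within 8 frags, so the descriptor limit is hit
         * and the skb has to be linearized. */
        unsigned int frags[16];
        int i;

        for (i = 0; i < 16; i++)
            frags[i] = 512;
        printf("linearize: %s\n",
               chk_linearize(frags, 16, 9000) ? "yes" : "no");
        return 0;
    }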
@@ -1018,11 +1018,19 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
 {
 	struct i40e_pf *pf = pci_get_drvdata(pdev);

-	if (num_vfs)
+	if (num_vfs) {
+		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
+			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+			i40e_do_reset_safe(pf,
+					   BIT_ULL(__I40E_PF_RESET_REQUESTED));
+		}
 		return i40e_pci_sriov_enable(pdev, num_vfs);
+	}

 	if (!pci_vfs_assigned(pf->pdev)) {
 		i40e_free_vfs(pf);
+		pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+		i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
 	} else {
 		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
 		return -EINVAL;
......
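Editor's note: i40e_pci_sriov_configure() is the driver's hook for the
standard PCI sysfs SR-IOV interface, so the VEPA-to-VEB switch (and the fall
back on teardown) now happens automatically when VFs are created or freed
there, e.g.:

    echo 4 > /sys/class/net/<ifname>/device/sriov_numvfs   # create VFs, switch to VEB
    echo 0 > /sys/class/net/<ifname>/device/sriov_numvfs   # free VFs, return to VEPA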
@@ -1619,14 +1619,12 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
  * i40e_chk_linearize - Check if there are more than 8 fragments per packet
  * @skb: send buffer
  * @tx_flags: collected send information
- * @hdr_len: size of the packet header
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
  **/
-static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
-			       const u8 hdr_len)
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
 {
 	struct skb_frag_struct *frag;
 	bool linearize = false;
@@ -1638,7 +1636,7 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
 	gso_segs = skb_shinfo(skb)->gso_segs;

 	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
-		u16 j = 1;
+		u16 j = 0;

 		if (num_frags < (I40E_MAX_BUFFER_TXD))
 			goto linearize_chk_done;
@@ -1649,22 +1647,19 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
 			goto linearize_chk_done;
 		}
 		frag = &skb_shinfo(skb)->frags[0];
-		size = hdr_len;
 		/* we might still have more fragments per segment */
 		do {
 			size += skb_frag_size(frag);
 			frag++; j++;
+			if ((size >= skb_shinfo(skb)->gso_size) &&
+			    (j < I40E_MAX_BUFFER_TXD)) {
+				size = (size % skb_shinfo(skb)->gso_size);
+				j = (size) ? 1 : 0;
+			}
 			if (j == I40E_MAX_BUFFER_TXD) {
-				if (size < skb_shinfo(skb)->gso_size) {
-					linearize = true;
-					break;
-				}
-				j = 1;
-				size -= skb_shinfo(skb)->gso_size;
-				if (size)
-					j++;
-				size += hdr_len;
+				linearize = true;
+				break;
 			}
 			num_frags--;
 		} while (num_frags);
 	} else {
@@ -1950,7 +1945,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	else if (tso)
 		tx_flags |= I40E_TX_FLAGS_TSO;

-	if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+	if (i40e_chk_linearize(skb, tx_flags))
 		if (skb_linearize(skb))
 			goto out_drop;
......