Commit 0ea1041b authored by David S. Miller

Merge branch 'bnxt_en-next'

Michael Chan says:

====================
bnxt_en: Updates for net-next.

This series includes these main enhancements:

1. Link related changes
    - add NRZ/PAM4 link signal mode to the link up message if known
    - rely on firmware to bring down the link during ifdown

2. SRIOV related changes
    - allow VF promiscuous mode if the VF is trusted
    - allow ndo operations to configure VF when the PF is ifdown
    - fix the scenario of the VF taking back control of its MAC address
    - add Hyper-V VF device IDs

3. Support the option to transmit without FCS/CRC (a brief user-space usage
   sketch follows the sign-off below).

4. Implement .ndo_features_check() to disable offloads when the UDP
   encapsulation ports of the packet are not supported by the device.

v2: Patch 10: Reverse the check for supported UDP ports to be more
straightforward.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
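
Not part of the commit: item 3 above makes the driver set TX_BD_FLAGS_NO_CRC for
skbs with skb->no_fcs, which user space reaches through the SO_NOFCS socket
option on AF_PACKET sockets; the packet layer only honors it on devices that
advertise IFF_SUPP_NOFCS, which bnxt_probe_phy() now sets when firmware reports
BNXT_PHY_FL_NO_FCS. A minimal sketch, assuming a kernel with SO_NOFCS support,
CAP_NET_RAW, and a hypothetical interface name "eth0":

/*
 * Minimal user-space sketch (not part of the commit), assuming a kernel that
 * provides SO_NOFCS and a port whose driver advertises IFF_SUPP_NOFCS.
 */
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <net/if.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef SO_NOFCS
#define SO_NOFCS 43	/* asm-generic value; some architectures differ */
#endif

int main(void)
{
	unsigned char frame[ETH_ZLEN] = { 0 };	/* fill in dst/src MAC, EtherType, payload */
	struct sockaddr_ll sll = { 0 };
	int one = 1;
	int fd;

	fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* Ask the stack not to append an FCS; af_packet then sets skb->no_fcs
	 * and the driver marks the TX descriptor with TX_BD_FLAGS_NO_CRC.
	 */
	if (setsockopt(fd, SOL_SOCKET, SO_NOFCS, &one, sizeof(one)) < 0) {
		perror("setsockopt(SO_NOFCS)");
		close(fd);
		return 1;
	}

	sll.sll_family = AF_PACKET;
	sll.sll_protocol = htons(ETH_P_ALL);
	sll.sll_ifindex = (int)if_nametoindex("eth0");	/* hypothetical port */
	sll.sll_halen = ETH_ALEN;

	if (sendto(fd, frame, sizeof(frame), 0,
		   (struct sockaddr *)&sll, sizeof(sll)) < 0)
		perror("sendto");

	close(fd);
	return 0;
}

If the outgoing device does not advertise IFF_SUPP_NOFCS, the packet layer is
expected to reject the send rather than silently appending an FCS.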
@@ -122,7 +122,10 @@ enum board_idx {
 	NETXTREME_E_VF,
 	NETXTREME_C_VF,
 	NETXTREME_S_VF,
+	NETXTREME_C_VF_HV,
+	NETXTREME_E_VF_HV,
 	NETXTREME_E_P5_VF,
+	NETXTREME_E_P5_VF_HV,
 };
 
 /* indexed by enum above */
@@ -170,7 +173,10 @@ static const struct {
 	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
 	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
 	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
+	[NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
+	[NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
 	[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
+	[NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
 };
 
 static const struct pci_device_id bnxt_pci_tbl[] = {
@@ -222,15 +228,25 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
 	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
 #ifdef CONFIG_BNXT_SRIOV
 	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
+	{ PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
+	{ PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
 	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
+	{ PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
 	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
+	{ PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
+	{ PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
+	{ PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
+	{ PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
 	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
 	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
 	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
 	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
 	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
+	{ PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
 	{ PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
 	{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
+	{ PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
+	{ PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
 	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
 #endif
 	{ 0 }
@@ -265,7 +281,8 @@ static struct workqueue_struct *bnxt_pf_wq;
 static bool bnxt_vf_pciid(enum board_idx idx)
 {
 	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
-		idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF);
+		idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
+		idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF);
 }
 
 #define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
@@ -358,6 +375,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct pci_dev *pdev = bp->pdev;
 	struct bnxt_tx_ring_info *txr;
 	struct bnxt_sw_tx_bd *tx_buf;
+	__le32 lflags = 0;
 
 	i = skb_get_queue_mapping(skb);
 	if (unlikely(i >= bp->tx_nr_rings)) {
@@ -399,6 +417,11 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
 	}
 
+	if (unlikely(skb->no_fcs)) {
+		lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
+		goto normal_tx;
+	}
+
 	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
 		struct tx_push_buffer *tx_push_buf = txr->tx_push;
 		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
@@ -500,7 +523,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	txbd1 = (struct tx_bd_ext *)
 		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
-	txbd1->tx_bd_hsize_lflags = 0;
+	txbd1->tx_bd_hsize_lflags = lflags;
 	if (skb_is_gso(skb)) {
 		u32 hdr_len;
@@ -512,14 +535,14 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			hdr_len = skb_transport_offset(skb) +
 					tcp_hdrlen(skb);
 
-		txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
+		txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
 					TX_BD_FLAGS_T_IPID |
 					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
 		length = skb_shinfo(skb)->gso_size;
 		txbd1->tx_bd_mss = cpu_to_le32(length);
 		length += hdr_len;
 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		txbd1->tx_bd_hsize_lflags =
+		txbd1->tx_bd_hsize_lflags |=
 			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
 		txbd1->tx_bd_mss = 0;
 	}
@@ -4145,7 +4168,7 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
 	bnxt_free_ntp_fltrs(bp, irq_re_init);
 	if (irq_re_init) {
 		bnxt_free_ring_stats(bp);
-		if (!(bp->fw_cap & BNXT_FW_CAP_PORT_STATS_NO_RESET) ||
+		if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
 		    test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
 			bnxt_free_port_stats(bp);
 		bnxt_free_ring_grps(bp);
@@ -8340,11 +8363,11 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
 #endif
 }
 
-/* Allow PF and VF with default VLAN to be in promiscuous mode */
+/* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
 static bool bnxt_promisc_ok(struct bnxt *bp)
 {
 #ifdef CONFIG_BNXT_SRIOV
-	if (BNXT_VF(bp) && !bp->vf.vlan)
+	if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
 		return false;
 #endif
 	return true;
@@ -8441,7 +8464,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
 	if (bp->dev->flags & IFF_BROADCAST)
 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
 
-	if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
+	if (bp->dev->flags & IFF_PROMISC)
 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
 
 	if (bp->dev->flags & IFF_ALLMULTI) {
@@ -9075,8 +9098,9 @@ static char *bnxt_report_fec(struct bnxt_link_info *link_info)
 static void bnxt_report_link(struct bnxt *bp)
 {
 	if (bp->link_info.link_up) {
-		const char *duplex;
+		const char *signal = "";
 		const char *flow_ctrl;
+		const char *duplex;
 		u32 speed;
 		u16 fec;
@@ -9098,9 +9122,24 @@ static void bnxt_report_link(struct bnxt *bp)
 			flow_ctrl = "ON - receive";
 		else
 			flow_ctrl = "none";
-		netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
-			    speed, duplex, flow_ctrl);
-		if (bp->flags & BNXT_FLAG_EEE_CAP)
+		if (bp->link_info.phy_qcfg_resp.option_flags &
+		    PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
+			u8 sig_mode = bp->link_info.active_fec_sig_mode &
+				      PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
+			switch (sig_mode) {
+			case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
+				signal = "(NRZ) ";
+				break;
+			case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
+				signal = "(PAM4) ";
+				break;
+			default:
+				break;
+			}
+		}
+		netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
+			    speed, signal, duplex, flow_ctrl);
+		if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
 			netdev_info(bp->dev, "EEE is %s\n",
 				    bp->eee.eee_active ? "active" :
 				    "not active");
@@ -9132,10 +9171,6 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
 	struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
 	struct bnxt_link_info *link_info = &bp->link_info;
 
-	bp->flags &= ~BNXT_FLAG_EEE_CAP;
-	if (bp->test_info)
-		bp->test_info->flags &= ~(BNXT_TEST_FL_EXT_LPBK |
-					  BNXT_TEST_FL_AN_PHY_LPBK);
 	if (bp->hwrm_spec_code < 0x10201)
 		return 0;
@@ -9146,31 +9181,17 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
 	if (rc)
 		goto hwrm_phy_qcaps_exit;
 
+	bp->phy_flags = resp->flags;
 	if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
 		struct ethtool_eee *eee = &bp->eee;
 		u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
 
-		bp->flags |= BNXT_FLAG_EEE_CAP;
 		eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
 		bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
 				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
 		bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
 				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
 	}
-	if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) {
-		if (bp->test_info)
-			bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
-	}
-	if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED) {
-		if (bp->test_info)
-			bp->test_info->flags |= BNXT_TEST_FL_AN_PHY_LPBK;
-	}
-	if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED) {
-		if (BNXT_PF(bp))
-			bp->fw_cap |= BNXT_FW_CAP_SHARED_PORT_CFG;
-	}
-	if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET)
-		bp->fw_cap |= BNXT_FW_CAP_PORT_STATS_NO_RESET;
 
 	if (bp->hwrm_spec_code >= 0x10a01) {
 		if (bnxt_phy_qcaps_no_speed(resp)) {
@@ -9261,7 +9282,7 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
 			      PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
 	link_info->module_status = resp->module_status;
 
-	if (bp->flags & BNXT_FLAG_EEE_CAP) {
+	if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
 		struct ethtool_eee *eee = &bp->eee;
 		u16 fw_speeds;
@@ -9497,7 +9518,8 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
 	if (!BNXT_SINGLE_PF(bp))
 		return 0;
 
-	if (pci_num_vf(bp->pdev))
+	if (pci_num_vf(bp->pdev) &&
+	    !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
 		return 0;
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
@@ -9839,7 +9861,7 @@ static bool bnxt_eee_config_ok(struct bnxt *bp)
 	struct ethtool_eee *eee = &bp->eee;
 	struct bnxt_link_info *link_info = &bp->link_info;
 
-	if (!(bp->flags & BNXT_FLAG_EEE_CAP))
+	if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
 		return true;
 
 	if (eee->eee_enabled) {
@@ -10486,7 +10508,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
 		  CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
 		  CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
 
-	if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
+	if (dev->flags & IFF_PROMISC)
 		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
 
 	uc_update = bnxt_uc_list_updated(bp);
@@ -10562,6 +10584,9 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
 	}
 
 skip_uc:
+	if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
+	    !bnxt_promisc_ok(bp))
+		vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
 	if (rc && vnic->mc_list_count) {
 		netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
@@ -10756,6 +10781,40 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
 	return rc;
 }
 
+static netdev_features_t bnxt_features_check(struct sk_buff *skb,
+					     struct net_device *dev,
+					     netdev_features_t features)
+{
+	struct bnxt *bp;
+	__be16 udp_port;
+	u8 l4_proto = 0;
+
+	features = vlan_features_check(skb, features);
+	if (!skb->encapsulation)
+		return features;
+
+	switch (vlan_get_protocol(skb)) {
+	case htons(ETH_P_IP):
+		l4_proto = ip_hdr(skb)->protocol;
+		break;
+	case htons(ETH_P_IPV6):
+		l4_proto = ipv6_hdr(skb)->nexthdr;
+		break;
+	default:
+		return features;
+	}
+
+	if (l4_proto != IPPROTO_UDP)
+		return features;
+
+	bp = netdev_priv(dev);
+	/* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
+	udp_port = udp_hdr(skb)->dest;
+	if (udp_port == bp->vxlan_port || udp_port == bp->nge_port)
+		return features;
+	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+}
+
 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
 			 u32 *reg_buf)
 {
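For orientation (not part of the commit): the core transmit path consults
.ndo_features_check() per packet, so clearing NETIF_F_CSUM_MASK and
NETIF_F_GSO_MASK above simply makes the stack resolve checksums and
segmentation in software before the skb reaches bnxt_start_xmit(). A rough,
paraphrased sketch of the caller side; the real logic lives in
netif_skb_features() and validate_xmit_skb() in net/core/dev.c:

/* Paraphrased sketch only, assuming <linux/netdevice.h>; not the literal
 * net/core/dev.c source.
 */
static netdev_features_t sketch_skb_features(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	netdev_features_t features = dev->features;

	/* VLAN/protocol-specific adjustments elided */

	if (dev->netdev_ops->ndo_features_check)
		features &= dev->netdev_ops->ndo_features_check(skb, dev,
								features);
	/* validate_xmit_skb() later performs software GSO/checksumming for
	 * any offload bits that were cleared here.
	 */
	return features;
}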
@@ -12263,10 +12322,13 @@ static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
 	unsigned int cmd;
 
 	udp_tunnel_nic_get_port(netdev, table, 0, &ti);
-	if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
+	if (ti.type == UDP_TUNNEL_TYPE_VXLAN) {
+		bp->vxlan_port = ti.port;
 		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
-	else
+	} else {
+		bp->nge_port = ti.port;
 		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
+	}
 
 	if (ti.port)
 		return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
@@ -12366,6 +12428,7 @@ static const struct net_device_ops bnxt_netdev_ops = {
 	.ndo_change_mtu		= bnxt_change_mtu,
 	.ndo_fix_features	= bnxt_fix_features,
 	.ndo_set_features	= bnxt_set_features,
+	.ndo_features_check	= bnxt_features_check,
 	.ndo_tx_timeout		= bnxt_tx_timeout,
 #ifdef CONFIG_BNXT_SRIOV
 	.ndo_get_vf_config	= bnxt_get_vf_config,
@@ -12434,12 +12497,17 @@ static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
 	int rc = 0;
 	struct bnxt_link_info *link_info = &bp->link_info;
 
+	bp->phy_flags = 0;
 	rc = bnxt_hwrm_phy_qcaps(bp);
 	if (rc) {
 		netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
 			   rc);
 		return rc;
 	}
+	if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
+		bp->dev->priv_flags |= IFF_SUPP_NOFCS;
+	else
+		bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
 	if (!fw_dflt)
 		return 0;
......
@@ -1341,9 +1341,6 @@ struct bnxt_led_info {
 
 struct bnxt_test_info {
 	u8 offline_mask;
-	u8 flags;
-#define BNXT_TEST_FL_EXT_LPBK		0x1
-#define BNXT_TEST_FL_AN_PHY_LPBK	0x2
 	u16 timeout;
 	char string[BNXT_MAX_TEST][ETH_GSTRING_LEN];
 };
@@ -1693,7 +1690,6 @@ struct bnxt {
 	#define BNXT_FLAG_SHARED_RINGS	0x200
 	#define BNXT_FLAG_PORT_STATS	0x400
 	#define BNXT_FLAG_UDP_RSS_CAP	0x800
-	#define BNXT_FLAG_EEE_CAP	0x1000
 	#define BNXT_FLAG_NEW_RSS_CAP	0x2000
 	#define BNXT_FLAG_WOL_CAP	0x4000
 	#define BNXT_FLAG_ROCEV1_CAP	0x8000
@@ -1720,8 +1716,10 @@ struct bnxt {
 #define BNXT_NPAR(bp)		((bp)->port_partition_type)
 #define BNXT_MH(bp)		((bp)->flags & BNXT_FLAG_MULTI_HOST)
 #define BNXT_SINGLE_PF(bp)	(BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp))
+#define BNXT_SH_PORT_CFG_OK(bp)	(BNXT_PF(bp) &&				\
+				 ((bp)->phy_flags & BNXT_PHY_FL_SHARED_PORT_CFG))
 #define BNXT_PHY_CFG_ABLE(bp)	((BNXT_SINGLE_PF(bp) ||			\
-				  ((bp)->fw_cap & BNXT_FW_CAP_SHARED_PORT_CFG)) && \
+				  BNXT_SH_PORT_CFG_OK(bp)) &&		\
 				 (bp)->link_info.phy_state == BNXT_PHY_STATE_ENABLED)
 #define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
 #define BNXT_RX_PAGE_MODE(bp)	((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
@@ -1871,11 +1869,9 @@ struct bnxt {
 	#define BNXT_FW_CAP_EXT_STATS_SUPPORTED		0x00040000
 	#define BNXT_FW_CAP_ERR_RECOVER_RELOAD		0x00100000
 	#define BNXT_FW_CAP_HOT_RESET			0x00200000
-	#define BNXT_FW_CAP_SHARED_PORT_CFG		0x00400000
 	#define BNXT_FW_CAP_VLAN_RX_STRIP		0x01000000
 	#define BNXT_FW_CAP_VLAN_TX_INSERT		0x02000000
 	#define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED	0x04000000
-	#define BNXT_FW_CAP_PORT_STATS_NO_RESET		0x10000000
 	#define BNXT_FW_CAP_RING_MONITOR		0x40000000
 
 #define BNXT_NEW_RM(bp)		((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
@@ -1918,6 +1914,8 @@ struct bnxt {
 	u16			vxlan_fw_dst_port_id;
 	u16			nge_fw_dst_port_id;
+	__be16			vxlan_port;
+	__be16			nge_port;
 	u8			port_partition_type;
 	u8			port_count;
 	u16			br_mode;
@@ -2010,6 +2008,17 @@ struct bnxt {
 	u32			lpi_tmr_lo;
 	u32			lpi_tmr_hi;
 
+	/* copied from flags in hwrm_port_phy_qcaps_output */
+	u8			phy_flags;
+#define BNXT_PHY_FL_EEE_CAP		PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED
+#define BNXT_PHY_FL_EXT_LPBK		PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED
+#define BNXT_PHY_FL_AN_PHY_LPBK		PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED
+#define BNXT_PHY_FL_SHARED_PORT_CFG	PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED
+#define BNXT_PHY_FL_PORT_STATS_NO_RESET	PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET
+#define BNXT_PHY_FL_NO_PHY_LPBK		PORT_PHY_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED
+#define BNXT_PHY_FL_FW_MANAGED_LKDN	PORT_PHY_QCAPS_RESP_FLAGS_FW_MANAGED_LINK_DOWN
+#define BNXT_PHY_FL_NO_FCS		PORT_PHY_QCAPS_RESP_FLAGS_NO_FCS
+
 	u8			num_tests;
 	struct bnxt_test_info	*test_info;
......
@@ -2912,7 +2912,7 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
 	if (!BNXT_PHY_CFG_ABLE(bp))
 		return -EOPNOTSUPP;
 
-	if (!(bp->flags & BNXT_FLAG_EEE_CAP))
+	if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
 		return -EOPNOTSUPP;
 
 	mutex_lock(&bp->link_lock);
@@ -2963,7 +2963,7 @@ static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
 {
 	struct bnxt *bp = netdev_priv(dev);
 
-	if (!(bp->flags & BNXT_FLAG_EEE_CAP))
+	if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
 		return -EOPNOTSUPP;
 
 	*edata = bp->eee;
@@ -3215,7 +3215,7 @@ static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
 	int rc;
 
 	if (!link_info->autoneg ||
-	    (bp->test_info->flags & BNXT_TEST_FL_AN_PHY_LPBK))
+	    (bp->phy_flags & BNXT_PHY_FL_AN_PHY_LPBK))
 		return 0;
 
 	rc = bnxt_query_force_speeds(bp, &fw_advertising);
@@ -3416,7 +3416,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
 	}
 
 	if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) &&
-	    (bp->test_info->flags & BNXT_TEST_FL_EXT_LPBK))
+	    (bp->phy_flags & BNXT_PHY_FL_EXT_LPBK))
 		do_ext_lpbk = true;
 
 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
......
@@ -49,10 +49,6 @@ static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
 
 static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
 {
-	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
-		netdev_err(bp->dev, "vf ndo called though PF is down\n");
-		return -EINVAL;
-	}
 	if (!bp->pf.active_vfs) {
 		netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
 		return -EINVAL;
@@ -113,7 +109,7 @@ static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf)
 	int rc;
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
-	req.fid = cpu_to_le16(vf->fw_fid);
+	req.fid = cpu_to_le16(BNXT_PF(bp) ? vf->fw_fid : 0xffff);
 	mutex_lock(&bp->hwrm_cmd_lock);
 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (rc) {
@@ -125,9 +121,9 @@ static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf)
 	return 0;
 }
 
-static bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
+bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
 {
-	if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
+	if (BNXT_PF(bp) && !(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
 		return !!(vf->flags & BNXT_VF_TRUST);
 
 	bnxt_hwrm_func_qcfg_flags(bp, vf);
@@ -1120,10 +1116,38 @@ void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
 	}
 }
 
+int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
+{
+	struct hwrm_func_vf_cfg_input req = {0};
+	int rc = 0;
+
+	if (!BNXT_VF(bp))
+		return 0;
+
+	if (bp->hwrm_spec_code < 0x10202) {
+		if (is_valid_ether_addr(bp->vf.mac_addr))
+			rc = -EADDRNOTAVAIL;
+		goto mac_done;
+	}
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
+	req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
+	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
+	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+mac_done:
+	if (rc && strict) {
+		rc = -EADDRNOTAVAIL;
+		netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
+			    mac);
+		return rc;
+	}
+	return 0;
+}
+
 void bnxt_update_vf_mac(struct bnxt *bp)
 {
 	struct hwrm_func_qcaps_input req = {0};
 	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+	bool inform_pf = false;
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
 	req.fid = cpu_to_le16(0xffff);
@@ -1139,42 +1163,24 @@ void bnxt_update_vf_mac(struct bnxt *bp)
 	 * default but the stored zero MAC will allow the VF user to change
 	 * the random MAC address using ndo_set_mac_address() if he wants.
 	 */
-	if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr))
+	if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr)) {
 		memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);
+		/* This means we are now using our own MAC address, let
+		 * the PF know about this MAC address.
+		 */
+		if (!is_valid_ether_addr(bp->vf.mac_addr))
+			inform_pf = true;
+	}
 
 	/* overwrite netdev dev_addr with admin VF MAC */
 	if (is_valid_ether_addr(bp->vf.mac_addr))
 		memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
 update_vf_mac_exit:
 	mutex_unlock(&bp->hwrm_cmd_lock);
+	if (inform_pf)
+		bnxt_approve_mac(bp, bp->dev->dev_addr, false);
 }
 
-int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
-{
-	struct hwrm_func_vf_cfg_input req = {0};
-	int rc = 0;
-
-	if (!BNXT_VF(bp))
-		return 0;
-
-	if (bp->hwrm_spec_code < 0x10202) {
-		if (is_valid_ether_addr(bp->vf.mac_addr))
-			rc = -EADDRNOTAVAIL;
-		goto mac_done;
-	}
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
-	req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
-	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
-	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-mac_done:
-	if (rc && strict) {
-		rc = -EADDRNOTAVAIL;
-		netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
-			    mac);
-		return rc;
-	}
-	return 0;
-}
-
 #else
 
 int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
......
@@ -34,6 +34,7 @@ int bnxt_set_vf_vlan(struct net_device *, int, u16, u8, __be16);
 int bnxt_set_vf_bw(struct net_device *, int, int, int);
 int bnxt_set_vf_link_state(struct net_device *, int, int);
 int bnxt_set_vf_spoofchk(struct net_device *, int, bool);
+bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf);
 int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trust);
 int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
 int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset);
......