提交 8ef5cc4f 编写于 作者: David S. Miller

Merge branch 'bnxt_en-Driver-updates'

Michael Chan says:

====================
bnxt_en: Driver updates.

This patch series adds some extended statistics available with the new
firmware interface, package version from firmware, aRFS support on
57500 chips, new PCI IDs, and some miscellaneous fixes and improvements.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
...@@ -114,6 +114,7 @@ enum board_idx { ...@@ -114,6 +114,7 @@ enum board_idx {
BCM5745x_NPAR, BCM5745x_NPAR,
BCM57508, BCM57508,
BCM57504, BCM57504,
BCM57502,
BCM58802, BCM58802,
BCM58804, BCM58804,
BCM58808, BCM58808,
...@@ -158,6 +159,7 @@ static const struct { ...@@ -158,6 +159,7 @@ static const struct {
[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" }, [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" }, [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
...@@ -205,6 +207,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = { ...@@ -205,6 +207,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 }, { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 }, { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 }, { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 }, { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 }, { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV #ifdef CONFIG_BNXT_SRIOV
...@@ -216,6 +219,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = { ...@@ -216,6 +219,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF }, { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF }, { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF }, { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
{ PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF }, { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF }, { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif #endif
...@@ -3396,6 +3400,12 @@ static void bnxt_free_port_stats(struct bnxt *bp) ...@@ -3396,6 +3400,12 @@ static void bnxt_free_port_stats(struct bnxt *bp)
bp->hw_rx_port_stats_ext_map); bp->hw_rx_port_stats_ext_map);
bp->hw_rx_port_stats_ext = NULL; bp->hw_rx_port_stats_ext = NULL;
} }
if (bp->hw_pcie_stats) {
dma_free_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
bp->hw_pcie_stats, bp->hw_pcie_stats_map);
bp->hw_pcie_stats = NULL;
}
} }
static void bnxt_free_ring_stats(struct bnxt *bp) static void bnxt_free_ring_stats(struct bnxt *bp)
...@@ -3440,7 +3450,9 @@ static int bnxt_alloc_stats(struct bnxt *bp) ...@@ -3440,7 +3450,9 @@ static int bnxt_alloc_stats(struct bnxt *bp)
cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
} }
if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) { if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
return 0;
if (bp->hw_rx_port_stats) if (bp->hw_rx_port_stats)
goto alloc_ext_stats; goto alloc_ext_stats;
...@@ -3454,34 +3466,32 @@ static int bnxt_alloc_stats(struct bnxt *bp) ...@@ -3454,34 +3466,32 @@ static int bnxt_alloc_stats(struct bnxt *bp)
if (!bp->hw_rx_port_stats) if (!bp->hw_rx_port_stats)
return -ENOMEM; return -ENOMEM;
bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) + bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) + 512;
512;
bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map + bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
sizeof(struct rx_port_stats) + 512; sizeof(struct rx_port_stats) + 512;
bp->flags |= BNXT_FLAG_PORT_STATS; bp->flags |= BNXT_FLAG_PORT_STATS;
alloc_ext_stats: alloc_ext_stats:
/* Display extended statistics only if FW supports it */ /* Display extended statistics only if FW supports it */
if (bp->hwrm_spec_code < 0x10804 || if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
bp->hwrm_spec_code == 0x10900) if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
return 0; return 0;
if (bp->hw_rx_port_stats_ext) if (bp->hw_rx_port_stats_ext)
goto alloc_tx_ext_stats; goto alloc_tx_ext_stats;
bp->hw_rx_port_stats_ext = bp->hw_rx_port_stats_ext =
dma_alloc_coherent(&pdev->dev, dma_alloc_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
sizeof(struct rx_port_stats_ext), &bp->hw_rx_port_stats_ext_map, GFP_KERNEL);
&bp->hw_rx_port_stats_ext_map,
GFP_KERNEL);
if (!bp->hw_rx_port_stats_ext) if (!bp->hw_rx_port_stats_ext)
return 0; return 0;
alloc_tx_ext_stats: alloc_tx_ext_stats:
if (bp->hw_tx_port_stats_ext) if (bp->hw_tx_port_stats_ext)
return 0; goto alloc_pcie_stats;
if (bp->hwrm_spec_code >= 0x10902) { if (bp->hwrm_spec_code >= 0x10902 ||
(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
bp->hw_tx_port_stats_ext = bp->hw_tx_port_stats_ext =
dma_alloc_coherent(&pdev->dev, dma_alloc_coherent(&pdev->dev,
sizeof(struct tx_port_stats_ext), sizeof(struct tx_port_stats_ext),
...@@ -3489,7 +3499,19 @@ static int bnxt_alloc_stats(struct bnxt *bp) ...@@ -3489,7 +3499,19 @@ static int bnxt_alloc_stats(struct bnxt *bp)
GFP_KERNEL); GFP_KERNEL);
} }
bp->flags |= BNXT_FLAG_PORT_STATS_EXT; bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
}
alloc_pcie_stats:
if (bp->hw_pcie_stats ||
!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
return 0;
bp->hw_pcie_stats =
dma_alloc_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
&bp->hw_pcie_stats_map, GFP_KERNEL);
if (!bp->hw_pcie_stats)
return 0;
bp->flags |= BNXT_FLAG_PCIE_STATS;
return 0; return 0;
} }
...@@ -4208,16 +4230,25 @@ static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, ...@@ -4208,16 +4230,25 @@ static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
struct bnxt_ntuple_filter *fltr) struct bnxt_ntuple_filter *fltr)
{ {
struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
struct hwrm_cfa_ntuple_filter_alloc_input req = {0}; struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
struct hwrm_cfa_ntuple_filter_alloc_output *resp; struct hwrm_cfa_ntuple_filter_alloc_output *resp;
struct flow_keys *keys = &fltr->fkeys; struct flow_keys *keys = &fltr->fkeys;
struct bnxt_vnic_info *vnic;
u32 dst_ena = 0;
int rc = 0; int rc = 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1); bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx]; req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS); if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX) {
dst_ena = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX;
req.rfs_ring_tbl_idx = cpu_to_le16(fltr->rxq);
vnic = &bp->vnic_info[0];
} else {
vnic = &bp->vnic_info[fltr->rxq + 1];
}
req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS | dst_ena);
req.ethertype = htons(ETH_P_IP); req.ethertype = htons(ETH_P_IP);
memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN); memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
...@@ -4255,7 +4286,6 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, ...@@ -4255,7 +4286,6 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
req.dst_port = keys->ports.dst; req.dst_port = keys->ports.dst;
req.dst_port_mask = cpu_to_be16(0xffff); req.dst_port_mask = cpu_to_be16(0xffff);
req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
mutex_lock(&bp->hwrm_cmd_lock); mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc) { if (!rc) {
...@@ -5503,11 +5533,13 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp) ...@@ -5503,11 +5533,13 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
stat = bnxt_get_func_stat_ctxs(bp); stat = bnxt_get_func_stat_ctxs(bp);
if (BNXT_NEW_RM(bp) && if (BNXT_NEW_RM(bp) &&
(hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp || (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
hw_resc->resv_irqs < nq || hw_resc->resv_vnics != vnic || hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
hw_resc->resv_stat_ctxs != stat ||
(hw_resc->resv_hw_ring_grps != grp && (hw_resc->resv_hw_ring_grps != grp &&
!(bp->flags & BNXT_FLAG_CHIP_P5)))) !(bp->flags & BNXT_FLAG_CHIP_P5))))
return true; return true;
if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
hw_resc->resv_irqs != nq)
return true;
return false; return false;
} }
...@@ -6056,6 +6088,8 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp) ...@@ -6056,6 +6088,8 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
ctx->tqm_entries_multiple = 1; ctx->tqm_entries_multiple = 1;
ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries); ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size); ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
ctx->mrav_num_entries_units =
le16_to_cpu(resp->mrav_num_entries_units);
ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size); ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries); ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
} else { } else {
...@@ -6102,6 +6136,7 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) ...@@ -6102,6 +6136,7 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
struct bnxt_ctx_pg_info *ctx_pg; struct bnxt_ctx_pg_info *ctx_pg;
__le32 *num_entries; __le32 *num_entries;
__le64 *pg_dir; __le64 *pg_dir;
u32 flags = 0;
u8 *pg_attr; u8 *pg_attr;
int i, rc; int i, rc;
u32 ena; u32 ena;
...@@ -6161,6 +6196,9 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) ...@@ -6161,6 +6196,9 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) { if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
ctx_pg = &ctx->mrav_mem; ctx_pg = &ctx->mrav_mem;
req.mrav_num_entries = cpu_to_le32(ctx_pg->entries); req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
if (ctx->mrav_num_entries_units)
flags |=
FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size); req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req.mrav_pg_size_mrav_lvl, &req.mrav_pg_size_mrav_lvl,
...@@ -6187,6 +6225,7 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) ...@@ -6187,6 +6225,7 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
*num_entries = cpu_to_le32(ctx_pg->entries); *num_entries = cpu_to_le32(ctx_pg->entries);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir); bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
} }
req.flags = cpu_to_le32(flags);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc) if (rc)
rc = -EIO; rc = -EIO;
...@@ -6325,6 +6364,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp) ...@@ -6325,6 +6364,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
struct bnxt_ctx_pg_info *ctx_pg; struct bnxt_ctx_pg_info *ctx_pg;
struct bnxt_ctx_mem_info *ctx; struct bnxt_ctx_mem_info *ctx;
u32 mem_size, ena, entries; u32 mem_size, ena, entries;
u32 num_mr, num_ah;
u32 extra_srqs = 0; u32 extra_srqs = 0;
u32 extra_qps = 0; u32 extra_qps = 0;
u8 pg_lvl = 1; u8 pg_lvl = 1;
...@@ -6388,12 +6428,21 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp) ...@@ -6388,12 +6428,21 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
goto skip_rdma; goto skip_rdma;
ctx_pg = &ctx->mrav_mem; ctx_pg = &ctx->mrav_mem;
ctx_pg->entries = extra_qps * 4; /* 128K extra is needed to accommodate static AH context
* allocation by f/w.
*/
num_mr = 1024 * 256;
num_ah = 1024 * 128;
ctx_pg->entries = num_mr + num_ah;
mem_size = ctx->mrav_entry_size * ctx_pg->entries; mem_size = ctx->mrav_entry_size * ctx_pg->entries;
rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2); rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2);
if (rc) if (rc)
return rc; return rc;
ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV; ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
if (ctx->mrav_num_entries_units)
ctx_pg->entries =
((num_mr / ctx->mrav_num_entries_units) << 16) |
(num_ah / ctx->mrav_num_entries_units);
ctx_pg = &ctx->tim_mem; ctx_pg = &ctx->tim_mem;
ctx_pg->entries = ctx->qp_mem.entries; ctx_pg->entries = ctx->qp_mem.entries;
...@@ -6508,6 +6557,10 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) ...@@ -6508,6 +6557,10 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->flags |= BNXT_FLAG_ROCEV1_CAP; bp->flags |= BNXT_FLAG_ROCEV1_CAP;
if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED) if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
bp->flags |= BNXT_FLAG_ROCEV2_CAP; bp->flags |= BNXT_FLAG_ROCEV2_CAP;
if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
bp->tx_push_thresh = 0; bp->tx_push_thresh = 0;
if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
...@@ -6580,6 +6633,34 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp) ...@@ -6580,6 +6633,34 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
return 0; return 0;
} }
/* Query the firmware's advanced CFA flow-management capabilities and
 * record them in bp->fw_cap.  Currently this only probes whether ntuple
 * (aRFS) filters may target an RX ring table index directly, which is
 * required for RFS on 57500 (P5) chips.
 *
 * Returns 0 on success (or when the capability query itself is not
 * supported by the firmware), otherwise the HWRM error code.
 */
static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
{
	struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
	struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
	int rc;

	/* Nothing to query if the firmware never advertised the command. */
	if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
		return 0;

	resp = bp->hwrm_cmd_resp_addr;
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		u32 caps = le32_to_cpu(resp->flags);

		if (caps &
		    CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED)
			bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int bnxt_hwrm_func_reset(struct bnxt *bp) static int bnxt_hwrm_func_reset(struct bnxt *bp)
{ {
struct hwrm_func_reset_input req = {0}; struct hwrm_func_reset_input req = {0};
...@@ -6671,6 +6752,15 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp) ...@@ -6671,6 +6752,15 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b, resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b); resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
if (strlen(resp->active_pkg_name)) {
int fw_ver_len = strlen(bp->fw_ver_str);
snprintf(bp->fw_ver_str + fw_ver_len,
FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
resp->active_pkg_name);
bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
}
bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout); bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
if (!bp->hwrm_cmd_timeout) if (!bp->hwrm_cmd_timeout)
bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
...@@ -6703,6 +6793,10 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp) ...@@ -6703,6 +6793,10 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED) VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF; bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
if (dev_caps_cfg &
VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
hwrm_ver_get_exit: hwrm_ver_get_exit:
mutex_unlock(&bp->hwrm_cmd_lock); mutex_unlock(&bp->hwrm_cmd_lock);
return rc; return rc;
...@@ -6808,6 +6902,19 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp) ...@@ -6808,6 +6902,19 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
return rc; return rc;
} }
static int bnxt_hwrm_pcie_qstats(struct bnxt *bp)
{
struct hwrm_pcie_qstats_input req = {0};
if (!(bp->flags & BNXT_FLAG_PCIE_STATS))
return 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1);
req.pcie_stat_size = cpu_to_le16(sizeof(struct pcie_ctx_hw_stats));
req.pcie_stat_host_addr = cpu_to_le64(bp->hw_pcie_stats_map);
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
{ {
if (bp->vxlan_port_cnt) { if (bp->vxlan_port_cnt) {
...@@ -8655,7 +8762,7 @@ static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg, ...@@ -8655,7 +8762,7 @@ static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
req.port_id = cpu_to_le16(bp->pf.port_id); req.port_id = cpu_to_le16(bp->pf.port_id);
req.phy_addr = phy_addr; req.phy_addr = phy_addr;
req.reg_addr = cpu_to_le16(reg & 0x1f); req.reg_addr = cpu_to_le16(reg & 0x1f);
if (bp->link_info.support_speeds & BNXT_LINK_SPEED_MSK_10GB) { if (mdio_phy_id_is_c45(phy_addr)) {
req.cl45_mdio = 1; req.cl45_mdio = 1;
req.phy_addr = mdio_phy_id_prtad(phy_addr); req.phy_addr = mdio_phy_id_prtad(phy_addr);
req.dev_addr = mdio_phy_id_devad(phy_addr); req.dev_addr = mdio_phy_id_devad(phy_addr);
...@@ -8682,7 +8789,7 @@ static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg, ...@@ -8682,7 +8789,7 @@ static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
req.port_id = cpu_to_le16(bp->pf.port_id); req.port_id = cpu_to_le16(bp->pf.port_id);
req.phy_addr = phy_addr; req.phy_addr = phy_addr;
req.reg_addr = cpu_to_le16(reg & 0x1f); req.reg_addr = cpu_to_le16(reg & 0x1f);
if (bp->link_info.support_speeds & BNXT_LINK_SPEED_MSK_10GB) { if (mdio_phy_id_is_c45(phy_addr)) {
req.cl45_mdio = 1; req.cl45_mdio = 1;
req.phy_addr = mdio_phy_id_prtad(phy_addr); req.phy_addr = mdio_phy_id_prtad(phy_addr);
req.dev_addr = mdio_phy_id_devad(phy_addr); req.dev_addr = mdio_phy_id_devad(phy_addr);
...@@ -9000,8 +9107,11 @@ static bool bnxt_can_reserve_rings(struct bnxt *bp) ...@@ -9000,8 +9107,11 @@ static bool bnxt_can_reserve_rings(struct bnxt *bp)
/* If the chip and firmware supports RFS */ /* If the chip and firmware supports RFS */
static bool bnxt_rfs_supported(struct bnxt *bp) static bool bnxt_rfs_supported(struct bnxt *bp)
{ {
if (bp->flags & BNXT_FLAG_CHIP_P5) if (bp->flags & BNXT_FLAG_CHIP_P5) {
if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX)
return true;
return false; return false;
}
if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
return true; return true;
if (bp->flags & BNXT_FLAG_NEW_RSS_CAP) if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
...@@ -9016,7 +9126,7 @@ static bool bnxt_rfs_capable(struct bnxt *bp) ...@@ -9016,7 +9126,7 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
int vnics, max_vnics, max_rss_ctxs; int vnics, max_vnics, max_rss_ctxs;
if (bp->flags & BNXT_FLAG_CHIP_P5) if (bp->flags & BNXT_FLAG_CHIP_P5)
return false; return bnxt_rfs_supported(bp);
if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp)) if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
return false; return false;
...@@ -9398,6 +9508,7 @@ static void bnxt_sp_task(struct work_struct *work) ...@@ -9398,6 +9508,7 @@ static void bnxt_sp_task(struct work_struct *work)
if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) { if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
bnxt_hwrm_port_qstats(bp); bnxt_hwrm_port_qstats(bp);
bnxt_hwrm_port_qstats_ext(bp); bnxt_hwrm_port_qstats_ext(bp);
bnxt_hwrm_pcie_qstats(bp);
} }
if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
...@@ -10601,6 +10712,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -10601,6 +10712,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = -1; rc = -1;
goto init_err_pci_clean; goto init_err_pci_clean;
} }
rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
if (rc)
netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
rc);
rc = bnxt_init_mac_addr(bp); rc = bnxt_init_mac_addr(bp);
if (rc) { if (rc) {
dev_err(&pdev->dev, "Unable to initialize mac address.\n"); dev_err(&pdev->dev, "Unable to initialize mac address.\n");
......
...@@ -1227,6 +1227,7 @@ struct bnxt_ctx_mem_info { ...@@ -1227,6 +1227,7 @@ struct bnxt_ctx_mem_info {
u16 mrav_entry_size; u16 mrav_entry_size;
u16 tim_entry_size; u16 tim_entry_size;
u32 tim_max_entries; u32 tim_max_entries;
u16 mrav_num_entries_units;
u8 tqm_entries_multiple; u8 tqm_entries_multiple;
u32 flags; u32 flags;
...@@ -1354,6 +1355,7 @@ struct bnxt { ...@@ -1354,6 +1355,7 @@ struct bnxt {
#define BNXT_FLAG_DIM 0x2000000 #define BNXT_FLAG_DIM 0x2000000
#define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000 #define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000
#define BNXT_FLAG_PORT_STATS_EXT 0x10000000 #define BNXT_FLAG_PORT_STATS_EXT 0x10000000
#define BNXT_FLAG_PCIE_STATS 0x40000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \ #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
BNXT_FLAG_RFS | \ BNXT_FLAG_RFS | \
...@@ -1480,6 +1482,11 @@ struct bnxt { ...@@ -1480,6 +1482,11 @@ struct bnxt {
#define BNXT_FW_CAP_KONG_MB_CHNL 0x00000080 #define BNXT_FW_CAP_KONG_MB_CHNL 0x00000080
#define BNXT_FW_CAP_OVS_64BIT_HANDLE 0x00000400 #define BNXT_FW_CAP_OVS_64BIT_HANDLE 0x00000400
#define BNXT_FW_CAP_TRUSTED_VF 0x00000800 #define BNXT_FW_CAP_TRUSTED_VF 0x00000800
#define BNXT_FW_CAP_PKG_VER 0x00004000
#define BNXT_FW_CAP_CFA_ADV_FLOW 0x00008000
#define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX 0x00010000
#define BNXT_FW_CAP_PCIE_STATS_SUPPORTED 0x00020000
#define BNXT_FW_CAP_EXT_STATS_SUPPORTED 0x00040000
#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM) #define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
u32 hwrm_spec_code; u32 hwrm_spec_code;
...@@ -1498,10 +1505,12 @@ struct bnxt { ...@@ -1498,10 +1505,12 @@ struct bnxt {
struct tx_port_stats *hw_tx_port_stats; struct tx_port_stats *hw_tx_port_stats;
struct rx_port_stats_ext *hw_rx_port_stats_ext; struct rx_port_stats_ext *hw_rx_port_stats_ext;
struct tx_port_stats_ext *hw_tx_port_stats_ext; struct tx_port_stats_ext *hw_tx_port_stats_ext;
struct pcie_ctx_hw_stats *hw_pcie_stats;
dma_addr_t hw_rx_port_stats_map; dma_addr_t hw_rx_port_stats_map;
dma_addr_t hw_tx_port_stats_map; dma_addr_t hw_tx_port_stats_map;
dma_addr_t hw_rx_port_stats_ext_map; dma_addr_t hw_rx_port_stats_ext_map;
dma_addr_t hw_tx_port_stats_ext_map; dma_addr_t hw_tx_port_stats_ext_map;
dma_addr_t hw_pcie_stats_map;
int hw_port_stats_size; int hw_port_stats_size;
u16 fw_rx_stats_ext_size; u16 fw_rx_stats_ext_size;
u16 fw_tx_stats_ext_size; u16 fw_tx_stats_ext_size;
...@@ -1634,6 +1643,9 @@ struct bnxt { ...@@ -1634,6 +1643,9 @@ struct bnxt {
#define BNXT_TX_STATS_EXT_OFFSET(counter) \ #define BNXT_TX_STATS_EXT_OFFSET(counter) \
(offsetof(struct tx_port_stats_ext, counter) / 8) (offsetof(struct tx_port_stats_ext, counter) / 8)
#define BNXT_PCIE_STATS_OFFSET(counter) \
(offsetof(struct pcie_ctx_hw_stats, counter) / 8)
#define I2C_DEV_ADDR_A0 0xa0 #define I2C_DEV_ADDR_A0 0xa0
#define I2C_DEV_ADDR_A2 0xa2 #define I2C_DEV_ADDR_A2 0xa2
#define SFF_DIAG_SUPPORT_OFFSET 0x5c #define SFF_DIAG_SUPPORT_OFFSET 0x5c
......
...@@ -235,6 +235,9 @@ static int bnxt_set_coalesce(struct net_device *dev, ...@@ -235,6 +235,9 @@ static int bnxt_set_coalesce(struct net_device *dev,
BNXT_TX_STATS_PRI_ENTRY(counter, 6), \ BNXT_TX_STATS_PRI_ENTRY(counter, 6), \
BNXT_TX_STATS_PRI_ENTRY(counter, 7) BNXT_TX_STATS_PRI_ENTRY(counter, 7)
#define BNXT_PCIE_STATS_ENTRY(counter) \
{ BNXT_PCIE_STATS_OFFSET(counter), __stringify(counter) }
enum { enum {
RX_TOTAL_DISCARDS, RX_TOTAL_DISCARDS,
TX_TOTAL_DISCARDS, TX_TOTAL_DISCARDS,
...@@ -345,6 +348,10 @@ static const struct { ...@@ -345,6 +348,10 @@ static const struct {
BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events), BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
BNXT_RX_STATS_EXT_COS_ENTRIES, BNXT_RX_STATS_EXT_COS_ENTRIES,
BNXT_RX_STATS_EXT_PFC_ENTRIES, BNXT_RX_STATS_EXT_PFC_ENTRIES,
BNXT_RX_STATS_EXT_ENTRY(rx_bits),
BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold),
BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err),
BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits),
}; };
static const struct { static const struct {
...@@ -383,6 +390,24 @@ static const struct { ...@@ -383,6 +390,24 @@ static const struct {
BNXT_TX_STATS_PRI_ENTRIES(tx_packets), BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
}; };
static const struct {
long offset;
char string[ETH_GSTRING_LEN];
} bnxt_pcie_stats_arr[] = {
BNXT_PCIE_STATS_ENTRY(pcie_pl_signal_integrity),
BNXT_PCIE_STATS_ENTRY(pcie_dl_signal_integrity),
BNXT_PCIE_STATS_ENTRY(pcie_tl_signal_integrity),
BNXT_PCIE_STATS_ENTRY(pcie_link_integrity),
BNXT_PCIE_STATS_ENTRY(pcie_tx_traffic_rate),
BNXT_PCIE_STATS_ENTRY(pcie_rx_traffic_rate),
BNXT_PCIE_STATS_ENTRY(pcie_tx_dllp_statistics),
BNXT_PCIE_STATS_ENTRY(pcie_rx_dllp_statistics),
BNXT_PCIE_STATS_ENTRY(pcie_equalization_time),
BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[0]),
BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[2]),
BNXT_PCIE_STATS_ENTRY(pcie_recovery_histogram),
};
#define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats) #define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats)
#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr) #define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
#define BNXT_NUM_STATS_PRI \ #define BNXT_NUM_STATS_PRI \
...@@ -390,6 +415,7 @@ static const struct { ...@@ -390,6 +415,7 @@ static const struct {
ARRAY_SIZE(bnxt_rx_pkts_pri_arr) + \ ARRAY_SIZE(bnxt_rx_pkts_pri_arr) + \
ARRAY_SIZE(bnxt_tx_bytes_pri_arr) + \ ARRAY_SIZE(bnxt_tx_bytes_pri_arr) + \
ARRAY_SIZE(bnxt_tx_pkts_pri_arr)) ARRAY_SIZE(bnxt_tx_pkts_pri_arr))
#define BNXT_NUM_PCIE_STATS ARRAY_SIZE(bnxt_pcie_stats_arr)
static int bnxt_get_num_stats(struct bnxt *bp) static int bnxt_get_num_stats(struct bnxt *bp)
{ {
...@@ -407,6 +433,9 @@ static int bnxt_get_num_stats(struct bnxt *bp) ...@@ -407,6 +433,9 @@ static int bnxt_get_num_stats(struct bnxt *bp)
num_stats += BNXT_NUM_STATS_PRI; num_stats += BNXT_NUM_STATS_PRI;
} }
if (bp->flags & BNXT_FLAG_PCIE_STATS)
num_stats += BNXT_NUM_PCIE_STATS;
return num_stats; return num_stats;
} }
...@@ -509,6 +538,14 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, ...@@ -509,6 +538,14 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
} }
} }
} }
if (bp->flags & BNXT_FLAG_PCIE_STATS) {
__le64 *pcie_stats = (__le64 *)bp->hw_pcie_stats;
for (i = 0; i < BNXT_NUM_PCIE_STATS; i++, j++) {
buf[j] = le64_to_cpu(*(pcie_stats +
bnxt_pcie_stats_arr[i].offset));
}
}
} }
static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf) static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
...@@ -609,6 +646,12 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf) ...@@ -609,6 +646,12 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
} }
} }
} }
if (bp->flags & BNXT_FLAG_PCIE_STATS) {
for (i = 0; i < BNXT_NUM_PCIE_STATS; i++) {
strcpy(buf, bnxt_pcie_stats_arr[i].string);
buf += ETH_GSTRING_LEN;
}
}
break; break;
case ETH_SS_TEST: case ETH_SS_TEST:
if (bp->num_tests) if (bp->num_tests)
...@@ -3262,6 +3305,7 @@ void bnxt_ethtool_init(struct bnxt *bp) ...@@ -3262,6 +3305,7 @@ void bnxt_ethtool_init(struct bnxt *bp)
struct net_device *dev = bp->dev; struct net_device *dev = bp->dev;
int i, rc; int i, rc;
if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER))
bnxt_get_pkgver(dev); bnxt_get_pkgver(dev);
if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp)) if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp))
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册