Commit 5749d6af authored by David S. Miller

Merge branch 'bnxt_en-next'

Michael Chan says:

====================
bnxt_en: Update for net-next.

Misc. updates including updated firmware interface, some additional
port statistics, a new IRQ assignment scheme for the RDMA driver, support
for VF trust, and other changes and improvements for SRIOV.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
......@@ -12,11 +12,11 @@
#define BNXT_H
#define DRV_MODULE_NAME "bnxt_en"
#define DRV_MODULE_VERSION "1.9.0"
#define DRV_MODULE_VERSION "1.9.1"
#define DRV_VER_MAJ 1
#define DRV_VER_MIN 9
#define DRV_VER_UPD 0
#define DRV_VER_UPD 1
#include <linux/interrupt.h>
#include <linux/rhashtable.h>
......@@ -573,6 +573,10 @@ struct bnxt_ring_struct {
void **vmem;
u16 fw_ring_id; /* Ring id filled by Chimp FW */
union {
u16 grp_idx;
u16 map_idx; /* Used by cmpl rings */
};
u8 queue_id;
};
......@@ -786,6 +790,7 @@ struct bnxt_hw_resc {
u16 min_tx_rings;
u16 max_tx_rings;
u16 resv_tx_rings;
u16 max_tx_sch_inputs;
u16 min_rx_rings;
u16 max_rx_rings;
u16 resv_rx_rings;
......@@ -815,6 +820,7 @@ struct bnxt_vf_info {
#define BNXT_VF_SPOOFCHK 0x2
#define BNXT_VF_LINK_FORCED 0x4
#define BNXT_VF_LINK_UP 0x8
#define BNXT_VF_TRUST 0x10
u32 func_flags; /* func cfg flags */
u32 min_tx_rate;
u32 max_tx_rate;
......@@ -1151,7 +1157,9 @@ struct bnxt {
#define BNXT_FLAG_FW_DCBX_AGENT 0x800000
#define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
#define BNXT_FLAG_DIM 0x2000000
#define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000
#define BNXT_FLAG_NEW_RM 0x8000000
#define BNXT_FLAG_PORT_STATS_EXT 0x10000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
BNXT_FLAG_RFS | \
......@@ -1271,8 +1279,10 @@ struct bnxt {
struct rx_port_stats *hw_rx_port_stats;
struct tx_port_stats *hw_tx_port_stats;
struct rx_port_stats_ext *hw_rx_port_stats_ext;
dma_addr_t hw_rx_port_stats_map;
dma_addr_t hw_tx_port_stats_map;
dma_addr_t hw_rx_port_stats_ext_map;
int hw_port_stats_size;
u16 hwrm_max_req_len;
......@@ -1383,6 +1393,9 @@ struct bnxt {
((offsetof(struct tx_port_stats, counter) + \
sizeof(struct rx_port_stats) + 512) / 8)
#define BNXT_RX_STATS_EXT_OFFSET(counter) \
(offsetof(struct rx_port_stats_ext, counter) / 8)
#define I2C_DEV_ADDR_A0 0xa0
#define I2C_DEV_ADDR_A2 0xa2
#define SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e
......@@ -1441,13 +1454,17 @@ unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max);
unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max);
unsigned int bnxt_get_max_func_irqs(struct bnxt *bp);
void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max);
int bnxt_get_avail_msix(struct bnxt *bp, int num);
int bnxt_reserve_rings(struct bnxt *bp);
void bnxt_tx_disable(struct bnxt *bp);
void bnxt_tx_enable(struct bnxt *bp);
int bnxt_hwrm_set_pause(struct bnxt *);
int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all);
int bnxt_hwrm_fw_set_time(struct bnxt *);
int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp);
......
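
The BNXT_RX_STATS_EXT_OFFSET() macro added in the bnxt.h hunk above expresses counter positions in 64-bit words, because firmware DMAs the extended RX port statistics into struct rx_port_stats_ext (defined later in this series) and the driver reads the buffer back as an array of __le64. A minimal, illustrative helper, not part of the driver:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Illustration only: word_offset is a value such as
 * BNXT_RX_STATS_EXT_OFFSET(resume_pause_events), which is 16 / 8 == 2
 * with the rx_port_stats_ext layout added by this commit.
 */
static inline u64 bnxt_read_rx_ext_stat(const __le64 *hw_rx_port_stats_ext,
					long word_offset)
{
	return le64_to_cpu(hw_rx_port_stats_ext[word_offset]);
}
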
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
* Copyright (c) 2016-2017 Broadcom Limited
* Copyright (c) 2016-2018 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
......@@ -34,7 +34,8 @@ struct bnxt_cos2bw_cfg {
};
#define BNXT_LLQ(q_profile) \
((q_profile) == QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS)
((q_profile) == \
QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE)
#define HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL 0x0300
......
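
A minimal usage sketch of the reworked BNXT_LLQ() check, not taken from this commit: the macro is applied to the per-queue service profiles that HWRM_QUEUE_QPORTCFG returns, and with this change only the LOSSLESS_ROCE profile counts as a lossless queue.

/* Hypothetical helper; the array of profiles stands in for the
 * queue_idN_service_profile fields of hwrm_queue_qportcfg_output.
 */
static int bnxt_count_lossless_queues(const u8 *q_profiles, int nq)
{
	int i, n = 0;

	for (i = 0; i < nq; i++)
		if (BNXT_LLQ(q_profiles[i]))
			n++;
	return n;
}
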
......@@ -137,6 +137,9 @@ static int bnxt_set_coalesce(struct net_device *dev,
#define BNXT_TX_STATS_ENTRY(counter) \
{ BNXT_TX_STATS_OFFSET(counter), __stringify(counter) }
#define BNXT_RX_STATS_EXT_ENTRY(counter) \
{ BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) }
static const struct {
long offset;
char string[ETH_GSTRING_LEN];
......@@ -181,6 +184,8 @@ static const struct {
BNXT_RX_STATS_ENTRY(rx_bytes),
BNXT_RX_STATS_ENTRY(rx_runt_bytes),
BNXT_RX_STATS_ENTRY(rx_runt_frames),
BNXT_RX_STATS_ENTRY(rx_stat_discard),
BNXT_RX_STATS_ENTRY(rx_stat_err),
BNXT_TX_STATS_ENTRY(tx_64b_frames),
BNXT_TX_STATS_ENTRY(tx_65b_127b_frames),
......@@ -216,9 +221,24 @@ static const struct {
BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration),
BNXT_TX_STATS_ENTRY(tx_total_collisions),
BNXT_TX_STATS_ENTRY(tx_bytes),
BNXT_TX_STATS_ENTRY(tx_xthol_frames),
BNXT_TX_STATS_ENTRY(tx_stat_discard),
BNXT_TX_STATS_ENTRY(tx_stat_error),
};
static const struct {
long offset;
char string[ETH_GSTRING_LEN];
} bnxt_port_stats_ext_arr[] = {
BNXT_RX_STATS_EXT_ENTRY(link_down_events),
BNXT_RX_STATS_EXT_ENTRY(continuous_pause_events),
BNXT_RX_STATS_EXT_ENTRY(resume_pause_events),
BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events),
BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
};
#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
#define BNXT_NUM_PORT_STATS_EXT ARRAY_SIZE(bnxt_port_stats_ext_arr)
static int bnxt_get_num_stats(struct bnxt *bp)
{
......@@ -227,6 +247,9 @@ static int bnxt_get_num_stats(struct bnxt *bp)
if (bp->flags & BNXT_FLAG_PORT_STATS)
num_stats += BNXT_NUM_PORT_STATS;
if (bp->flags & BNXT_FLAG_PORT_STATS_EXT)
num_stats += BNXT_NUM_PORT_STATS_EXT;
return num_stats;
}
......@@ -274,6 +297,14 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
bnxt_port_stats_arr[i].offset));
}
}
if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
__le64 *port_stats_ext = (__le64 *)bp->hw_rx_port_stats_ext;
for (i = 0; i < BNXT_NUM_PORT_STATS_EXT; i++, j++) {
buf[j] = le64_to_cpu(*(port_stats_ext +
bnxt_port_stats_ext_arr[i].offset));
}
}
}
static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
......@@ -334,6 +365,12 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
buf += ETH_GSTRING_LEN;
}
}
if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
for (i = 0; i < BNXT_NUM_PORT_STATS_EXT; i++) {
strcpy(buf, bnxt_port_stats_ext_arr[i].string);
buf += ETH_GSTRING_LEN;
}
}
break;
case ETH_SS_TEST:
if (bp->num_tests)
......@@ -388,15 +425,26 @@ static void bnxt_get_channels(struct net_device *dev,
struct ethtool_channels *channel)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
int max_rx_rings, max_tx_rings, tcs;
int max_tx_sch_inputs;
/* Get the most up-to-date max_tx_sch_inputs. */
if (bp->flags & BNXT_FLAG_NEW_RM)
bnxt_hwrm_func_resc_qcaps(bp, false);
max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;
bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
if (max_tx_sch_inputs)
max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);
if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
max_rx_rings = 0;
max_tx_rings = 0;
}
if (max_tx_sch_inputs)
max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
tcs = netdev_get_num_tc(dev);
if (tcs > 1)
......@@ -2535,16 +2583,20 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
return -EOPNOTSUPP;
rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_CHIP);
if (!rc)
if (!rc) {
netdev_info(dev, "Reset request successful. Reload driver to complete reset\n");
*flags = 0;
}
} else if (*flags == ETH_RESET_AP) {
/* This feature is not supported in older firmware versions */
if (bp->hwrm_spec_code < 0x10803)
return -EOPNOTSUPP;
rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_AP);
if (!rc)
if (!rc) {
netdev_info(dev, "Reset Application Processor request successful.\n");
*flags = 0;
}
} else {
rc = -EINVAL;
}
......
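
The ethtool code above only reads bp->hw_rx_port_stats_ext; the buffer itself is filled by firmware in response to the new HWRM_PORT_QSTATS_EXT command. A hedged sketch of that query, assuming the driver's usual HWRM helpers (bnxt_hwrm_cmd_hdr_init() and hwrm_send_message()); the actual implementation lives in bnxt.c and is not part of this excerpt:

static int bnxt_hwrm_port_qstats_ext_sketch(struct bnxt *bp)
{
	struct hwrm_port_qstats_ext_input req = {0};

	if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
	req.port_id = cpu_to_le16(bp->pf.port_id);
	/* Tell firmware how big the DMA buffer is and where it lives. */
	req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
	req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
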
......@@ -188,6 +188,7 @@ struct cmd_nums {
#define HWRM_STAT_CTX_FREE 0xb1UL
#define HWRM_STAT_CTX_QUERY 0xb2UL
#define HWRM_STAT_CTX_CLR_STATS 0xb3UL
#define HWRM_PORT_QSTATS_EXT 0xb4UL
#define HWRM_FW_RESET 0xc0UL
#define HWRM_FW_QSTATUS 0xc1UL
#define HWRM_FW_SET_TIME 0xc8UL
......@@ -199,6 +200,7 @@ struct cmd_nums {
#define HWRM_REJECT_FWD_RESP 0xd1UL
#define HWRM_FWD_RESP 0xd2UL
#define HWRM_FWD_ASYNC_EVENT_CMPL 0xd3UL
#define HWRM_OEM_CMD 0xd4UL
#define HWRM_TEMP_MONITOR_QUERY 0xe0UL
#define HWRM_WOL_FILTER_ALLOC 0xf0UL
#define HWRM_WOL_FILTER_FREE 0xf1UL
......@@ -271,6 +273,7 @@ struct cmd_nums {
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
#define HWRM_SELFTEST_RETRIEVE_SERDES_DATA 0x203UL
#define HWRM_PCIE_QSTATS 0x204UL
#define HWRM_DBG_READ_DIRECT 0xff10UL
#define HWRM_DBG_READ_INDIRECT 0xff11UL
#define HWRM_DBG_WRITE_DIRECT 0xff12UL
......@@ -341,9 +344,9 @@ struct hwrm_err_output {
#define HWRM_RESP_VALID_KEY 1
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 9
#define HWRM_VERSION_UPDATE 0
#define HWRM_VERSION_RSVD 0
#define HWRM_VERSION_STR "1.9.0.0"
#define HWRM_VERSION_UPDATE 1
#define HWRM_VERSION_RSVD 15
#define HWRM_VERSION_STR "1.9.1.15"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
......@@ -616,30 +619,6 @@ struct hwrm_async_event_cmpl_link_speed_cfg_change {
#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
};
/* hwrm_async_event_cmpl_pf_drvr_unload (size:128b/16B) */
struct hwrm_async_event_cmpl_pf_drvr_unload {
__le16 type;
#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK 0x3fUL
#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0
#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_LAST ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT
__le16 event_id;
#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_LAST ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD
__le32 event_data2;
u8 opaque_v;
#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V 0x1UL
#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK 0x70000UL
#define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16
};
/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */
struct hwrm_async_event_cmpl_vf_cfg_change {
__le16 type;
......@@ -854,6 +833,7 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED 0x2000UL
#define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED 0x4000UL
#define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED 0x8000UL
#define FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED 0x10000UL
u8 mac_address[6];
__le16 max_rsscos_ctx;
__le16 max_cmpl_rings;
......@@ -966,10 +946,14 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL
#define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
#define FUNC_QCFG_RESP_EVB_MODE_LAST FUNC_QCFG_RESP_EVB_MODE_VEPA
u8 cache_linesize;
#define FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_64 0x0UL
#define FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128 0x1UL
#define FUNC_QCFG_RESP_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128
u8 options;
#define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
#define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SFT 0
#define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
#define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
#define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128
#define FUNC_QCFG_RESP_OPTIONS_RSVD_MASK 0xfcUL
#define FUNC_QCFG_RESP_OPTIONS_RSVD_SFT 2
__le16 alloc_vfs;
__le32 alloc_mcast_filters;
__le32 alloc_hw_ring_grps;
......@@ -1124,10 +1108,14 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_EVB_MODE_VEB 0x1UL
#define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL
#define FUNC_CFG_REQ_EVB_MODE_LAST FUNC_CFG_REQ_EVB_MODE_VEPA
u8 cache_linesize;
#define FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_64 0x0UL
#define FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_128 0x1UL
#define FUNC_CFG_REQ_CACHE_LINESIZE_LAST FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_128
u8 options;
#define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
#define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SFT 0
#define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
#define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
#define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_LAST FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128
#define FUNC_CFG_REQ_OPTIONS_RSVD_MASK 0xfcUL
#define FUNC_CFG_REQ_OPTIONS_RSVD_SFT 2
__le16 num_mcast_filters;
};
......@@ -1248,7 +1236,7 @@ struct hwrm_func_vf_vnic_ids_query_output {
u8 valid;
};
/* hwrm_func_drv_rgtr_input (size:832b/104B) */
/* hwrm_func_drv_rgtr_input (size:896b/112B) */
struct hwrm_func_drv_rgtr_input {
__le16 req_type;
__le16 cmpl_ring;
......@@ -1258,6 +1246,7 @@ struct hwrm_func_drv_rgtr_input {
__le32 flags;
#define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
#define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
#define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL
__le32 enables;
#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
#define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
......@@ -1277,14 +1266,18 @@ struct hwrm_func_drv_rgtr_input {
#define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL
#define FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI 0x8000UL
#define FUNC_DRV_RGTR_REQ_OS_TYPE_LAST FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI
u8 ver_maj;
u8 ver_min;
u8 ver_upd;
u8 ver_maj_8b;
u8 ver_min_8b;
u8 ver_upd_8b;
u8 unused_0[3];
__le32 timestamp;
u8 unused_1[4];
__le32 vf_req_fwd[8];
__le32 async_event_fwd[8];
__le16 ver_maj;
__le16 ver_min;
__le16 ver_upd;
__le16 ver_patch;
};
/* hwrm_func_drv_rgtr_output (size:128b/16B) */
......@@ -1379,7 +1372,7 @@ struct hwrm_func_drv_qver_input {
u8 unused_0[2];
};
/* hwrm_func_drv_qver_output (size:128b/16B) */
/* hwrm_func_drv_qver_output (size:192b/24B) */
struct hwrm_func_drv_qver_output {
__le16 error_code;
__le16 req_type;
......@@ -1398,11 +1391,15 @@ struct hwrm_func_drv_qver_output {
#define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL
#define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI 0x8000UL
#define FUNC_DRV_QVER_RESP_OS_TYPE_LAST FUNC_DRV_QVER_RESP_OS_TYPE_UEFI
u8 ver_maj;
u8 ver_min;
u8 ver_upd;
u8 ver_maj_8b;
u8 ver_min_8b;
u8 ver_upd_8b;
u8 unused_0[2];
u8 valid;
__le16 ver_maj;
__le16 ver_min;
__le16 ver_upd;
__le16 ver_patch;
};
/* hwrm_func_resource_qcaps_input (size:192b/24B) */
......@@ -1416,7 +1413,7 @@ struct hwrm_func_resource_qcaps_input {
u8 unused_0[6];
};
/* hwrm_func_resource_qcaps_output (size:384b/48B) */
/* hwrm_func_resource_qcaps_output (size:448b/56B) */
struct hwrm_func_resource_qcaps_output {
__le16 error_code;
__le16 req_type;
......@@ -1427,7 +1424,8 @@ struct hwrm_func_resource_qcaps_output {
__le16 vf_reservation_strategy;
#define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MAXIMAL 0x0UL
#define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL 0x1UL
#define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_LAST FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL
#define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC 0x2UL
#define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_LAST FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC
__le16 min_rsscos_ctx;
__le16 max_rsscos_ctx;
__le16 min_cmpl_rings;
......@@ -1444,7 +1442,8 @@ struct hwrm_func_resource_qcaps_output {
__le16 max_stat_ctx;
__le16 min_hw_ring_grps;
__le16 max_hw_ring_grps;
u8 unused_0;
__le16 max_tx_scheduler_inputs;
u8 unused_0[7];
u8 valid;
};
......@@ -1627,6 +1626,16 @@ struct hwrm_port_phy_cfg_output {
u8 valid;
};
/* hwrm_port_phy_cfg_cmd_err (size:64b/8B) */
struct hwrm_port_phy_cfg_cmd_err {
u8 code;
#define PORT_PHY_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
#define PORT_PHY_CFG_CMD_ERR_CODE_ILLEGAL_SPEED 0x1UL
#define PORT_PHY_CFG_CMD_ERR_CODE_RETRY 0x2UL
#define PORT_PHY_CFG_CMD_ERR_CODE_LAST PORT_PHY_CFG_CMD_ERR_CODE_RETRY
u8 unused_0[7];
};
/* hwrm_port_phy_qcfg_input (size:192b/24B) */
struct hwrm_port_phy_qcfg_input {
__le16 req_type;
......@@ -2030,6 +2039,33 @@ struct hwrm_port_qstats_output {
u8 valid;
};
/* hwrm_port_qstats_ext_input (size:320b/40B) */
struct hwrm_port_qstats_ext_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
__le16 port_id;
__le16 tx_stat_size;
__le16 rx_stat_size;
u8 unused_0[2];
__le64 tx_stat_host_addr;
__le64 rx_stat_host_addr;
};
/* hwrm_port_qstats_ext_output (size:128b/16B) */
struct hwrm_port_qstats_ext_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
__le16 tx_stat_size;
__le16 rx_stat_size;
u8 unused_0[3];
u8 valid;
};
/* hwrm_port_lpbk_qstats_input (size:128b/16B) */
struct hwrm_port_lpbk_qstats_input {
__le16 req_type;
......@@ -2552,7 +2588,11 @@ struct hwrm_queue_qportcfg_input {
#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX 0x1UL
#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
__le16 port_id;
u8 unused_0[2];
u8 drv_qmap_cap;
#define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_DISABLED 0x0UL
#define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED 0x1UL
#define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_LAST QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED
u8 unused_0;
};
/* hwrm_queue_qportcfg_output (size:256b/32B) */
......@@ -2572,49 +2612,65 @@ struct hwrm_queue_qportcfg_output {
u8 queue_id0;
u8 queue_id0_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN
u8 queue_id1;
u8 queue_id1_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN
u8 queue_id2;
u8 queue_id2_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN
u8 queue_id3;
u8 queue_id3_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN
u8 queue_id4;
u8 queue_id4_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN
u8 queue_id5;
u8 queue_id5_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN
u8 queue_id6;
u8 queue_id6_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN
u8 queue_id7;
u8 queue_id7_service_profile;
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN
u8 valid;
......@@ -5180,6 +5236,29 @@ struct hwrm_stat_ctx_clr_stats_output {
u8 valid;
};
/* hwrm_pcie_qstats_input (size:256b/32B) */
struct hwrm_pcie_qstats_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
__le16 pcie_stat_size;
u8 unused_0[6];
__le64 pcie_stat_host_addr;
};
/* hwrm_pcie_qstats_output (size:128b/16B) */
struct hwrm_pcie_qstats_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
__le16 pcie_stat_size;
u8 unused_0[5];
u8 valid;
};
/* tx_port_stats (size:3264b/408B) */
struct tx_port_stats {
__le64 tx_64b_frames;
......@@ -5305,6 +5384,30 @@ struct rx_port_stats {
__le64 rx_stat_err;
};
/* rx_port_stats_ext (size:320b/40B) */
struct rx_port_stats_ext {
__le64 link_down_events;
__le64 continuous_pause_events;
__le64 resume_pause_events;
__le64 continuous_roce_pause_events;
__le64 resume_roce_pause_events;
};
/* pcie_ctx_hw_stats (size:768b/96B) */
struct pcie_ctx_hw_stats {
__le64 pcie_pl_signal_integrity;
__le64 pcie_dl_signal_integrity;
__le64 pcie_tl_signal_integrity;
__le64 pcie_link_integrity;
__le64 pcie_tx_traffic_rate;
__le64 pcie_rx_traffic_rate;
__le64 pcie_tx_dllp_statistics;
__le64 pcie_rx_dllp_statistics;
__le64 pcie_equalization_time;
__le32 pcie_ltssm_histogram[4];
__le64 pcie_recovery_histogram;
};
/* hwrm_fw_reset_input (size:192b/24B) */
struct hwrm_fw_reset_input {
__le16 req_type;
......@@ -5320,7 +5423,8 @@ struct hwrm_fw_reset_input {
#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP
#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT 0x7UL
#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT
u8 selfrst_status;
#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL
#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL
......@@ -6253,8 +6357,7 @@ struct hwrm_selftest_exec_input {
#define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL
#define SELFTEST_EXEC_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL
#define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL
u8 pcie_lane_num;
u8 unused_0[6];
u8 unused_0[7];
};
/* hwrm_selftest_exec_output (size:128b/16B) */
......
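
One user-visible part of the 1.9.1.15 interface update is driver version registration: hwrm_func_drv_rgtr_input grows 16-bit ver_maj/ver_min/ver_upd/ver_patch fields plus a 16BIT_VER_MODE flag, while the old 8-bit fields are renamed to *_8b and kept for older firmware. A hedged sketch of filling both sets (the real call site is bnxt_hwrm_func_drv_rgtr() in bnxt.c, outside this excerpt):

static void bnxt_fill_drv_ver_sketch(struct hwrm_func_drv_rgtr_input *req)
{
	req->enables |= cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VER);
	req->flags |= cpu_to_le32(FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE);
	/* Legacy 8-bit version, still understood by older firmware. */
	req->ver_maj_8b = DRV_VER_MAJ;
	req->ver_min_8b = DRV_VER_MIN;
	req->ver_upd_8b = DRV_VER_UPD;
	/* New 16-bit version fields from this interface update. */
	req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
	req->ver_min = cpu_to_le16(DRV_VER_MIN);
	req->ver_upd = cpu_to_le16(DRV_VER_UPD);
}
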
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
* Copyright (c) 2016-2017 Broadcom Limited
* Copyright (c) 2016-2018 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
......@@ -121,6 +121,23 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
return rc;
}
int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_vf_info *vf;
if (bnxt_vf_ndo_prep(bp, vf_id))
return -EINVAL;
vf = &bp->pf.vf[vf_id];
if (trusted)
vf->flags |= BNXT_VF_TRUST;
else
vf->flags &= ~BNXT_VF_TRUST;
return 0;
}
int bnxt_get_vf_config(struct net_device *dev, int vf_id,
struct ifla_vf_info *ivi)
{
......@@ -147,6 +164,7 @@ int bnxt_get_vf_config(struct net_device *dev, int vf_id,
else
ivi->qos = 0;
ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
ivi->trusted = !!(vf->flags & BNXT_VF_TRUST);
if (!(vf->flags & BNXT_VF_LINK_FORCED))
ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
else if (vf->flags & BNXT_VF_LINK_UP)
......@@ -492,18 +510,16 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
}
mutex_unlock(&bp->hwrm_cmd_lock);
if (pf->active_vfs) {
u16 n = 1;
u16 n = pf->active_vfs;
if (pf->vf_resv_strategy != BNXT_VF_RESV_STRATEGY_MINIMAL)
n = pf->active_vfs;
hw_resc->max_tx_rings -= vf_tx_rings * n;
hw_resc->max_rx_rings -= vf_rx_rings * n;
hw_resc->max_hw_ring_grps -= vf_ring_grps * n;
hw_resc->max_cp_rings -= vf_cp_rings * n;
hw_resc->max_tx_rings -= le16_to_cpu(req.min_tx_rings) * n;
hw_resc->max_rx_rings -= le16_to_cpu(req.min_rx_rings) * n;
hw_resc->max_hw_ring_grps -= le16_to_cpu(req.min_hw_ring_grps) *
n;
hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n;
hw_resc->max_rsscos_ctxs -= pf->active_vfs;
hw_resc->max_stat_ctxs -= vf_stat_ctx * n;
hw_resc->max_vnics -= vf_vnics * n;
hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n;
hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n;
rc = pf->active_vfs;
}
......@@ -886,18 +902,19 @@ static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
return rc;
}
static int bnxt_vf_store_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input);
struct hwrm_func_vf_cfg_input *req =
(struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr;
/* Only allow VF to set a valid MAC address if the PF assigned MAC
* address is zero
/* Allow VF to set a valid MAC address, if trust is set to on or
* if the PF assigned MAC address is zero
*/
if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
if (is_valid_ether_addr(req->dflt_mac_addr) &&
!is_valid_ether_addr(vf->mac_addr)) {
((vf->flags & BNXT_VF_TRUST) ||
(!is_valid_ether_addr(vf->mac_addr)))) {
ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
}
......@@ -913,11 +930,17 @@ static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
(struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
bool mac_ok = false;
/* VF MAC address must first match PF MAC address, if it is valid.
if (!is_valid_ether_addr((const u8 *)req->l2_addr))
return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
/* Allow VF to set a valid MAC address, if trust is set to on.
* Or VF MAC address must first match MAC address in PF's context.
* Otherwise, it must match the VF MAC address if firmware spec >=
* 1.2.2
*/
if (is_valid_ether_addr(vf->mac_addr)) {
if (vf->flags & BNXT_VF_TRUST) {
mac_ok = true;
} else if (is_valid_ether_addr(vf->mac_addr)) {
if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
mac_ok = true;
} else if (is_valid_ether_addr(vf->vf_mac_addr)) {
......@@ -951,7 +974,9 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
sizeof(phy_qcfg_resp));
mutex_unlock(&bp->hwrm_cmd_lock);
phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
phy_qcfg_resp.valid = 1;
if (vf->flags & BNXT_VF_LINK_UP) {
/* if physical link is down, force link up on VF */
......@@ -993,7 +1018,7 @@ static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
switch (req_type) {
case HWRM_FUNC_VF_CFG:
rc = bnxt_vf_store_mac(bp, vf);
rc = bnxt_vf_configure_mac(bp, vf);
break;
case HWRM_CFA_L2_FILTER_ALLOC:
rc = bnxt_vf_validate_set_mac(bp, vf);
......
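
The new bnxt_set_vf_trust() above has the shape of the kernel's standard .ndo_set_vf_trust callback, which is what "ip link set <pf> vf <n> trust on" reaches through the rtnetlink VF code. The ops-table hunk is not part of this excerpt; a hedged sketch of the wiring:

#include <linux/netdevice.h>

static const struct net_device_ops bnxt_netdev_ops_sketch = {
	/* ... existing callbacks elided ... */
	.ndo_set_vf_spoofchk	= bnxt_set_vf_spoofchk,
	.ndo_set_vf_trust	= bnxt_set_vf_trust,
};
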
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
* Copyright (c) 2016-2017 Broadcom Limited
* Copyright (c) 2016-2018 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
......@@ -17,6 +17,7 @@ int bnxt_set_vf_vlan(struct net_device *, int, u16, u8, __be16);
int bnxt_set_vf_bw(struct net_device *, int, int, int);
int bnxt_set_vf_link_state(struct net_device *, int, int);
int bnxt_set_vf_spoofchk(struct net_device *, int, bool);
int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trust);
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
void bnxt_sriov_disable(struct bnxt *);
void bnxt_hwrm_exec_fwd_req(struct bnxt *);
......
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2016 Broadcom Limited
* Copyright (c) 2016-2018 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
......@@ -101,13 +101,28 @@ static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id)
return 0;
}
static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
{
struct bnxt_en_dev *edev = bp->edev;
int num_msix, idx, i;
num_msix = edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested;
idx = edev->ulp_tbl[BNXT_ROCE_ULP].msix_base;
for (i = 0; i < num_msix; i++) {
ent[i].vector = bp->irq_tbl[idx + i].vector;
ent[i].ring_idx = idx + i;
ent[i].db_offset = (idx + i) * 0x80;
}
}
static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
struct bnxt_msix_entry *ent, int num_msix)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
int max_idx, max_cp_rings;
int avail_msix, i, idx;
int avail_msix, idx;
int rc = 0;
ASSERT_RTNL();
if (ulp_id != BNXT_ROCE_ULP)
......@@ -116,23 +131,47 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
if (!(bp->flags & BNXT_FLAG_USING_MSIX))
return -ENODEV;
if (edev->ulp_tbl[ulp_id].msix_requested)
return -EAGAIN;
max_cp_rings = bnxt_get_max_func_cp_rings(bp);
max_idx = min_t(int, bp->total_irqs, max_cp_rings);
avail_msix = max_idx - bp->cp_nr_rings;
avail_msix = bnxt_get_avail_msix(bp, num_msix);
if (!avail_msix)
return -ENOMEM;
if (avail_msix > num_msix)
avail_msix = num_msix;
if (bp->flags & BNXT_FLAG_NEW_RM) {
idx = bp->cp_nr_rings;
} else {
max_idx = min_t(int, bp->total_irqs, max_cp_rings);
idx = max_idx - avail_msix;
for (i = 0; i < avail_msix; i++) {
ent[i].vector = bp->irq_tbl[idx + i].vector;
ent[i].ring_idx = idx + i;
ent[i].db_offset = (idx + i) * 0x80;
}
bnxt_set_max_func_irqs(bp, max_idx - avail_msix);
bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix);
edev->ulp_tbl[ulp_id].msix_base = idx;
edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
if (bp->total_irqs < (idx + avail_msix)) {
if (netif_running(dev)) {
bnxt_close_nic(bp, true, false);
rc = bnxt_open_nic(bp, true, false);
} else {
rc = bnxt_reserve_rings(bp);
}
}
if (rc) {
edev->ulp_tbl[ulp_id].msix_requested = 0;
return -EAGAIN;
}
if (bp->flags & BNXT_FLAG_NEW_RM) {
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
avail_msix = hw_resc->resv_cp_rings - bp->cp_nr_rings;
edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
}
bnxt_fill_msix_vecs(bp, ent);
bnxt_set_max_func_irqs(bp, bnxt_get_max_func_irqs(bp) - avail_msix);
bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix);
edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
return avail_msix;
}
......@@ -146,11 +185,40 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
if (ulp_id != BNXT_ROCE_ULP)
return -EINVAL;
if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
return 0;
max_cp_rings = bnxt_get_max_func_cp_rings(bp);
msix_requested = edev->ulp_tbl[ulp_id].msix_requested;
bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested);
edev->ulp_tbl[ulp_id].msix_requested = 0;
bnxt_set_max_func_irqs(bp, bp->total_irqs);
bnxt_set_max_func_irqs(bp, bnxt_get_max_func_irqs(bp) + msix_requested);
edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
if (netif_running(dev)) {
bnxt_close_nic(bp, true, false);
bnxt_open_nic(bp, true, false);
}
return 0;
}
int bnxt_get_ulp_msix_num(struct bnxt *bp)
{
if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
struct bnxt_en_dev *edev = bp->edev;
return edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested;
}
return 0;
}
int bnxt_get_ulp_msix_base(struct bnxt *bp)
{
if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
struct bnxt_en_dev *edev = bp->edev;
if (edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested)
return edev->ulp_tbl[BNXT_ROCE_ULP].msix_base;
}
return 0;
}
......@@ -287,6 +355,58 @@ void bnxt_ulp_shutdown(struct bnxt *bp)
}
}
void bnxt_ulp_irq_stop(struct bnxt *bp)
{
struct bnxt_en_dev *edev = bp->edev;
struct bnxt_ulp_ops *ops;
if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
return;
if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
struct bnxt_ulp *ulp = &edev->ulp_tbl[BNXT_ROCE_ULP];
if (!ulp->msix_requested)
return;
ops = rtnl_dereference(ulp->ulp_ops);
if (!ops || !ops->ulp_irq_stop)
return;
ops->ulp_irq_stop(ulp->handle);
}
}
void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
{
struct bnxt_en_dev *edev = bp->edev;
struct bnxt_ulp_ops *ops;
if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
return;
if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
struct bnxt_ulp *ulp = &edev->ulp_tbl[BNXT_ROCE_ULP];
struct bnxt_msix_entry *ent = NULL;
if (!ulp->msix_requested)
return;
ops = rtnl_dereference(ulp->ulp_ops);
if (!ops || !ops->ulp_irq_restart)
return;
if (!err) {
ent = kcalloc(ulp->msix_requested, sizeof(*ent),
GFP_KERNEL);
if (!ent)
return;
bnxt_fill_msix_vecs(bp, ent);
}
ops->ulp_irq_restart(ulp->handle, ent);
kfree(ent);
}
}
void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
{
u16 event_id = le16_to_cpu(cmpl->event_id);
......
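
On the consumer side, a RoCE ULP such as bnxt_re is expected to provide the two new callbacks so bnxt_en can quiesce it before re-targeting MSI-X vectors and hand back the refreshed vector table afterwards. A hedged sketch, with illustrative handler names and bodies that are not taken from this commit:

static void bnxt_re_ulp_irq_stop_sketch(void *handle)
{
	/* Stop RDMA completion processing; bnxt_en is about to reshuffle
	 * its rings and the MSI-X vectors may move.
	 */
}

static void bnxt_re_ulp_irq_restart_sketch(void *handle,
					   struct bnxt_msix_entry *ent)
{
	/* If ent is non-NULL, re-arm completion queues with the refreshed
	 * vector/ring_idx/db_offset values from bnxt_fill_msix_vecs().
	 */
}

static struct bnxt_ulp_ops bnxt_re_ulp_ops_sketch = {
	.ulp_irq_stop		= bnxt_re_ulp_irq_stop_sketch,
	.ulp_irq_restart	= bnxt_re_ulp_irq_restart_sketch,
	/* other callbacks omitted */
};
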
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2016 Broadcom Limited
* Copyright (c) 2016-2018 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
......@@ -20,6 +20,12 @@
struct hwrm_async_event_cmpl;
struct bnxt;
struct bnxt_msix_entry {
u32 vector;
u32 ring_idx;
u32 db_offset;
};
struct bnxt_ulp_ops {
/* async_notifier() cannot sleep (in BH context) */
void (*ulp_async_notifier)(void *, struct hwrm_async_event_cmpl *);
......@@ -27,12 +33,8 @@ struct bnxt_ulp_ops {
void (*ulp_start)(void *);
void (*ulp_sriov_config)(void *, int);
void (*ulp_shutdown)(void *);
};
struct bnxt_msix_entry {
u32 vector;
u32 ring_idx;
u32 db_offset;
void (*ulp_irq_stop)(void *);
void (*ulp_irq_restart)(void *, struct bnxt_msix_entry *);
};
struct bnxt_fw_msg {
......@@ -49,6 +51,7 @@ struct bnxt_ulp {
unsigned long *async_events_bmap;
u16 max_async_event_id;
u16 msix_requested;
u16 msix_base;
atomic_t ref_count;
};
......@@ -60,6 +63,7 @@ struct bnxt_en_dev {
#define BNXT_EN_FLAG_ROCEV2_CAP 0x2
#define BNXT_EN_FLAG_ROCE_CAP (BNXT_EN_FLAG_ROCEV1_CAP | \
BNXT_EN_FLAG_ROCEV2_CAP)
#define BNXT_EN_FLAG_MSIX_REQUESTED 0x4
const struct bnxt_en_ops *en_ops;
struct bnxt_ulp ulp_tbl[BNXT_MAX_ULP];
};
......@@ -84,11 +88,15 @@ static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev, int ulp_id)
return false;
}
int bnxt_get_ulp_msix_num(struct bnxt *bp);
int bnxt_get_ulp_msix_base(struct bnxt *bp);
void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id);
void bnxt_ulp_stop(struct bnxt *bp);
void bnxt_ulp_start(struct bnxt *bp);
void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
void bnxt_ulp_shutdown(struct bnxt *bp);
void bnxt_ulp_irq_stop(struct bnxt *bp);
void bnxt_ulp_irq_restart(struct bnxt *bp, int err);
void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl);
struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev);
......
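
A hedged sketch of how the two new accessors can be used by ring accounting code (the real call sites are in bnxt.c, outside this excerpt): bnxt_get_ulp_msix_num() reports how many vectors the RoCE driver has reserved, and bnxt_get_ulp_msix_base() reports where its completion rings start.

static int bnxt_nic_cp_rings_avail_sketch(struct bnxt *bp, int max_cp_rings)
{
	/* Completion rings left for the NIC after the ULP reservation. */
	return max_cp_rings - bnxt_get_ulp_msix_num(bp);
}
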