Commit 46d5efa9 authored by David S. Miller

Merge branch 'bnxt_en-next'

Michael Chan says:

====================
bnxt_en: updates for net-next.

Miscellaneous updates covering SRIOV, IRQ coalescing, firmware logging and
package version for net-next.  Thanks.

v2: Updated description and added more comments for patch 1.  Fixed
function parameters formatting for patch 4.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -1239,13 +1239,17 @@ static int bnxt_async_event_process(struct bnxt *bp,
switch (event_id) {
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
schedule_work(&bp->sp_task);
break;
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
break;
default:
netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
event_id);
break;
goto async_event_process_exit;
}
schedule_work(&bp->sp_task);
async_event_process_exit:
return 0;
}
@@ -2596,28 +2600,27 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
u16 cmpl_ring, u16 target_id)
{
struct hwrm_cmd_req_hdr *req = request;
struct input *req = request;
req->cmpl_ring_req_type =
cpu_to_le32(req_type | (cmpl_ring << HWRM_CMPL_RING_SFT));
req->target_id_seq_id = cpu_to_le32(target_id << HWRM_TARGET_FID_SFT);
req->req_type = cpu_to_le16(req_type);
req->cmpl_ring = cpu_to_le16(cmpl_ring);
req->target_id = cpu_to_le16(target_id);
req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
}
int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
int timeout, bool silent)
{
int i, intr_process, rc;
struct hwrm_cmd_req_hdr *req = msg;
struct input *req = msg;
u32 *data = msg;
__le32 *resp_len, *valid;
u16 cp_ring_id, len = 0;
struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
req->target_id_seq_id |= cpu_to_le32(bp->hwrm_cmd_seq++);
req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
memset(resp, 0, PAGE_SIZE);
cp_ring_id = (le32_to_cpu(req->cmpl_ring_req_type) &
HWRM_CMPL_RING_MASK) >>
HWRM_CMPL_RING_SFT;
cp_ring_id = le16_to_cpu(req->cmpl_ring);
intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
/* Write request msg to hwrm channel */
@@ -2628,12 +2631,14 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
/* currently supports only one outstanding message */
if (intr_process)
bp->hwrm_intr_seq_id = le32_to_cpu(req->target_id_seq_id) &
HWRM_SEQ_ID_MASK;
bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
/* Ring channel doorbell */
writel(1, bp->bar0 + 0x100);
if (!timeout)
timeout = DFLT_HWRM_CMD_TIMEOUT;
i = 0;
if (intr_process) {
/* Wait until hwrm response cmpl interrupt is processed */
@@ -2644,7 +2649,7 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
req->cmpl_ring_req_type);
le16_to_cpu(req->req_type));
return -1;
}
} else {
@@ -2660,8 +2665,8 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
if (i >= timeout) {
netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
timeout, req->cmpl_ring_req_type,
req->target_id_seq_id, *resp_len);
timeout, le16_to_cpu(req->req_type),
le16_to_cpu(req->seq_id), *resp_len);
return -1;
}
@@ -2675,20 +2680,23 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
if (i >= timeout) {
netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
timeout, req->cmpl_ring_req_type,
req->target_id_seq_id, len, *valid);
timeout, le16_to_cpu(req->req_type),
le16_to_cpu(req->seq_id), len, *valid);
return -1;
}
}
rc = le16_to_cpu(resp->error_code);
if (rc) {
if (rc && !silent)
netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
le16_to_cpu(resp->req_type),
le16_to_cpu(resp->seq_id), rc);
return rc;
}
return 0;
return rc;
}
int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
{
return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
}
int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
@@ -2701,6 +2709,17 @@ int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
return rc;
}
int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
int timeout)
{
int rc;
mutex_lock(&bp->hwrm_cmd_lock);
rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
{
struct hwrm_func_drv_rgtr_input req = {0};
@@ -3517,47 +3536,82 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
}
}
static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs,
u32 buf_tmrs, u16 flags,
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
{
req->flags = cpu_to_le16(flags);
req->num_cmpl_dma_aggr = cpu_to_le16((u16)max_bufs);
req->num_cmpl_dma_aggr_during_int = cpu_to_le16(max_bufs >> 16);
req->cmpl_aggr_dma_tmr = cpu_to_le16((u16)buf_tmrs);
req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmrs >> 16);
/* Minimum time between 2 interrupts set to buf_tmr x 2 */
req->int_lat_tmr_min = cpu_to_le16((u16)buf_tmrs * 2);
req->int_lat_tmr_max = cpu_to_le16((u16)buf_tmrs * 4);
req->num_cmpl_aggr_int = cpu_to_le16((u16)max_bufs * 4);
}
int bnxt_hwrm_set_coal(struct bnxt *bp)
{
int i, rc = 0;
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
req_tx = {0}, *req;
u16 max_buf, max_buf_irq;
u16 buf_tmr, buf_tmr_irq;
u32 flags;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
-1, -1);
bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
/* Each rx completion (2 records) should be DMAed immediately */
max_buf = min_t(u16, bp->coal_bufs / 4, 2);
/* Each rx completion (2 records) should be DMAed immediately.
* DMA 1/4 of the completion buffers at a time.
*/
max_buf = min_t(u16, bp->rx_coal_bufs / 4, 2);
/* max_buf must not be zero */
max_buf = clamp_t(u16, max_buf, 1, 63);
max_buf_irq = clamp_t(u16, bp->coal_bufs_irq, 1, 63);
buf_tmr = max_t(u16, bp->coal_ticks / 4, 1);
buf_tmr_irq = max_t(u16, bp->coal_ticks_irq, 1);
max_buf_irq = clamp_t(u16, bp->rx_coal_bufs_irq, 1, 63);
buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks);
/* buf timer set to 1/4 of interrupt timer */
buf_tmr = max_t(u16, buf_tmr / 4, 1);
buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks_irq);
buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
/* RING_IDLE generates more IRQs for lower latency. Enable it only
* if coal_ticks is less than 25 us.
*/
if (BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks) < 25)
if (bp->rx_coal_ticks < 25)
flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
req.flags = cpu_to_le16(flags);
req.num_cmpl_dma_aggr = cpu_to_le16(max_buf);
req.num_cmpl_dma_aggr_during_int = cpu_to_le16(max_buf_irq);
req.cmpl_aggr_dma_tmr = cpu_to_le16(buf_tmr);
req.cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmr_irq);
req.int_lat_tmr_min = cpu_to_le16(buf_tmr);
req.int_lat_tmr_max = cpu_to_le16(bp->coal_ticks);
req.num_cmpl_aggr_int = cpu_to_le16(bp->coal_bufs);
bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
buf_tmr_irq << 16 | buf_tmr, flags, &req_rx);
/* max_buf must not be zero */
max_buf = clamp_t(u16, bp->tx_coal_bufs, 1, 63);
max_buf_irq = clamp_t(u16, bp->tx_coal_bufs_irq, 1, 63);
buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks);
/* buf timer set to 1/4 of interrupt timer */
buf_tmr = max_t(u16, buf_tmr / 4, 1);
buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks_irq);
buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
buf_tmr_irq << 16 | buf_tmr, flags, &req_tx);
mutex_lock(&bp->hwrm_cmd_lock);
for (i = 0; i < bp->cp_nr_rings; i++) {
req.ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
struct bnxt_napi *bnapi = bp->bnapi[i];
rc = _hwrm_send_message(bp, &req, sizeof(req),
req = &req_rx;
if (!bnapi->rx_ring)
req = &req_tx;
req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
rc = _hwrm_send_message(bp, req, sizeof(*req),
HWRM_CMD_TIMEOUT);
if (rc)
break;
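A side note on the arithmetic in the coalescing hunk above: microsecond values such as rx_coal_ticks are converted to hardware timer ticks with BNXT_USEC_TO_COAL_TIMER (defined further down in this diff as (x) * 25 / 2, i.e. 12.5 timer ticks per microsecond), and the completion DMA buffer timer is then set to a quarter of the interrupt timer with a floor of 1. The following stand-alone user-space sketch only illustrates that arithmetic with the new 12 us rx default; it is not driver code.

#include <stdio.h>

/* Same conversion as the driver macro: 12.5 timer ticks per microsecond. */
#define BNXT_USEC_TO_COAL_TIMER(x) ((x) * 25 / 2)

int main(void)
{
	unsigned int rx_coal_ticks = 12;                            /* usec, new rx default */
	unsigned int tmr = BNXT_USEC_TO_COAL_TIMER(rx_coal_ticks);  /* 150 ticks */
	unsigned int buf_tmr = (tmr / 4) ? (tmr / 4) : 1;           /* 37 ticks, floor of 1 */

	printf("interrupt timer: %u ticks, buffer timer: %u ticks\n", tmr, buf_tmr);
	return 0;
}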
@@ -3766,10 +3820,14 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
resp->hwrm_intf_upd);
netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
}
snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "bc %d.%d.%d rm %d.%d.%d",
snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d/%d.%d.%d",
resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
if (!bp->hwrm_cmd_timeout)
bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
hwrm_ver_get_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -5291,10 +5349,16 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
bp->coal_ticks = BNXT_USEC_TO_COAL_TIMER(4);
bp->coal_bufs = 20;
bp->coal_ticks_irq = BNXT_USEC_TO_COAL_TIMER(1);
bp->coal_bufs_irq = 2;
/* tick values in micro seconds */
bp->rx_coal_ticks = 12;
bp->rx_coal_bufs = 30;
bp->rx_coal_ticks_irq = 1;
bp->rx_coal_bufs_irq = 2;
bp->tx_coal_ticks = 25;
bp->tx_coal_bufs = 30;
bp->tx_coal_ticks_irq = 2;
bp->tx_coal_bufs_irq = 2;
init_timer(&bp->timer);
bp->timer.data = (unsigned long)bp;
@@ -5559,6 +5623,8 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
}
}
}
if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
netdev_info(bp->dev, "Receive PF driver unload event!");
}
#else
@@ -5674,7 +5740,6 @@ static int bnxt_probe_phy(struct bnxt *bp)
{
int rc = 0;
struct bnxt_link_info *link_info = &bp->link_info;
char phy_ver[PHY_VER_STR_LEN];
rc = bnxt_update_link(bp, false);
if (rc) {
@@ -5694,11 +5759,6 @@ static int bnxt_probe_phy(struct bnxt *bp)
link_info->req_duplex = link_info->duplex_setting;
link_info->req_flow_ctrl = link_info->force_pause_setting;
}
snprintf(phy_ver, PHY_VER_STR_LEN, " ph %d.%d.%d",
link_info->phy_ver[0],
link_info->phy_ver[1],
link_info->phy_ver[2]);
strcat(bp->fw_ver_str, phy_ver);
return rc;
}
@@ -477,12 +477,15 @@ struct rx_tpa_end_cmp_ext {
#define RING_CMP(idx) ((idx) & bp->cp_ring_mask)
#define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1))
#define HWRM_CMD_TIMEOUT 500
#define DFLT_HWRM_CMD_TIMEOUT 500
#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
#define HWRM_RESP_ERR_CODE_MASK 0xffff
#define HWRM_RESP_LEN_OFFSET 4
#define HWRM_RESP_LEN_MASK 0xffff0000
#define HWRM_RESP_LEN_SFT 16
#define HWRM_RESP_VALID_MASK 0xff000000
#define HWRM_SEQ_ID_INVALID -1
#define BNXT_HWRM_REQ_MAX_SIZE 128
#define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \
BNXT_HWRM_REQ_MAX_SIZE)
@@ -644,19 +647,6 @@ struct bnxt_irq {
#define INVALID_STATS_CTX_ID -1
struct hwrm_cmd_req_hdr {
#define HWRM_CMPL_RING_MASK 0xffff0000
#define HWRM_CMPL_RING_SFT 16
__le32 cmpl_ring_req_type;
#define HWRM_SEQ_ID_MASK 0xffff
#define HWRM_SEQ_ID_INVALID -1
#define HWRM_RESP_LEN_OFFSET 4
#define HWRM_TARGET_FID_MASK 0xffff0000
#define HWRM_TARGET_FID_SFT 16
__le32 target_id_seq_id;
__le64 resp_addr;
};
struct bnxt_ring_grp_info {
u16 fw_stats_ctx;
u16 fw_grp_id;
@@ -957,6 +947,7 @@ struct bnxt {
void *hwrm_dbg_resp_addr;
dma_addr_t hwrm_dbg_resp_dma_addr;
#define HWRM_DBG_REG_BUF_SIZE 128
int hwrm_cmd_timeout;
struct mutex hwrm_cmd_lock; /* serialize hwrm messages */
struct hwrm_ver_get_output ver_resp;
#define FW_VER_STR_LEN 32
@@ -968,13 +959,17 @@ struct bnxt {
__le16 vxlan_fw_dst_port_id;
u8 nge_port_cnt;
__le16 nge_fw_dst_port_id;
u16 coal_ticks;
u16 coal_ticks_irq;
u16 coal_bufs;
u16 coal_bufs_irq;
u16 rx_coal_ticks;
u16 rx_coal_ticks_irq;
u16 rx_coal_bufs;
u16 rx_coal_bufs_irq;
u16 tx_coal_ticks;
u16 tx_coal_ticks_irq;
u16 tx_coal_bufs;
u16 tx_coal_bufs_irq;
#define BNXT_USEC_TO_COAL_TIMER(x) ((x) * 25 / 2)
#define BNXT_COAL_TIMER_TO_USEC(x) ((x) * 2 / 25)
struct work_struct sp_task;
unsigned long sp_event;
@@ -986,6 +981,7 @@ struct bnxt {
#define BNXT_VXLAN_DEL_PORT_SP_EVENT 5
#define BNXT_RESET_TASK_SP_EVENT 6
#define BNXT_RST_RING_SP_EVENT 7
#define BNXT_HWRM_PF_UNLOAD_SP_EVENT 8
struct bnxt_pf_info pf;
#ifdef CONFIG_BNXT_SRIOV
@@ -1099,6 +1095,7 @@ void bnxt_set_ring_params(struct bnxt *);
void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
int _hwrm_send_message(struct bnxt *, void *, u32, int);
int hwrm_send_message(struct bnxt *, void *, u32, int);
int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
int bnxt_hwrm_set_coal(struct bnxt *);
int bnxt_hwrm_func_qcaps(struct bnxt *);
int bnxt_hwrm_set_pause(struct bnxt *);
@@ -7,6 +7,7 @@
* the Free Software Foundation.
*/
#include <linux/ctype.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
@@ -20,6 +21,8 @@
#include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */
#define FLASH_NVRAM_TIMEOUT ((HWRM_CMD_TIMEOUT) * 100)
static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen);
static u32 bnxt_get_msglevel(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
@@ -41,12 +44,16 @@ static int bnxt_get_coalesce(struct net_device *dev,
memset(coal, 0, sizeof(*coal));
coal->rx_coalesce_usecs =
max_t(u16, BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks), 1);
coal->rx_max_coalesced_frames = bp->coal_bufs / 2;
coal->rx_coalesce_usecs_irq =
max_t(u16, BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks_irq), 1);
coal->rx_max_coalesced_frames_irq = bp->coal_bufs_irq / 2;
coal->rx_coalesce_usecs = bp->rx_coal_ticks;
/* 2 completion records per rx packet */
coal->rx_max_coalesced_frames = bp->rx_coal_bufs / 2;
coal->rx_coalesce_usecs_irq = bp->rx_coal_ticks_irq;
coal->rx_max_coalesced_frames_irq = bp->rx_coal_bufs_irq / 2;
coal->tx_coalesce_usecs = bp->tx_coal_ticks;
coal->tx_max_coalesced_frames = bp->tx_coal_bufs;
coal->tx_coalesce_usecs_irq = bp->tx_coal_ticks_irq;
coal->tx_max_coalesced_frames_irq = bp->tx_coal_bufs_irq;
return 0;
}
@@ -57,11 +64,16 @@ static int bnxt_set_coalesce(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
bp->coal_ticks = BNXT_USEC_TO_COAL_TIMER(coal->rx_coalesce_usecs);
bp->coal_bufs = coal->rx_max_coalesced_frames * 2;
bp->coal_ticks_irq =
BNXT_USEC_TO_COAL_TIMER(coal->rx_coalesce_usecs_irq);
bp->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * 2;
bp->rx_coal_ticks = coal->rx_coalesce_usecs;
/* 2 completion records per rx packet */
bp->rx_coal_bufs = coal->rx_max_coalesced_frames * 2;
bp->rx_coal_ticks_irq = coal->rx_coalesce_usecs_irq;
bp->rx_coal_bufs_irq = coal->rx_max_coalesced_frames_irq * 2;
bp->tx_coal_ticks = coal->tx_coalesce_usecs;
bp->tx_coal_bufs = coal->tx_max_coalesced_frames;
bp->tx_coal_ticks_irq = coal->tx_coalesce_usecs_irq;
bp->tx_coal_bufs_irq = coal->tx_max_coalesced_frames_irq;
if (netif_running(dev))
rc = bnxt_hwrm_set_coal(bp);
@@ -460,10 +472,20 @@ static void bnxt_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct bnxt *bp = netdev_priv(dev);
char *pkglog;
char *pkgver = NULL;
pkglog = kmalloc(BNX_PKG_LOG_MAX_LENGTH, GFP_KERNEL);
if (pkglog)
pkgver = bnxt_get_pkgver(dev, pkglog, BNX_PKG_LOG_MAX_LENGTH);
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
if (pkgver && *pkgver != 0 && isdigit(*pkgver))
snprintf(info->fw_version, sizeof(info->fw_version) - 1,
"%s pkg %s", bp->fw_ver_str, pkgver);
else
strlcpy(info->fw_version, bp->fw_ver_str,
sizeof(info->fw_version));
strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
info->n_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
info->testinfo_len = BNXT_NUM_TESTS(bp);
@@ -471,6 +493,7 @@ static void bnxt_get_drvinfo(struct net_device *dev,
info->eedump_len = 0;
/* TODO CHIMP FW: reg dump details */
info->regdump_len = 0;
kfree(pkglog);
}
static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info)
@@ -1102,6 +1125,85 @@ static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
return rc;
}
static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
u16 ext, u16 *index, u32 *item_length,
u32 *data_length)
{
struct bnxt *bp = netdev_priv(dev);
int rc;
struct hwrm_nvm_find_dir_entry_input req = {0};
struct hwrm_nvm_find_dir_entry_output *output = bp->hwrm_cmd_resp_addr;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_FIND_DIR_ENTRY, -1, -1);
req.enables = 0;
req.dir_idx = 0;
req.dir_type = cpu_to_le16(type);
req.dir_ordinal = cpu_to_le16(ordinal);
req.dir_ext = cpu_to_le16(ext);
req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc == 0) {
if (index)
*index = le16_to_cpu(output->dir_idx);
if (item_length)
*item_length = le32_to_cpu(output->dir_item_length);
if (data_length)
*data_length = le32_to_cpu(output->dir_data_length);
}
return rc;
}
static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
{
char *retval = NULL;
char *p;
char *value;
int field = 0;
if (datalen < 1)
return NULL;
/* null-terminate the log data (removing last '\n'): */
data[datalen - 1] = 0;
for (p = data; *p != 0; p++) {
field = 0;
retval = NULL;
while (*p != 0 && *p != '\n') {
value = p;
while (*p != 0 && *p != '\t' && *p != '\n')
p++;
if (field == desired_field)
retval = value;
if (*p != '\t')
break;
*p = 0;
field++;
p++;
}
if (*p == 0)
break;
*p = 0;
}
return retval;
}
static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen)
{
u16 index = 0;
u32 datalen;
if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
&index, NULL, &datalen) != 0)
return NULL;
memset(buf, 0, buflen);
if (bnxt_get_nvram_item(dev, index, 0, datalen, buf) != 0)
return NULL;
return bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, buf,
datalen);
}
static int bnxt_get_eeprom(struct net_device *dev,
struct ethtool_eeprom *eeprom,
u8 *data)
@@ -50,10 +50,24 @@ enum bnxt_nvm_directory_type {
#define BNX_DIR_ORDINAL_FIRST 0
#define BNX_DIR_EXT_NONE 0
#define BNX_DIR_EXT_INACTIVE (1 << 0)
#define BNX_DIR_EXT_UPDATE (1 << 1)
#define BNX_DIR_ATTR_NONE 0
#define BNX_DIR_ATTR_NO_CHKSUM (1 << 0)
#define BNX_DIR_ATTR_PROP_STREAM (1 << 1)
#define BNX_PKG_LOG_MAX_LENGTH 4096
enum bnxnvm_pkglog_field_index {
BNX_PKG_LOG_FIELD_IDX_INSTALLED_TIMESTAMP = 0,
BNX_PKG_LOG_FIELD_IDX_PKG_DESCRIPTION = 1,
BNX_PKG_LOG_FIELD_IDX_PKG_VERSION = 2,
BNX_PKG_LOG_FIELD_IDX_PKG_TIMESTAMP = 3,
BNX_PKG_LOG_FIELD_IDX_PKG_CHECKSUM = 4,
BNX_PKG_LOG_FIELD_IDX_INSTALLED_ITEMS = 5,
BNX_PKG_LOG_FIELD_IDX_INSTALLED_MASK = 6
};
#endif /* Don't add anything after this line */
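To make the package log format concrete: bnxt_parse_pkglog() earlier in this diff treats the NVRAM package log as newline-separated records of tab-separated fields, so BNX_PKG_LOG_FIELD_IDX_PKG_VERSION (index 2) selects the third field of a record. The following stand-alone sketch walks the fields the same way, using strtok for brevity and a made-up log record; the sample content and the PKG_VERSION_FIELD name are purely illustrative.

#include <stdio.h>
#include <string.h>

/* Third tab-separated field, matching BNX_PKG_LOG_FIELD_IDX_PKG_VERSION above. */
#define PKG_VERSION_FIELD 2

int main(void)
{
	/* Hypothetical single package log record: timestamp, description, version, ... */
	char record[] = "20151218\tBCM95730x package\t1.2.3\t20151217";
	char *field = strtok(record, "\t");
	int idx = 0;

	while (field && idx < PKG_VERSION_FIELD) {
		field = strtok(NULL, "\t");
		idx++;
	}
	printf("pkg version: %s\n", field ? field : "(not found)");
	return 0;
}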
@@ -522,6 +522,46 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
return rc;
}
static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
struct bnxt_vf_info *vf,
u16 event_id)
{
int rc = 0;
struct hwrm_fwd_async_event_cmpl_input req = {0};
struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_async_event_cmpl *async_cmpl;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
if (vf)
req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
else
/* broadcast this async event to all VFs */
req.encap_async_event_target_id = cpu_to_le16(0xffff);
async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
async_cmpl->type =
cpu_to_le16(HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
async_cmpl->event_id = cpu_to_le16(event_id);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc) {
netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
rc);
goto fwd_async_event_cmpl_exit;
}
if (resp->error_code) {
netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
resp->error_code);
rc = -1;
}
fwd_async_event_cmpl_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
void bnxt_sriov_disable(struct bnxt *bp)
{
u16 num_vfs = pci_num_vf(bp->pdev);
@@ -530,6 +570,9 @@ void bnxt_sriov_disable(struct bnxt *bp)
return;
if (pci_vfs_assigned(bp->pdev)) {
bnxt_hwrm_fwd_async_event_cmpl(
bp, NULL,
HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
num_vfs);
} else {
@@ -758,8 +801,8 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
{
int rc = 0;
struct hwrm_cmd_req_hdr *encap_req = vf->hwrm_cmd_req_addr;
u32 req_type = le32_to_cpu(encap_req->cmpl_ring_req_type) & 0xffff;
struct input *encap_req = vf->hwrm_cmd_req_addr;
u32 req_type = le16_to_cpu(encap_req->req_type);
switch (req_type) {
case HWRM_CFA_L2_FILTER_ALLOC:
@@ -809,13 +852,19 @@ void bnxt_update_vf_mac(struct bnxt *bp)
if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
goto update_vf_mac_exit;
if (!is_valid_ether_addr(resp->perm_mac_address))
goto update_vf_mac_exit;
/* Store MAC address from the firmware. There are 2 cases:
* 1. MAC address is valid. It is assigned from the PF and we
* need to override the current VF MAC address with it.
* 2. MAC address is zero. The VF will use a random MAC address by
* default but the stored zero MAC will allow the VF user to change
* the random MAC address using ndo_set_mac_address() if he wants.
*/
if (!ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr))
memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN);
/* overwrite netdev dev_adr with admin VF MAC */
memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
/* overwrite netdev dev_addr with admin VF MAC */
if (is_valid_ether_addr(bp->vf.mac_addr))
memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
update_vf_mac_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
}