Commit bbf33d1d authored by Edwin Peer and committed by David S. Miller

bnxt_en: update all firmware calls to use the new APIs

The conversion follows this general pattern for most of the calls:

1. The input message is changed from a stack variable initialized
using bnxt_hwrm_cmd_hdr_init() to a pointer allocated and initialized
using hwrm_req_init().

2. If we don't need to read the firmware response, the hwrm_send_message()
call is replaced with hwrm_req_send().

3. If we need to read the firmware response, the mutex lock is replaced
by hwrm_req_hold() to hold the response.  When the response is read, the
mutex unlock is replaced by hwrm_req_drop().  A sketch of this pattern
follows this list.
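For illustration, here is a minimal sketch of the resulting pattern for a
call that reads the firmware response.  The helper name
bnxt_hwrm_example_qcfg() is hypothetical; the structures and hwrm_req_*()
calls are the ones used in this patch (modeled on bnxt_hwrm_queue_pfc_qcfg()
in the diff below), with error handling trimmed for brevity:

static int bnxt_hwrm_example_qcfg(struct bnxt *bp, u8 *pfc_en)
{
        struct hwrm_queue_pfcenable_qcfg_output *resp;
        struct hwrm_queue_pfcenable_qcfg_input *req;
        int rc;

        /* step 1: allocate and initialize the request */
        rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCENABLE_QCFG);
        if (rc)
                return rc;

        /* step 3: hold the response buffer because it is read after
         * sending; this replaces taking bp->hwrm_cmd_lock
         */
        resp = hwrm_req_hold(bp, req);

        /* step 2: send, replacing hwrm_send_message() */
        rc = hwrm_req_send(bp, req);
        if (!rc)
                *pfc_en = le32_to_cpu(resp->flags);

        /* release the request and response; replaces the mutex unlock */
        hwrm_req_drop(bp, req);
        return rc;
}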

If additional DMA buffers are needed for firmware response data,
hwrm_req_dma_slice() is used instead of calling dma_alloc_coherent().

Some minor refactoring is also done as part of these conversions.

v2: Fix uninitialized variable warnings in __bnxt_hwrm_get_tx_rings()
and bnxt_approve_mac()
Signed-off-by: Edwin Peer <edwin.peer@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 3c10ed49
......@@ -39,38 +39,43 @@ static int bnxt_queue_to_tc(struct bnxt *bp, u8 queue_id)
static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets)
{
struct hwrm_queue_pri2cos_cfg_input req = {0};
struct hwrm_queue_pri2cos_cfg_input *req;
u8 *pri2cos;
int i;
int rc, i;
rc = hwrm_req_init(bp, req, HWRM_QUEUE_PRI2COS_CFG);
if (rc)
return rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_CFG, -1, -1);
req.flags = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR |
QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN);
req->flags = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR |
QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN);
pri2cos = &req.pri0_cos_queue_id;
pri2cos = &req->pri0_cos_queue_id;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
u8 qidx;
req.enables |= cpu_to_le32(
req->enables |= cpu_to_le32(
QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID << i);
qidx = bp->tc_to_qidx[ets->prio_tc[i]];
pri2cos[i] = bp->q_info[qidx].queue_id;
}
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
return hwrm_req_send(bp, req);
}
static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
{
struct hwrm_queue_pri2cos_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_queue_pri2cos_qcfg_input req = {0};
int rc = 0;
struct hwrm_queue_pri2cos_qcfg_output *resp;
struct hwrm_queue_pri2cos_qcfg_input *req;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
rc = hwrm_req_init(bp, req, HWRM_QUEUE_PRI2COS_QCFG);
if (rc)
return rc;
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
if (!rc) {
u8 *pri2cos = &resp->pri0_cos_queue_id;
int i;
......@@ -84,23 +89,26 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
ets->prio_tc[i] = tc;
}
}
mutex_unlock(&bp->hwrm_cmd_lock);
hwrm_req_drop(bp, req);
return rc;
}
static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
u8 max_tc)
{
struct hwrm_queue_cos2bw_cfg_input req = {0};
struct hwrm_queue_cos2bw_cfg_input *req;
struct bnxt_cos2bw_cfg cos2bw;
void *data;
int i;
int rc, i;
rc = hwrm_req_init(bp, req, HWRM_QUEUE_COS2BW_CFG);
if (rc)
return rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1);
for (i = 0; i < max_tc; i++) {
u8 qidx = bp->tc_to_qidx[i];
req.enables |= cpu_to_le32(
req->enables |= cpu_to_le32(
QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID <<
qidx);
......@@ -121,30 +129,32 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
cpu_to_le32((ets->tc_tx_bw[i] * 100) |
BW_VALUE_UNIT_PERCENT1_100);
}
data = &req.unused_0 + qidx * (sizeof(cos2bw) - 4);
data = &req->unused_0 + qidx * (sizeof(cos2bw) - 4);
memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4);
if (qidx == 0) {
req.queue_id0 = cos2bw.queue_id;
req.unused_0 = 0;
req->queue_id0 = cos2bw.queue_id;
req->unused_0 = 0;
}
}
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
return hwrm_req_send(bp, req);
}
static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
{
struct hwrm_queue_cos2bw_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_queue_cos2bw_qcfg_input req = {0};
struct hwrm_queue_cos2bw_qcfg_output *resp;
struct hwrm_queue_cos2bw_qcfg_input *req;
struct bnxt_cos2bw_cfg cos2bw;
void *data;
int rc, i;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1);
rc = hwrm_req_init(bp, req, HWRM_QUEUE_COS2BW_QCFG);
if (rc)
return rc;
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
if (rc) {
mutex_unlock(&bp->hwrm_cmd_lock);
hwrm_req_drop(bp, req);
return rc;
}
......@@ -168,7 +178,7 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
ets->tc_tx_bw[tc] = cos2bw.bw_weight;
}
}
mutex_unlock(&bp->hwrm_cmd_lock);
hwrm_req_drop(bp, req);
return 0;
}
......@@ -230,11 +240,12 @@ static int bnxt_queue_remap(struct bnxt *bp, unsigned int lltc_mask)
static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc)
{
struct hwrm_queue_pfcenable_cfg_input req = {0};
struct hwrm_queue_pfcenable_cfg_input *req;
struct ieee_ets *my_ets = bp->ieee_ets;
unsigned int tc_mask = 0, pri_mask = 0;
u8 i, pri, lltc_count = 0;
bool need_q_remap = false;
int rc;
if (!my_ets)
return -EINVAL;
......@@ -267,38 +278,43 @@ static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc)
if (need_q_remap)
bnxt_queue_remap(bp, tc_mask);
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1);
req.flags = cpu_to_le32(pri_mask);
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCENABLE_CFG);
if (rc)
return rc;
req->flags = cpu_to_le32(pri_mask);
return hwrm_req_send(bp, req);
}
static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc)
{
struct hwrm_queue_pfcenable_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_queue_pfcenable_qcfg_input req = {0};
struct hwrm_queue_pfcenable_qcfg_output *resp;
struct hwrm_queue_pfcenable_qcfg_input *req;
u8 pri_mask;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1);
rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCENABLE_QCFG);
if (rc)
return rc;
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
if (rc) {
mutex_unlock(&bp->hwrm_cmd_lock);
hwrm_req_drop(bp, req);
return rc;
}
pri_mask = le32_to_cpu(resp->flags);
pfc->pfc_en = pri_mask;
mutex_unlock(&bp->hwrm_cmd_lock);
hwrm_req_drop(bp, req);
return 0;
}
static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
bool add)
{
struct hwrm_fw_set_structured_data_input set = {0};
struct hwrm_fw_get_structured_data_input get = {0};
struct hwrm_fw_set_structured_data_input *set;
struct hwrm_fw_get_structured_data_input *get;
struct hwrm_struct_data_dcbx_app *fw_app;
struct hwrm_struct_hdr *data;
dma_addr_t mapping;
......@@ -308,19 +324,26 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
if (bp->hwrm_spec_code < 0x10601)
return 0;
rc = hwrm_req_init(bp, get, HWRM_FW_GET_STRUCTURED_DATA);
if (rc)
return rc;
hwrm_req_hold(bp, get);
hwrm_req_alloc_flags(bp, get, GFP_KERNEL | __GFP_ZERO);
n = IEEE_8021QAZ_MAX_TCS;
data_len = sizeof(*data) + sizeof(*fw_app) * n;
data = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping,
GFP_KERNEL);
if (!data)
return -ENOMEM;
data = hwrm_req_dma_slice(bp, get, data_len, &mapping);
if (!data) {
rc = -ENOMEM;
goto set_app_exit;
}
bnxt_hwrm_cmd_hdr_init(bp, &get, HWRM_FW_GET_STRUCTURED_DATA, -1, -1);
get.dest_data_addr = cpu_to_le64(mapping);
get.structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP);
get.subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL);
get.count = 0;
rc = hwrm_send_message(bp, &get, sizeof(get), HWRM_CMD_TIMEOUT);
get->dest_data_addr = cpu_to_le64(mapping);
get->structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP);
get->subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL);
get->count = 0;
rc = hwrm_req_send(bp, get);
if (rc)
goto set_app_exit;
......@@ -366,44 +389,49 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
data->len = cpu_to_le16(sizeof(*fw_app) * n);
data->subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL);
bnxt_hwrm_cmd_hdr_init(bp, &set, HWRM_FW_SET_STRUCTURED_DATA, -1, -1);
set.src_data_addr = cpu_to_le64(mapping);
set.data_len = cpu_to_le16(sizeof(*data) + sizeof(*fw_app) * n);
set.hdr_cnt = 1;
rc = hwrm_send_message(bp, &set, sizeof(set), HWRM_CMD_TIMEOUT);
rc = hwrm_req_init(bp, set, HWRM_FW_SET_STRUCTURED_DATA);
if (rc)
goto set_app_exit;
set->src_data_addr = cpu_to_le64(mapping);
set->data_len = cpu_to_le16(sizeof(*data) + sizeof(*fw_app) * n);
set->hdr_cnt = 1;
rc = hwrm_req_send(bp, set);
set_app_exit:
dma_free_coherent(&bp->pdev->dev, data_len, data, mapping);
hwrm_req_drop(bp, get); /* dropping get request and associated slice */
return rc;
}
static int bnxt_hwrm_queue_dscp_qcaps(struct bnxt *bp)
{
struct hwrm_queue_dscp_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_queue_dscp_qcaps_input req = {0};
struct hwrm_queue_dscp_qcaps_output *resp;
struct hwrm_queue_dscp_qcaps_input *req;
int rc;
bp->max_dscp_value = 0;
if (bp->hwrm_spec_code < 0x10800 || BNXT_VF(bp))
return 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP_QCAPS, -1, -1);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
rc = hwrm_req_init(bp, req, HWRM_QUEUE_DSCP_QCAPS);
if (rc)
return rc;
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send_silent(bp, req);
if (!rc) {
bp->max_dscp_value = (1 << resp->num_dscp_bits) - 1;
if (bp->max_dscp_value < 0x3f)
bp->max_dscp_value = 0;
}
mutex_unlock(&bp->hwrm_cmd_lock);
hwrm_req_drop(bp, req);
return rc;
}
static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app,
bool add)
{
struct hwrm_queue_dscp2pri_cfg_input req = {0};
struct hwrm_queue_dscp2pri_cfg_input *req;
struct bnxt_dscp2pri_entry *dscp2pri;
dma_addr_t mapping;
int rc;
......@@ -411,23 +439,25 @@ static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app,
if (bp->hwrm_spec_code < 0x10800)
return 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP2PRI_CFG, -1, -1);
dscp2pri = dma_alloc_coherent(&bp->pdev->dev, sizeof(*dscp2pri),
&mapping, GFP_KERNEL);
if (!dscp2pri)
rc = hwrm_req_init(bp, req, HWRM_QUEUE_DSCP2PRI_CFG);
if (rc)
return rc;
dscp2pri = hwrm_req_dma_slice(bp, req, sizeof(*dscp2pri), &mapping);
if (!dscp2pri) {
hwrm_req_drop(bp, req);
return -ENOMEM;
}
req.src_data_addr = cpu_to_le64(mapping);
req->src_data_addr = cpu_to_le64(mapping);
dscp2pri->dscp = app->protocol;
if (add)
dscp2pri->mask = 0x3f;
else
dscp2pri->mask = 0;
dscp2pri->pri = app->priority;
req.entry_cnt = cpu_to_le16(1);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
dma_free_coherent(&bp->pdev->dev, sizeof(*dscp2pri), dscp2pri,
mapping);
req->entry_cnt = cpu_to_le16(1);
rc = hwrm_req_send(bp, req);
return rc;
}
......
......@@ -355,28 +355,34 @@ static void bnxt_copy_from_nvm_data(union devlink_param_value *dst,
static int bnxt_hwrm_get_nvm_cfg_ver(struct bnxt *bp,
union devlink_param_value *nvm_cfg_ver)
{
struct hwrm_nvm_get_variable_input req = {0};
struct hwrm_nvm_get_variable_input *req;
union bnxt_nvm_data *data;
dma_addr_t data_dma_addr;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1);
data = dma_alloc_coherent(&bp->pdev->dev, sizeof(*data),
&data_dma_addr, GFP_KERNEL);
if (!data)
return -ENOMEM;
rc = hwrm_req_init(bp, req, HWRM_NVM_GET_VARIABLE);
if (rc)
return rc;
data = hwrm_req_dma_slice(bp, req, sizeof(*data), &data_dma_addr);
if (!data) {
rc = -ENOMEM;
goto exit;
}
req.dest_data_addr = cpu_to_le64(data_dma_addr);
req.data_len = cpu_to_le16(BNXT_NVM_CFG_VER_BITS);
req.option_num = cpu_to_le16(NVM_OFF_NVM_CFG_VER);
hwrm_req_hold(bp, req);
req->dest_data_addr = cpu_to_le64(data_dma_addr);
req->data_len = cpu_to_le16(BNXT_NVM_CFG_VER_BITS);
req->option_num = cpu_to_le16(NVM_OFF_NVM_CFG_VER);
rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
rc = hwrm_req_send_silent(bp, req);
if (!rc)
bnxt_copy_from_nvm_data(nvm_cfg_ver, data,
BNXT_NVM_CFG_VER_BITS,
BNXT_NVM_CFG_VER_BYTES);
dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr);
exit:
hwrm_req_drop(bp, req);
return rc;
}
......@@ -563,17 +569,20 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
}
static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
int msg_len, union devlink_param_value *val)
union devlink_param_value *val)
{
struct hwrm_nvm_get_variable_input *req = msg;
struct bnxt_dl_nvm_param nvm_param;
struct hwrm_err_output *resp;
union bnxt_nvm_data *data;
dma_addr_t data_dma_addr;
int idx = 0, rc, i;
/* Get/Set NVM CFG parameter is supported only on PFs */
if (BNXT_VF(bp))
if (BNXT_VF(bp)) {
hwrm_req_drop(bp, req);
return -EPERM;
}
for (i = 0; i < ARRAY_SIZE(nvm_params); i++) {
if (nvm_params[i].id == param_id) {
......@@ -582,18 +591,22 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
}
}
if (i == ARRAY_SIZE(nvm_params))
if (i == ARRAY_SIZE(nvm_params)) {
hwrm_req_drop(bp, req);
return -EOPNOTSUPP;
}
if (nvm_param.dir_type == BNXT_NVM_PORT_CFG)
idx = bp->pf.port_id;
else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)
idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID;
data = dma_alloc_coherent(&bp->pdev->dev, sizeof(*data),
&data_dma_addr, GFP_KERNEL);
if (!data)
data = hwrm_req_dma_slice(bp, req, sizeof(*data), &data_dma_addr);
if (!data) {
hwrm_req_drop(bp, req);
return -ENOMEM;
}
req->dest_data_addr = cpu_to_le64(data_dma_addr);
req->data_len = cpu_to_le16(nvm_param.nvm_num_bits);
......@@ -602,26 +615,24 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
if (idx)
req->dimensions = cpu_to_le16(1);
resp = hwrm_req_hold(bp, req);
if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) {
bnxt_copy_to_nvm_data(data, val, nvm_param.nvm_num_bits,
nvm_param.dl_num_bytes);
rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
rc = hwrm_req_send(bp, msg);
} else {
rc = hwrm_send_message_silent(bp, msg, msg_len,
HWRM_CMD_TIMEOUT);
rc = hwrm_req_send_silent(bp, msg);
if (!rc) {
bnxt_copy_from_nvm_data(val, data,
nvm_param.nvm_num_bits,
nvm_param.dl_num_bytes);
} else {
struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
if (resp->cmd_err ==
NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST)
rc = -EOPNOTSUPP;
}
}
dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr);
hwrm_req_drop(bp, req);
if (rc == -EACCES)
netdev_err(bp->dev, "PF does not have admin privileges to modify NVM config\n");
return rc;
......@@ -630,15 +641,17 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
{
struct hwrm_nvm_get_variable_input req = {0};
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
struct hwrm_nvm_get_variable_input *req;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1);
rc = bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
if (!rc)
if (id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK)
ctx->val.vbool = !ctx->val.vbool;
rc = hwrm_req_init(bp, req, HWRM_NVM_GET_VARIABLE);
if (rc)
return rc;
rc = bnxt_hwrm_nvm_req(bp, id, req, &ctx->val);
if (!rc && id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK)
ctx->val.vbool = !ctx->val.vbool;
return rc;
}
......@@ -646,15 +659,18 @@ static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
{
struct hwrm_nvm_set_variable_input req = {0};
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
struct hwrm_nvm_set_variable_input *req;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_SET_VARIABLE, -1, -1);
rc = hwrm_req_init(bp, req, HWRM_NVM_SET_VARIABLE);
if (rc)
return rc;
if (id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK)
ctx->val.vbool = !ctx->val.vbool;
return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val);
return bnxt_hwrm_nvm_req(bp, id, req, &ctx->val);
}
static int bnxt_dl_msix_validate(struct devlink *dl, u32 id,
......
......@@ -86,24 +86,28 @@ static void bnxt_ptp_get_current_time(struct bnxt *bp)
static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts)
{
struct hwrm_port_ts_query_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_port_ts_query_input req = {0};
struct hwrm_port_ts_query_output *resp;
struct hwrm_port_ts_query_input *req;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_TS_QUERY, -1, -1);
req.flags = cpu_to_le32(flags);
rc = hwrm_req_init(bp, req, HWRM_PORT_TS_QUERY);
if (rc)
return rc;
req->flags = cpu_to_le32(flags);
if ((flags & PORT_TS_QUERY_REQ_FLAGS_PATH) ==
PORT_TS_QUERY_REQ_FLAGS_PATH_TX) {
req.enables = cpu_to_le16(BNXT_PTP_QTS_TX_ENABLES);
req.ptp_seq_id = cpu_to_le32(bp->ptp_cfg->tx_seqid);
req.ptp_hdr_offset = cpu_to_le16(bp->ptp_cfg->tx_hdr_off);
req.ts_req_timeout = cpu_to_le16(BNXT_PTP_QTS_TIMEOUT);
req->enables = cpu_to_le16(BNXT_PTP_QTS_TX_ENABLES);
req->ptp_seq_id = cpu_to_le32(bp->ptp_cfg->tx_seqid);
req->ptp_hdr_offset = cpu_to_le16(bp->ptp_cfg->tx_hdr_off);
req->ts_req_timeout = cpu_to_le16(BNXT_PTP_QTS_TIMEOUT);
}
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
if (!rc)
*ts = le64_to_cpu(resp->ptp_msg_ts);
mutex_unlock(&bp->hwrm_cmd_lock);
hwrm_req_drop(bp, req);
return rc;
}
......@@ -144,14 +148,17 @@ static int bnxt_ptp_adjfreq(struct ptp_clock_info *ptp_info, s32 ppb)
{
struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
ptp_info);
struct hwrm_port_mac_cfg_input req = {0};
struct hwrm_port_mac_cfg_input *req;
struct bnxt *bp = ptp->bp;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1);
req.ptp_freq_adj_ppb = cpu_to_le32(ppb);
req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
if (rc)
return rc;
req->ptp_freq_adj_ppb = cpu_to_le32(ppb);
req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB);
rc = hwrm_req_send(ptp->bp, req);
if (rc)
netdev_err(ptp->bp->dev,
"ptp adjfreq failed. rc = %d\n", rc);
......@@ -187,7 +194,7 @@ void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2)
static int bnxt_ptp_cfg_pin(struct bnxt *bp, u8 pin, u8 usage)
{
struct hwrm_func_ptp_pin_cfg_input req = {0};
struct hwrm_func_ptp_pin_cfg_input *req;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
u8 state = usage != BNXT_PPS_PIN_NONE;
u8 *pin_state, *pin_usg;
......@@ -199,18 +206,21 @@ static int bnxt_ptp_cfg_pin(struct bnxt *bp, u8 pin, u8 usage)
return -EOPNOTSUPP;
}
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_PTP_PIN_CFG, -1, -1);
rc = hwrm_req_init(ptp->bp, req, HWRM_FUNC_PTP_PIN_CFG);
if (rc)
return rc;
enables = (FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_STATE |
FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_USAGE) << (pin * 2);
req.enables = cpu_to_le32(enables);
req->enables = cpu_to_le32(enables);
pin_state = &req.pin0_state;
pin_usg = &req.pin0_usage;
pin_state = &req->pin0_state;
pin_usg = &req->pin0_usage;
*(pin_state + (pin * 2)) = state;
*(pin_usg + (pin * 2)) = usage;
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
rc = hwrm_req_send(ptp->bp, req);
if (rc)
return rc;
......@@ -222,12 +232,16 @@ static int bnxt_ptp_cfg_pin(struct bnxt *bp, u8 pin, u8 usage)
static int bnxt_ptp_cfg_event(struct bnxt *bp, u8 event)
{
struct hwrm_func_ptp_cfg_input req = {0};
struct hwrm_func_ptp_cfg_input *req;
int rc;
rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_CFG);
if (rc)
return rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_PTP_CFG, -1, -1);
req.enables = cpu_to_le16(FUNC_PTP_CFG_REQ_ENABLES_PTP_PPS_EVENT);
req.ptp_pps_event = event;
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req->enables = cpu_to_le16(FUNC_PTP_CFG_REQ_ENABLES_PTP_PPS_EVENT);
req->ptp_pps_event = event;
return hwrm_req_send(bp, req);
}
void bnxt_ptp_reapply_pps(struct bnxt *bp)
......@@ -278,7 +292,7 @@ static int bnxt_get_target_cycles(struct bnxt_ptp_cfg *ptp, u64 target_ns,
static int bnxt_ptp_perout_cfg(struct bnxt_ptp_cfg *ptp,
struct ptp_clock_request *rq)
{
struct hwrm_func_ptp_cfg_input req = {0};
struct hwrm_func_ptp_cfg_input *req;
struct bnxt *bp = ptp->bp;
struct timespec64 ts;
u64 target_ns, delta;
......@@ -293,20 +307,22 @@ static int bnxt_ptp_perout_cfg(struct bnxt_ptp_cfg *ptp,
if (rc)
return rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_PTP_CFG, -1, -1);
rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_CFG);
if (rc)
return rc;
enables = FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PERIOD |
FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_UP |
FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PHASE;
req.enables = cpu_to_le16(enables);
req.ptp_pps_event = 0;
req.ptp_freq_adj_dll_source = 0;
req.ptp_freq_adj_dll_phase = 0;
req.ptp_freq_adj_ext_period = cpu_to_le32(NSEC_PER_SEC);
req.ptp_freq_adj_ext_up = 0;
req.ptp_freq_adj_ext_phase_lower = cpu_to_le32(delta);
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req->enables = cpu_to_le16(enables);
req->ptp_pps_event = 0;
req->ptp_freq_adj_dll_source = 0;
req->ptp_freq_adj_dll_phase = 0;
req->ptp_freq_adj_ext_period = cpu_to_le32(NSEC_PER_SEC);
req->ptp_freq_adj_ext_up = 0;
req->ptp_freq_adj_ext_phase_lower = cpu_to_le32(delta);
return hwrm_req_send(bp, req);
}
static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info,
......@@ -363,11 +379,15 @@ static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info,
static int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
struct hwrm_port_mac_cfg_input req = {0};
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
struct hwrm_port_mac_cfg_input *req;
u32 flags = 0;
int rc;
rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
if (rc)
return rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1);
if (ptp->rx_filter)
flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
else
......@@ -376,11 +396,11 @@ static int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
else
flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
req.flags = cpu_to_le32(flags);
req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
req.rx_ts_capture_ptp_msg_type = cpu_to_le16(ptp->rxctl);
req->flags = cpu_to_le32(flags);
req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
req->rx_ts_capture_ptp_msg_type = cpu_to_le16(ptp->rxctl);
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
return hwrm_req_send(bp, req);
}
int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
......@@ -631,11 +651,10 @@ static int bnxt_ptp_verify(struct ptp_clock_info *ptp_info, unsigned int pin,
return -EOPNOTSUPP;
}
/* bp->hwrm_cmd_lock held by the caller */
static int bnxt_ptp_pps_init(struct bnxt *bp)
{
struct hwrm_func_ptp_pin_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_func_ptp_pin_qcfg_input req = {0};
struct hwrm_func_ptp_pin_qcfg_output *resp;
struct hwrm_func_ptp_pin_qcfg_input *req;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
struct ptp_clock_info *ptp_info;
struct bnxt_pps *pps_info;
......@@ -643,11 +662,16 @@ static int bnxt_ptp_pps_init(struct bnxt *bp)
u32 i, rc;
/* Query current/default PIN CFG */
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_PTP_PIN_QCFG, -1, -1);
rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_PIN_QCFG);
if (rc)
return rc;
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc || !resp->num_pins)
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
if (rc || !resp->num_pins) {
hwrm_req_drop(bp, req);
return -EOPNOTSUPP;
}
ptp_info = &ptp->ptp_info;
pps_info = &ptp->pps_info;
......@@ -656,8 +680,10 @@ static int bnxt_ptp_pps_init(struct bnxt *bp)
ptp_info->pin_config = kcalloc(ptp_info->n_pins,
sizeof(*ptp_info->pin_config),
GFP_KERNEL);
if (!ptp_info->pin_config)
if (!ptp_info->pin_config) {
hwrm_req_drop(bp, req);
return -ENOMEM;
}
/* Report the TSIO capability to kernel */
pin_usg = &resp->pin0_usage;
......@@ -675,6 +701,7 @@ static int bnxt_ptp_pps_init(struct bnxt *bp)
pps_info->pins[i].usage = *pin_usg;
}
hwrm_req_drop(bp, req);
/* Only 1 each of ext_ts and per_out pins is available in HW */
ptp_info->n_ext_ts = 1;
......
......@@ -238,27 +238,33 @@ static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
struct output *resp;
struct input *req;
u32 resp_len;
int rc;
if (ulp_id != BNXT_ROCE_ULP && bp->fw_reset_state)
return -EBUSY;
mutex_lock(&bp->hwrm_cmd_lock);
req = fw_msg->msg;
req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
rc = _hwrm_send_message(bp, fw_msg->msg, fw_msg->msg_len,
fw_msg->timeout);
if (!rc) {
struct output *resp = bp->hwrm_cmd_resp_addr;
u32 len = le16_to_cpu(resp->resp_len);
rc = hwrm_req_init(bp, req, 0 /* don't care */);
if (rc)
return rc;
if (fw_msg->resp_max_len < len)
len = fw_msg->resp_max_len;
rc = hwrm_req_replace(bp, req, fw_msg->msg, fw_msg->msg_len);
if (rc)
return rc;
memcpy(fw_msg->resp, resp, len);
hwrm_req_timeout(bp, req, fw_msg->timeout);
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
resp_len = le16_to_cpu(resp->resp_len);
if (resp_len) {
if (fw_msg->resp_max_len < resp_len)
resp_len = fw_msg->resp_max_len;
memcpy(fw_msg->resp, resp, resp_len);
}
mutex_unlock(&bp->hwrm_cmd_lock);
hwrm_req_drop(bp, req);
return rc;
}
......
......@@ -28,38 +28,40 @@
static int hwrm_cfa_vfr_alloc(struct bnxt *bp, u16 vf_idx,
u16 *tx_cfa_action, u16 *rx_cfa_code)
{
struct hwrm_cfa_vfr_alloc_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_cfa_vfr_alloc_input req = { 0 };
struct hwrm_cfa_vfr_alloc_output *resp;
struct hwrm_cfa_vfr_alloc_input *req;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_VFR_ALLOC, -1, -1);
req.vf_id = cpu_to_le16(vf_idx);
sprintf(req.vfr_name, "vfr%d", vf_idx);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
rc = hwrm_req_init(bp, req, HWRM_CFA_VFR_ALLOC);
if (!rc) {
*tx_cfa_action = le16_to_cpu(resp->tx_cfa_action);
*rx_cfa_code = le16_to_cpu(resp->rx_cfa_code);
netdev_dbg(bp->dev, "tx_cfa_action=0x%x, rx_cfa_code=0x%x",
*tx_cfa_action, *rx_cfa_code);
} else {
netdev_info(bp->dev, "%s error rc=%d\n", __func__, rc);
req->vf_id = cpu_to_le16(vf_idx);
sprintf(req->vfr_name, "vfr%d", vf_idx);
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
if (!rc) {
*tx_cfa_action = le16_to_cpu(resp->tx_cfa_action);
*rx_cfa_code = le16_to_cpu(resp->rx_cfa_code);
netdev_dbg(bp->dev, "tx_cfa_action=0x%x, rx_cfa_code=0x%x",
*tx_cfa_action, *rx_cfa_code);
}
hwrm_req_drop(bp, req);
}
mutex_unlock(&bp->hwrm_cmd_lock);
if (rc)
netdev_info(bp->dev, "%s error rc=%d\n", __func__, rc);
return rc;
}
static int hwrm_cfa_vfr_free(struct bnxt *bp, u16 vf_idx)
{
struct hwrm_cfa_vfr_free_input req = { 0 };
struct hwrm_cfa_vfr_free_input *req;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_VFR_FREE, -1, -1);
sprintf(req.vfr_name, "vfr%d", vf_idx);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
rc = hwrm_req_init(bp, req, HWRM_CFA_VFR_FREE);
if (!rc) {
sprintf(req->vfr_name, "vfr%d", vf_idx);
rc = hwrm_req_send(bp, req);
}
if (rc)
netdev_info(bp->dev, "%s error rc=%d\n", __func__, rc);
return rc;
......@@ -68,17 +70,18 @@ static int hwrm_cfa_vfr_free(struct bnxt *bp, u16 vf_idx)
static int bnxt_hwrm_vfr_qcfg(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
u16 *max_mtu)
{
struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_func_qcfg_input req = {0};
struct hwrm_func_qcfg_output *resp;
struct hwrm_func_qcfg_input *req;
u16 mtu;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
req.fid = cpu_to_le16(bp->pf.vf[vf_rep->vf_idx].fw_fid);
mutex_lock(&bp->hwrm_cmd_lock);
rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
if (rc)
return rc;
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
req->fid = cpu_to_le16(bp->pf.vf[vf_rep->vf_idx].fw_fid);
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
if (!rc) {
mtu = le16_to_cpu(resp->max_mtu_configured);
if (!mtu)
......@@ -86,7 +89,7 @@ static int bnxt_hwrm_vfr_qcfg(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
else
*max_mtu = mtu;
}
mutex_unlock(&bp->hwrm_cmd_lock);
hwrm_req_drop(bp, req);
return rc;
}
......