Commit d545fbaa authored by liaoguojia, committed by Xie XiuQi

net: hns3: fix all mismatches against the coding specifications checked by reviewbot_c

driver inclusion
category: bugfix
bugzilla: NA
CVE: NA

reviewbot_c was used to check the code of the hns3 module and
found several mismatches against the rules enforced by the tool.
This patch fixes all of them, including comment format,
alignment and magic numbers.
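
As a minimal, standalone sketch of the magic-number cleanup (the struct and
macro names here are illustrative only; the real hunks below use __le32 and
the HCLGE_* names):

/* before: the array length is a bare magic number */
struct cfg_param_cmd_old {
	unsigned int offset;
	unsigned int param[4];
};

/* after: the length gets a named macro, mirroring HCLGE_CFG_CMD_CNT below */
#define CFG_CMD_CNT	4

struct cfg_param_cmd_new {
	unsigned int offset;
	unsigned int param[CFG_CMD_CNT];
};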

Feature or Bugfix: Bugfix
Signed-off-by: liaoguojia <liaoguojia@huawei.com>
Reviewed-by: lipeng <lipeng321@huawei.com>
Reviewed-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 683857e3
......@@ -104,7 +104,6 @@ int hnae3_register_client(struct hnae3_client *client)
{
struct hnae3_client *client_tmp;
struct hnae3_ae_dev *ae_dev;
int ret;
if (!client)
return -ENODEV;
......@@ -123,7 +122,8 @@ int hnae3_register_client(struct hnae3_client *client)
/* if the client could not be initialized on current port, for
* any error reasons, move on to next available port
*/
ret = hnae3_init_client_instance(client, ae_dev);
int ret = hnae3_init_client_instance(client, ae_dev);
if (ret)
dev_err(&ae_dev->pdev->dev,
"match and instantiation failed for port, ret = %d\n",
......
......@@ -980,10 +980,7 @@ static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
static int hns3_fill_desc_vtags(struct sk_buff *skb,
struct hns3_enet_ring *tx_ring,
u32 *inner_vlan_flag,
u32 *out_vlan_flag,
u16 *inner_vtag,
u16 *out_vtag)
struct vlan_flag *vflags)
{
#define HNS3_TX_VLAN_PRIO_SHIFT 13
......@@ -1020,17 +1017,18 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
if (skb->protocol == htons(ETH_P_8021Q)) {
if (handle->port_base_vlan_state ==
HNAE3_PORT_BASE_VLAN_DISABLE) {
hns3_set_field(*out_vlan_flag, HNS3_TXD_OVLAN_B,
1);
*out_vtag = vlan_tag;
hns3_set_field(vflags->ol_type_vlan_len_msec,
HNS3_TXD_OVLAN_B, 1);
vflags->out_vtag = vlan_tag;
} else {
hns3_set_field(*inner_vlan_flag,
hns3_set_field(vflags->type_cs_vlan_tso,
HNS3_TXD_VLAN_B, 1);
*inner_vtag = vlan_tag;
vflags->inner_vtag = vlan_tag;
}
} else {
hnae3_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
*inner_vtag = vlan_tag;
hnae3_set_bit(vflags->type_cs_vlan_tso,
HNS3_TXD_VLAN_B, 1);
vflags->inner_vtag = vlan_tag;
}
} else if (skb->protocol == htons(ETH_P_8021Q)) {
struct vlan_ethhdr *vhdr;
......@@ -1062,18 +1060,18 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
if (type == DESC_TYPE_SKB) {
struct sk_buff *skb = (struct sk_buff *)priv;
u32 ol_type_vlan_len_msec = 0;
u32 type_cs_vlan_tso = 0;
struct vlan_flag vflags;
u32 paylen = skb->len;
u16 inner_vtag = 0;
u16 out_vtag = 0;
u16 mss = 0;
int ret;
ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
&ol_type_vlan_len_msec,
&inner_vtag, &out_vtag);
if (unlikely(ret))
vflags.ol_type_vlan_len_msec = 0;
vflags.type_cs_vlan_tso = 0;
vflags.inner_vtag = 0;
vflags.out_vtag = 0;
ret = hns3_fill_desc_vtags(skb, ring, &vflags);
if (unlikely(ret))
return ret;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
......@@ -1086,25 +1084,26 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
return ret;
ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
&type_cs_vlan_tso,
&ol_type_vlan_len_msec);
&vflags.type_cs_vlan_tso,
&vflags.ol_type_vlan_len_msec);
if (unlikely(ret))
return ret;
ret = hns3_set_tso(skb, &paylen, &mss,
&type_cs_vlan_tso);
&vflags.type_cs_vlan_tso);
if (unlikely(ret))
return ret;
}
/* Set txbd */
desc->tx.ol_type_vlan_len_msec =
cpu_to_le32(ol_type_vlan_len_msec);
desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso);
cpu_to_le32(vflags.ol_type_vlan_len_msec);
desc->tx.type_cs_vlan_tso_len = cpu_to_le32(
vflags.type_cs_vlan_tso);
desc->tx.paylen = cpu_to_le32(paylen);
desc->tx.mss = cpu_to_le16(mss);
desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
desc->tx.vlan_tag = cpu_to_le16(vflags.inner_vtag);
desc->tx.outer_vlan_tag = cpu_to_le16(vflags.out_vtag);
dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
} else {
......@@ -4258,8 +4257,8 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h)
static void hns3_store_coal(struct hns3_nic_priv *priv)
{
/* ethtool only support setting and querying one coal
* configuation for now, so save the vector 0' coal
* configuation here in order to restore it.
* configuration for now, so save the vector 0' coal
* configuration here in order to restore it.
*/
memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
sizeof(struct hns3_enet_coalesce));
......
......@@ -614,6 +614,13 @@ static inline bool hns3_nic_resetting(struct net_device *netdev)
return test_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
}
struct vlan_flag {
u32 type_cs_vlan_tso;
u32 ol_type_vlan_len_msec;
u16 inner_vtag;
u16 out_vtag;
};
#define hns3_write_dev(a, reg, value) \
hns3_write_reg((a)->io_base, (reg), (value))
......
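
The struct vlan_flag above lets hns3_fill_desc_vtags() take one pointer
instead of the former four out-parameters. A self-contained userspace sketch
of the same pattern (simplified types and a hypothetical body, not the driver
code, which also programs the HNS3_TXD_* descriptor fields):

#include <stdint.h>
#include <stdio.h>

/* mirrors struct vlan_flag, with plain fixed-width types */
struct vlan_flag {
	uint32_t type_cs_vlan_tso;
	uint32_t ol_type_vlan_len_msec;
	uint16_t inner_vtag;
	uint16_t out_vtag;
};

/* one out-parameter replaces the former four pointers */
static int fill_desc_vtags(struct vlan_flag *vflags, uint16_t vlan_tag)
{
	vflags->inner_vtag = vlan_tag;	/* hypothetical, simplified body */
	return 0;
}

int main(void)
{
	struct vlan_flag vflags = { 0 };

	if (fill_desc_vtags(&vflags, 100))
		return 1;
	printf("inner vtag = %u\n", (unsigned int)vflags.inner_vtag);
	return 0;
}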
......@@ -3,11 +3,12 @@
#ifndef __HCLGE_CMD_H
#define __HCLGE_CMD_H
#include <linux/etherdevice.h>
#include <linux/types.h>
#include <linux/io.h>
#include <linux/etherdevice.h>
#define HCLGE_CMDQ_TX_TIMEOUT 30000
#define HCLGE_DESC_DATA_LEN 6
struct hclge_dev;
struct hclge_desc {
......@@ -19,7 +20,7 @@ struct hclge_desc {
__le16 flag;
__le16 retval;
__le16 rsv;
__le32 data[6];
__le32 data[HCLGE_DESC_DATA_LEN];
};
struct hclge_cmq_ring {
......@@ -431,8 +432,10 @@ struct hclge_rx_pkt_buf_cmd {
#define HCLGE_PF_MAC_NUM_MASK 0x3
#define HCLGE_PF_STATE_MAIN BIT(HCLGE_PF_STATE_MAIN_B)
#define HCLGE_PF_STATE_DONE BIT(HCLGE_PF_STATE_DONE_B)
#define HCLGE_VF_RST_STATUS_CMD 4
struct hclge_func_status_cmd {
__le32 vf_rst_state[4];
__le32 vf_rst_state[HCLGE_VF_RST_STATUS_CMD];
u8 pf_state;
u8 mac_id;
u8 rsv1;
......@@ -487,11 +490,12 @@ struct hclge_pf_res_cmd {
#define HCLGE_CFG_SPEED_ABILITY_M GENMASK(7, 0)
#define HCLGE_CFG_UMV_TBL_SPACE_S 16
#define HCLGE_CFG_UMV_TBL_SPACE_M GENMASK(31, 16)
#define HCLGE_CFG_CMD_CNT 4
struct hclge_cfg_param_cmd {
__le32 offset;
__le32 rsv;
__le32 param[4];
__le32 param[HCLGE_CFG_CMD_CNT];
};
#define HCLGE_MAC_MODE 0x0
......@@ -788,20 +792,24 @@ struct hclge_vlan_filter_ctrl_cmd {
u8 rsv2[19];
};
#define HCLGE_VLAN_OFFSET_BITMAP 20
struct hclge_vlan_filter_pf_cfg_cmd {
u8 vlan_offset;
u8 vlan_cfg;
u8 rsv[2];
u8 vlan_offset_bitmap[20];
u8 vlan_offset_bitmap[HCLGE_VLAN_OFFSET_BITMAP];
};
#define HCLGE_MAX_VF_BYTES 16
struct hclge_vlan_filter_vf_cfg_cmd {
__le16 vlan_id;
u8 resp_code;
u8 rsv;
u8 vlan_cfg;
u8 rsv1[3];
u8 vf_bitmap[16];
u8 vf_bitmap[HCLGE_MAX_VF_BYTES];
};
#define HCLGE_ACCEPT_TAG1_B 0
......@@ -811,6 +819,7 @@ struct hclge_vlan_filter_vf_cfg_cmd {
#define HCLGE_CFG_NIC_ROCE_SEL_B 4
#define HCLGE_ACCEPT_TAG2_B 5
#define HCLGE_ACCEPT_UNTAG2_B 6
#define HCLGE_VF_NUM_PER_BYTE 8
struct hclge_vport_vtag_tx_cfg_cmd {
u8 vport_vlan_cfg;
......@@ -818,7 +827,7 @@ struct hclge_vport_vtag_tx_cfg_cmd {
u8 rsv1[2];
__le16 def_vlan_tag1;
__le16 def_vlan_tag2;
u8 vf_bitmap[8];
u8 vf_bitmap[HCLGE_VF_NUM_PER_BYTE];
u8 rsv2[8];
};
......@@ -830,7 +839,7 @@ struct hclge_vport_vtag_rx_cfg_cmd {
u8 vport_vlan_cfg;
u8 vf_offset;
u8 rsv1[6];
u8 vf_bitmap[8];
u8 vf_bitmap[HCLGE_VF_NUM_PER_BYTE];
u8 rsv2[8];
};
......@@ -912,6 +921,13 @@ struct hclge_serdes_lb_cmd {
u8 rsv[21];
};
struct hclge_dbg_reg_common_msg {
int msg_num;
int offset;
enum hclge_opcode_type cmd;
};
#define HCLGE_DEFAULT_TX_BUF 0x4000 /* 16k bytes */
#define HCLGE_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */
#define HCLGE_DEFAULT_DV 0xA000 /* 40k byte */
......
......@@ -11,7 +11,7 @@
static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset)
{
struct hclge_desc desc[4];
struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
int entries_per_desc;
int index;
int ret;
......@@ -54,8 +54,8 @@ static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
struct hclge_dbg_dfx_message *dfx_message,
const char *cmd_buf, int msg_num,
int offset, enum hclge_opcode_type cmd)
const char *cmd_buf,
struct hclge_dbg_reg_common_msg *reg_msg)
{
struct hclge_desc *desc_src;
struct hclge_desc *desc;
......@@ -68,10 +68,10 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
ret = kstrtouint(cmd_buf, 0, &index);
index = (ret != 0) ? 0 : index;
bd_num = hclge_dbg_get_dfx_bd_num(hdev, offset);
bd_num = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset);
if (bd_num <= 0) {
dev_err(&hdev->pdev->dev, "get cmd(%d) bd num(%d) failed\n",
offset, bd_num);
reg_msg->offset, bd_num);
return;
}
......@@ -83,7 +83,7 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
}
desc = desc_src;
ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, cmd);
ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, reg_msg->cmd);
if (ret != HCLGE_CMD_EXEC_SUCCESS) {
kfree(desc_src);
return;
......@@ -91,8 +91,7 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
entries_per_desc = ARRAY_SIZE(desc->data);
max = (bd_num * entries_per_desc) <= msg_num ?
(bd_num * entries_per_desc) : msg_num;
max = min(bd_num * entries_per_desc, reg_msg->msg_num);
desc = desc_src;
for (i = 0; i < max; i++) {
......@@ -221,92 +220,125 @@ static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf)
{
int msg_num;
struct hclge_dbg_reg_common_msg reg_msg;
if (strncmp(cmd_buf, "bios common", 11) == 0) {
msg_num = sizeof(hclge_dbg_bios_common_reg) /
sizeof(struct hclge_dbg_dfx_message);
reg_msg.msg_num = sizeof(hclge_dbg_bios_common_reg) /
sizeof(struct hclge_dbg_dfx_message);
reg_msg.offset = HCLGE_DBG_DFX_BIOS_OFFSET;
reg_msg.cmd = HCLGE_OPC_DFX_BIOS_COMMON_REG;
hclge_dbg_dump_reg_common(hdev, hclge_dbg_bios_common_reg,
&cmd_buf[sizeof("bios common")],
msg_num, HCLGE_DBG_DFX_BIOS_OFFSET,
HCLGE_OPC_DFX_BIOS_COMMON_REG);
&reg_msg);
} else if (strncmp(cmd_buf, "ssu", 3) == 0) {
msg_num = sizeof(hclge_dbg_ssu_reg_0) /
sizeof(struct hclge_dbg_dfx_message);
reg_msg.msg_num = sizeof(hclge_dbg_ssu_reg_0) /
sizeof(struct hclge_dbg_dfx_message);
reg_msg.offset = HCLGE_DBG_DFX_SSU_0_OFFSET;
reg_msg.cmd = HCLGE_OPC_DFX_SSU_REG_0;
hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_0,
&cmd_buf[sizeof("ssu")], msg_num,
HCLGE_DBG_DFX_SSU_0_OFFSET,
HCLGE_OPC_DFX_SSU_REG_0);
&cmd_buf[sizeof("ssu")],
&reg_msg);
reg_msg.msg_num = sizeof(hclge_dbg_ssu_reg_1) /
sizeof(struct hclge_dbg_dfx_message);
reg_msg.offset = HCLGE_DBG_DFX_SSU_1_OFFSET;
reg_msg.cmd = HCLGE_OPC_DFX_SSU_REG_1;
msg_num = sizeof(hclge_dbg_ssu_reg_1) /
sizeof(struct hclge_dbg_dfx_message);
hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_1,
&cmd_buf[sizeof("ssu")], msg_num,
HCLGE_DBG_DFX_SSU_1_OFFSET,
HCLGE_OPC_DFX_SSU_REG_1);
&cmd_buf[sizeof("ssu")],
&reg_msg);
reg_msg.msg_num = sizeof(hclge_dbg_ssu_reg_2) /
sizeof(struct hclge_dbg_dfx_message);
reg_msg.offset = HCLGE_DBG_DFX_SSU_2_OFFSET;
reg_msg.cmd = HCLGE_OPC_DFX_SSU_REG_2;
msg_num = sizeof(hclge_dbg_ssu_reg_2) /
sizeof(struct hclge_dbg_dfx_message);
hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_2,
&cmd_buf[sizeof("ssu")], msg_num,
HCLGE_DBG_DFX_SSU_2_OFFSET,
HCLGE_OPC_DFX_SSU_REG_2);
&cmd_buf[sizeof("ssu")],
&reg_msg);
} else if (strncmp(cmd_buf, "igu egu", 7) == 0) {
msg_num = sizeof(hclge_dbg_igu_egu_reg) /
sizeof(struct hclge_dbg_dfx_message);
reg_msg.msg_num = sizeof(hclge_dbg_igu_egu_reg) /
sizeof(struct hclge_dbg_dfx_message);
reg_msg.offset = HCLGE_DBG_DFX_IGU_OFFSET;
reg_msg.cmd = HCLGE_OPC_DFX_IGU_EGU_REG;
hclge_dbg_dump_reg_common(hdev, hclge_dbg_igu_egu_reg,
&cmd_buf[sizeof("igu egu")], msg_num,
HCLGE_DBG_DFX_IGU_OFFSET,
HCLGE_OPC_DFX_IGU_EGU_REG);
&cmd_buf[sizeof("igu egu")],
&reg_msg);
} else if (strncmp(cmd_buf, "rpu", 3) == 0) {
msg_num = sizeof(hclge_dbg_rpu_reg_0) /
sizeof(struct hclge_dbg_dfx_message);
reg_msg.msg_num = sizeof(hclge_dbg_rpu_reg_0) /
sizeof(struct hclge_dbg_dfx_message);
reg_msg.offset = HCLGE_DBG_DFX_RPU_0_OFFSET;
reg_msg.cmd = HCLGE_OPC_DFX_RPU_REG_0;
hclge_dbg_dump_reg_common(hdev, hclge_dbg_rpu_reg_0,
&cmd_buf[sizeof("rpu")], msg_num,
HCLGE_DBG_DFX_RPU_0_OFFSET,
HCLGE_OPC_DFX_RPU_REG_0);
&cmd_buf[sizeof("rpu")],
&reg_msg);
reg_msg.msg_num = sizeof(hclge_dbg_rpu_reg_1) /
sizeof(struct hclge_dbg_dfx_message);
reg_msg.offset = HCLGE_DBG_DFX_RPU_1_OFFSET;
reg_msg.cmd = HCLGE_OPC_DFX_RPU_REG_1;
msg_num = sizeof(hclge_dbg_rpu_reg_1) /
sizeof(struct hclge_dbg_dfx_message);
hclge_dbg_dump_reg_common(hdev, hclge_dbg_rpu_reg_1,
&cmd_buf[sizeof("rpu")], msg_num,
HCLGE_DBG_DFX_RPU_1_OFFSET,
HCLGE_OPC_DFX_RPU_REG_1);
&cmd_buf[sizeof("rpu")],
&reg_msg);
} else if (strncmp(cmd_buf, "ncsi", 4) == 0) {
msg_num = sizeof(hclge_dbg_ncsi_reg) /
sizeof(struct hclge_dbg_dfx_message);
reg_msg.msg_num = sizeof(hclge_dbg_ncsi_reg) /
sizeof(struct hclge_dbg_dfx_message);
reg_msg.offset = HCLGE_DBG_DFX_NCSI_OFFSET;
reg_msg.cmd = HCLGE_OPC_DFX_NCSI_REG;
hclge_dbg_dump_reg_common(hdev, hclge_dbg_ncsi_reg,
&cmd_buf[sizeof("ncsi")], msg_num,
HCLGE_DBG_DFX_NCSI_OFFSET,
HCLGE_OPC_DFX_NCSI_REG);
&cmd_buf[sizeof("ncsi")],
&reg_msg);
} else if (strncmp(cmd_buf, "rtc", 3) == 0) {
msg_num = sizeof(hclge_dbg_rtc_reg) /
sizeof(struct hclge_dbg_dfx_message);
reg_msg.msg_num = sizeof(hclge_dbg_rtc_reg) /
sizeof(struct hclge_dbg_dfx_message);
reg_msg.offset = HCLGE_DBG_DFX_RTC_OFFSET;
reg_msg.cmd = HCLGE_OPC_DFX_RTC_REG;
hclge_dbg_dump_reg_common(hdev, hclge_dbg_rtc_reg,
&cmd_buf[sizeof("rtc")], msg_num,
HCLGE_DBG_DFX_RTC_OFFSET,
HCLGE_OPC_DFX_RTC_REG);
&cmd_buf[sizeof("rtc")],
&reg_msg);
} else if (strncmp(cmd_buf, "ppp", 3) == 0) {
msg_num = sizeof(hclge_dbg_ppp_reg) /
sizeof(struct hclge_dbg_dfx_message);
reg_msg.msg_num = sizeof(hclge_dbg_ppp_reg) /
sizeof(struct hclge_dbg_dfx_message);
reg_msg.offset = HCLGE_DBG_DFX_PPP_OFFSET;
reg_msg.cmd = HCLGE_OPC_DFX_PPP_REG;
hclge_dbg_dump_reg_common(hdev, hclge_dbg_ppp_reg,
&cmd_buf[sizeof("ppp")], msg_num,
HCLGE_DBG_DFX_PPP_OFFSET,
HCLGE_OPC_DFX_PPP_REG);
&cmd_buf[sizeof("ppp")],
&reg_msg);
} else if (strncmp(cmd_buf, "rcb", 3) == 0) {
msg_num = sizeof(hclge_dbg_rcb_reg) /
sizeof(struct hclge_dbg_dfx_message);
reg_msg.msg_num = sizeof(hclge_dbg_rcb_reg) /
sizeof(struct hclge_dbg_dfx_message);
reg_msg.offset = HCLGE_DBG_DFX_RCB_OFFSET;
reg_msg.cmd = HCLGE_OPC_DFX_RCB_REG;
hclge_dbg_dump_reg_common(hdev, hclge_dbg_rcb_reg,
&cmd_buf[sizeof("rcb")], msg_num,
HCLGE_DBG_DFX_RCB_OFFSET,
HCLGE_OPC_DFX_RCB_REG);
&cmd_buf[sizeof("rcb")],
&reg_msg);
} else if (strncmp(cmd_buf, "tqp", 3) == 0) {
msg_num = sizeof(hclge_dbg_tqp_reg) /
sizeof(struct hclge_dbg_dfx_message);
reg_msg.msg_num = sizeof(hclge_dbg_tqp_reg) /
sizeof(struct hclge_dbg_dfx_message);
reg_msg.offset = HCLGE_DBG_DFX_TQP_OFFSET;
reg_msg.cmd = HCLGE_OPC_DFX_TQP_REG;
hclge_dbg_dump_reg_common(hdev, hclge_dbg_tqp_reg,
&cmd_buf[sizeof("tqp")], msg_num,
HCLGE_DBG_DFX_TQP_OFFSET,
HCLGE_OPC_DFX_TQP_REG);
&cmd_buf[sizeof("tqp")],
&reg_msg);
} else if (strncmp(cmd_buf, "dcb", 3) == 0) {
hclge_dbg_dump_dcb(hdev, &cmd_buf[sizeof("dcb")]);
} else {
......
......@@ -64,9 +64,11 @@ struct hclge_dbg_bitmap_cmd {
};
};
#define HCLGE_DBG_MAX_DFX_MSG_LEN 60
struct hclge_dbg_dfx_message {
int flag;
char message[60];
char message[HCLGE_DBG_MAX_DFX_MSG_LEN];
};
#pragma pack()
......
......@@ -7491,7 +7491,6 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
bool is_kill, u16 vlan,
__be16 proto)
{
#define HCLGE_MAX_VF_BYTES 16
struct hclge_vlan_filter_vf_cfg_cmd *req0;
struct hclge_vlan_filter_vf_cfg_cmd *req1;
struct hclge_desc desc[2];
......@@ -9645,7 +9644,7 @@ static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
u32 type_num)
{
u32 entries_per_desc, desc_index, index, offset, i;
struct hclge_desc desc[4];
struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
int ret;
ret = hclge_query_bd_num_cmd_send(hdev, desc);
......
......@@ -142,6 +142,8 @@
#define HCLGE_PHY_MDIX_STATUS_B 6
#define HCLGE_PHY_SPEED_DUP_RESOLVE_B 11
#define HCLGE_GET_DFX_REG_TYPE_CNT 4
/* Factor used to calculate offset and bitmap of VF num */
#define HCLGE_VF_NUM_PER_CMD 64
#define HCLGE_VF_NUM_PER_BYTE 8
......
......@@ -404,8 +404,8 @@ static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
struct hclge_port_shapping_cmd *shap_cfg_cmd;
struct hclge_desc desc;
u32 shapping_para;
u8 ir_u, ir_b, ir_s;
u32 shapping_para;
int ret;
ret = hclge_shaper_para_calc(hdev->hw.mac.speed,
......
......@@ -1714,7 +1714,7 @@ static void hclgevf_service_timer(struct timer_list *t)
static void hclgevf_reset_service_task(struct work_struct *work)
{
#define HCLGEVF_RESET_ATTEMPTS_CNT 3
#define HCLGEVF_MAX_RESET_ATTEMPTS_CNT 3
struct hclgevf_dev *hdev =
container_of(work, struct hclgevf_dev, rst_service_task);
int ret;
......@@ -1767,7 +1767,7 @@ static void hclgevf_reset_service_task(struct work_struct *work)
* We cannot do much for 2. but to check first we can try reset
* our PCIe + stack and see if it alleviates the problem.
*/
if (hdev->reset_attempts > HCLGEVF_RESET_ATTEMPTS_CNT) {
if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
/* prepare for full reset of stack + pcie interface */
set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
......
......@@ -71,7 +71,7 @@
#define HCLGEVF_S_IP_BIT BIT(3)
#define HCLGEVF_V_TAG_BIT BIT(4)
#define HCLGEVF_STATS_TIMER_INTERVAL 36
#define HCLGEVF_STATS_TIMER_INTERVAL 36U
enum hclgevf_evt_cause {
HCLGEVF_VECTOR0_EVENT_RST,
......