提交 3a873683 编写于 作者: L liaoguojia 提交者: Xie XiuQi

net: hns3: Fix all mismatches with the specifications checked by reviewbot_c.

driver inclusion
category: bugfix
bugzilla: NA
CVE: NA

Using reviewbot_c to check the code within the hns3 module,
we found some mismatches with the rules enforced by the tool.
Now we fix all of these mismatches, including comment format,
alignment, magic numbers, etc.
Signed-off-by: liaoguojia <liaoguojia@huawei.com>
Reviewed-by: lipeng <lipeng321@huawei.com>
Reviewed-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
上级 2bcb9bfd
......@@ -48,7 +48,7 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */
HCLGE_MBX_PUSH_LINK_STATUS, /* (M7 -> PF) get port link status */
HCLGE_MBX_NCSI_ERROR = 202, /* (M7 -> PF) receive a NCSI error */
HCLGE_MBX_NCSI_ERROR = 202, /* (M7 -> PF) receive a NCSI error */
};
/* below are per-VF mac-vlan subcodes */
......
......@@ -104,7 +104,7 @@ int hnae3_register_client(struct hnae3_client *client)
{
struct hnae3_client *client_tmp;
struct hnae3_ae_dev *ae_dev;
int ret = 0;
int ret;
if (!client)
return -ENODEV;
......@@ -179,7 +179,7 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
const struct pci_device_id *id;
struct hnae3_ae_dev *ae_dev;
struct hnae3_client *client;
int ret = 0;
int ret;
if (!ae_algo)
return;
......@@ -273,7 +273,7 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
const struct pci_device_id *id;
struct hnae3_ae_algo *ae_algo;
struct hnae3_client *client;
int ret = 0;
int ret;
if (!ae_dev)
return -ENODEV;
......
......@@ -60,10 +60,10 @@
BIT(HNAE3_DEV_SUPPORT_ROCE_B))
#define hnae3_dev_roce_supported(hdev) \
hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)
hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)
#define hnae3_dev_dcb_supported(hdev) \
hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
#define hnae3_dev_fd_supported(hdev) \
hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B)
......@@ -631,7 +631,7 @@ struct hnae3_handle {
struct pci_dev *pdev;
void *priv;
struct hnae3_ae_algo *ae_algo; /* the class who provides this handle */
u64 flags; /* Indicate the status for this handle*/
u64 flags; /* Indicate the status for this handle */
union {
struct net_device *netdev; /* first member */
......
......@@ -226,9 +226,9 @@ static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
/* initialize the configuration for interrupt coalescing.
* 1. GL (Interrupt Gap Limiter)
* 2. RL (Interrupt Rate Limiter)
*
* Default: enable interrupt coalescing self-adaptive and GL
*/
/* Default: enable interrupt coalescing self-adaptive and GL */
tqp_vector->tx_group.coal.gl_adapt_enable = 1;
tqp_vector->rx_group.coal.gl_adapt_enable = 1;
......@@ -1019,7 +1019,7 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
*/
if (skb->protocol == htons(ETH_P_8021Q)) {
if (handle->port_base_vlan_state ==
HNAE3_PORT_BASE_VLAN_DISABLE){
HNAE3_PORT_BASE_VLAN_DISABLE) {
hns3_set_field(*out_vlan_flag, HNS3_TXD_OVLAN_B,
1);
*out_vtag = vlan_tag;
......
......@@ -301,7 +301,7 @@ struct hns3_desc_cb {
dma_addr_t dma; /* dma address of this desc */
void *buf; /* cpu addr for a desc */
/* priv data for the desc, e.g. skb when use with ip stack*/
/* priv data for the desc, e.g. skb when use with ip stack */
void *priv;
u32 page_offset;
u32 length; /* length of the buffer */
......
......@@ -140,6 +140,7 @@ static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode)
static void hns3_lp_setup_skb(struct sk_buff *skb)
{
#define HNS3_NIC_LB_DST_MAC_ADDR 0x1f
struct net_device *ndev = skb->dev;
unsigned char *packet;
struct ethhdr *ethh;
......@@ -161,7 +162,7 @@ static void hns3_lp_setup_skb(struct sk_buff *skb)
* before the packet reaches mac or serdes, which will defect
* the purpose of mac or serdes selftest.
*/
ethh->h_dest[5] += 0x1f;
ethh->h_dest[5] += HNS3_NIC_LB_DST_MAC_ADDR;
eth_zero_addr(ethh->h_source);
ethh->h_proto = htons(ETH_P_ARP);
skb_reset_mac_header(skb);
......@@ -624,7 +625,7 @@ static void hns3_get_ksettings(struct hnae3_handle *h,
&cmd->base.speed,
&cmd->base.duplex);
/* 2.get link mode*/
/* 2.get link mode */
if (ops->get_link_mode)
ops->get_link_mode(h,
cmd->link_modes.supported,
......@@ -740,7 +741,7 @@ static int hns3_set_link_ksettings(struct net_device *netdev,
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
int ret = 0;
int ret;
/* Chip don't support this mode. */
if (cmd->base.speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF)
......
/* SPDX-License-Identifier: GPL-2.0+ */
/* Copyright (c) 2018-2019 Hisilicon Limited. */
/* This must be outside ifdef _HNS3_TRACE_H */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hns3
......@@ -55,12 +56,12 @@ DEFINE_EVENT(hns3_skb_template, hns3_over_8bd,
TP_ARGS(skb));
DEFINE_EVENT(hns3_skb_template, hns3_gro,
TP_PROTO(struct sk_buff *skb),
TP_ARGS(skb));
TP_PROTO(struct sk_buff *skb),
TP_ARGS(skb));
DEFINE_EVENT(hns3_skb_template, hns3_tso,
TP_PROTO(struct sk_buff *skb),
TP_ARGS(skb));
TP_PROTO(struct sk_buff *skb),
TP_ARGS(skb));
TRACE_EVENT(hns3_tx_desc,
TP_PROTO(struct hns3_enet_ring *ring),
......
......@@ -310,11 +310,10 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
} while (timeout < hw->cmq.tx_timeout);
}
if (!complete) {
if (!complete)
retval = -EBADE;
} else {
else
retval = hclge_cmd_check_retval(hw, desc, num, ntc);
}
/* Clean the command send queue */
handle = hclge_cmd_csq_clean(hw);
......
......@@ -5,6 +5,7 @@
#define __HCLGE_CMD_H
#include <linux/types.h>
#include <linux/io.h>
#include <linux/etherdevice.h>
#define HCLGE_CMDQ_TX_TIMEOUT 30000
......@@ -752,7 +753,7 @@ struct hclge_mac_vlan_idx_rd_cmd {
u8 rsv0;
u8 resp_code;
__le16 vlan_tag;
u8 mac_add[6];
u8 mac_addr[ETH_ALEN];
__le16 port;
u8 entry_type;
u8 mc_mac_en;
......@@ -766,7 +767,7 @@ struct hclge_mac_ethertype_idx_rd_cmd {
u8 flags;
u8 resp_code;
__le16 vlan_tag;
u8 mac_add[6];
u8 mac_addr[ETH_ALEN];
__le16 index;
__le16 ethter_type;
__le16 egress_port;
......
......@@ -99,7 +99,8 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
desc = desc_src;
for (i = 0; i < max; i++) {
((i > 0) && ((i % entries_per_desc) == 0)) ? desc++ : desc;
if (i > 0 && (i % entries_per_desc) == 0)
desc++;
if (dfx_message->flag)
dev_info(&hdev->pdev->dev, "%s: 0x%x\n",
dfx_message->message,
......@@ -733,7 +734,8 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
dev_info(&hdev->pdev->dev,
"rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i + 4,
"rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
i + HCLGE_TC_NUM_ONE_DESC,
rx_priv_wl->tc_wl[i].high, rx_priv_wl->tc_wl[i].low);
cmd = HCLGE_OPC_RX_COM_THRD_ALLOC;
......@@ -755,7 +757,8 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
dev_info(&hdev->pdev->dev,
"rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i + 4,
"rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
i + HCLGE_TC_NUM_ONE_DESC,
rx_com_thrd->com_thrd[i].high,
rx_com_thrd->com_thrd[i].low);
......@@ -825,9 +828,9 @@ static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev)
memset(printf_buf, 0, HCLGE_DBG_BUF_LEN);
snprintf(printf_buf, HCLGE_DBG_BUF_LEN,
"%02u |%02x:%02x:%02x:%02x:%02x:%02x|",
req0->index, req0->mac_add[0], req0->mac_add[1],
req0->mac_add[2], req0->mac_add[3], req0->mac_add[4],
req0->mac_add[5]);
req0->index, req0->mac_addr[0], req0->mac_addr[1],
req0->mac_addr[2], req0->mac_addr[3],
req0->mac_addr[4], req0->mac_addr[5]);
snprintf(printf_buf + strlen(printf_buf),
HCLGE_DBG_BUF_LEN - strlen(printf_buf),
......
......@@ -2860,9 +2860,9 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
* defer the processing of the mailbox events. Since, we would have not
* cleared RX CMDQ event this time we would receive again another
* interrupt from H/W just for the mailbox.
*
* check for vector0 reset event sources
*/
/* check for vector0 reset event sources */
if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
......@@ -3099,7 +3099,7 @@ int hclge_notify_client(struct hclge_dev *hdev,
static int hclge_notify_roce_client(struct hclge_dev *hdev,
enum hnae3_reset_notify_type type)
enum hnae3_reset_notify_type type)
{
struct hnae3_client *client = hdev->roce_client;
int ret = 0;
......@@ -3109,7 +3109,7 @@ static int hclge_notify_roce_client(struct hclge_dev *hdev,
return 0;
if (!client->ops->reset_notify)
return -EOPNOTSUPP;
return -EOPNOTSUPP;
for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
struct hnae3_handle *handle = &hdev->vport[i].roce;
......@@ -3124,7 +3124,7 @@ static int hclge_notify_roce_client(struct hclge_dev *hdev,
}
return ret;
}
}
static int hclge_reset_wait(struct hclge_dev *hdev)
......@@ -7470,7 +7470,7 @@ static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
}
static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
bool is_kill, u16 vlan, u8 qos,
bool is_kill, u16 vlan,
__be16 proto)
{
#define HCLGE_MAX_VF_BYTES 16
......@@ -7581,7 +7581,7 @@ static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
}
static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
u16 vport_id, u16 vlan_id, u8 qos,
u16 vport_id, u16 vlan_id,
bool is_kill)
{
u16 vport_idx, vport_num = 0;
......@@ -7591,7 +7591,7 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
return 0;
ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
0, proto);
proto);
if (ret) {
dev_err(&hdev->pdev->dev,
"Set %d vport vlan filter config fail, ret =%d.\n",
......@@ -7654,7 +7654,7 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
*/
if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
vlan_id, 0, is_kill);
vlan_id, is_kill);
writen_to_tbl = true;
}
......@@ -7920,7 +7920,7 @@ static void hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
hclge_set_vlan_filter_hw(hdev,
htons(ETH_P_8021Q),
vport->vport_id,
vlan->vlan_id, 0,
vlan->vlan_id,
false);
vlan->hd_tbl_status = true;
......@@ -7939,7 +7939,7 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
hclge_set_vlan_filter_hw(hdev,
htons(ETH_P_8021Q),
vport->vport_id,
vlan_id, 0,
vlan_id,
true);
list_del(&vlan->node);
......@@ -7959,7 +7959,7 @@ void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
hclge_set_vlan_filter_hw(hdev,
htons(ETH_P_8021Q),
vport->vport_id,
vlan->vlan_id, 0,
vlan->vlan_id,
true);
vlan->hd_tbl_status = false;
......@@ -8006,7 +8006,7 @@ void hclge_restore_vlan_table(struct hnae3_handle *handle)
if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
vport->vport_id, vlan_id, qos,
vport->vport_id, vlan_id,
false);
continue;
}
......@@ -8016,7 +8016,7 @@ void hclge_restore_vlan_table(struct hnae3_handle *handle)
hclge_set_vlan_filter_hw(hdev,
htons(ETH_P_8021Q),
vport->vport_id,
vlan->vlan_id, 0,
vlan->vlan_id,
false);
}
}
......@@ -8056,14 +8056,14 @@ static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
htons(new_info->vlan_proto),
vport->vport_id,
new_info->vlan_tag,
new_info->qos, false);
false);
} else {
ret = hclge_set_vlan_filter_hw(hdev,
htons(old_info->vlan_proto),
vport->vport_id,
old_info->vlan_tag,
old_info->qos, true);
true);
if (ret)
return ret;
......@@ -8090,10 +8090,10 @@ int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
hclge_set_vlan_filter_hw(hdev, old_vlan_info->vlan_proto,
vport->vport_id,
old_vlan_info->vlan_tag,
old_vlan_info->qos, true);
true);
hclge_set_vlan_filter_hw(hdev, vlan_info->vlan_proto,
vport->vport_id, vlan_info->vlan_tag,
vlan_info->qos, false);
false);
goto update;
}
......@@ -8204,7 +8204,7 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
while (vlan_id != VLAN_N_VID) {
ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
vport->vport_id, vlan_id,
0, true);
true);
if (ret && ret != -EINVAL)
return;
......
......@@ -506,7 +506,7 @@ static void hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
hclge_reset_vf_queue(vport, queue_id);
/* send response msg to VF after queue reset complete*/
/* send response msg to VF after queue reset complete */
hclge_gen_resp_to_vf(vport, mbx_req, 0, NULL, 0);
}
......@@ -551,7 +551,8 @@ static int hclge_get_queue_id_in_pf(struct hclge_vport *vport,
qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
memcpy(resp_data, &qid_in_pf, sizeof(qid_in_pf));
return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data, 2);
return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
sizeof(resp_data));
}
static int hclge_get_rss_key(struct hclge_vport *vport,
......@@ -773,8 +774,10 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
ret);
break;
case HCLGE_MBX_NCSI_ERROR:
ae_dev->ops->set_default_reset_request(ae_dev, HNAE3_GLOBAL_RESET);
dev_warn(&hdev->pdev->dev, "requesting reset due to NCSI error\n");
ae_dev->ops->set_default_reset_request(ae_dev,
HNAE3_GLOBAL_RESET);
dev_warn(&hdev->pdev->dev,
"requesting reset due to NCSI error\n");
ae_dev->ops->reset_event(hdev->pdev, NULL);
break;
case HCLGE_MBX_PUSH_LINK_STATUS:
......
......@@ -404,7 +404,7 @@ static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
struct hclge_port_shapping_cmd *shap_cfg_cmd;
struct hclge_desc desc;
u32 shapping_para = 0;
u32 shapping_para;
u8 ir_u, ir_b, ir_s;
int ret;
......
......@@ -44,7 +44,7 @@ static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
{
struct hclgevf_dev *hdev = container_of(hw, struct hclgevf_dev, hw);
struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
int clean = 0;
int clean;
u32 head;
head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
......
......@@ -1706,6 +1706,7 @@ static void hclgevf_service_timer(struct timer_list *t)
static void hclgevf_reset_service_task(struct work_struct *work)
{
#define HCLGEVF_RESET_ATTEMPTS_CNT 3
struct hclgevf_dev *hdev =
container_of(work, struct hclgevf_dev, rst_service_task);
int ret;
......@@ -1750,16 +1751,15 @@ static void hclgevf_reset_service_task(struct work_struct *work)
* 1b and 2. cases but we will not get any intimation about 1a
* from PF as cmdq would be in unreliable state i.e. mailbox
* communication between PF and VF would be broken.
*/
/* if we are never geting into pending state it means either:
*
* if we are never geting into pending state it means either:
* 1. PF is not receiving our request which could be due to IMP
* reset
* 2. PF is screwed
* We cannot do much for 2. but to check first we can try reset
* our PCIe + stack and see if it alleviates the problem.
*/
if (hdev->reset_attempts > 3) {
if (hdev->reset_attempts > HCLGEVF_RESET_ATTEMPTS_CNT) {
/* prepare for full reset of stack + pcie interface */
set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
......
......@@ -71,7 +71,7 @@
#define HCLGEVF_S_IP_BIT BIT(3)
#define HCLGEVF_V_TAG_BIT BIT(4)
#define HCLGEVF_STATS_TIMER_INTERVAL (36)
#define HCLGEVF_STATS_TIMER_INTERVAL 36
enum hclgevf_evt_cause {
HCLGEVF_VECTOR0_EVENT_RST,
......
......@@ -316,7 +316,7 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
case HCLGE_MBX_LINK_STAT_MODE:
idx = (u8)le16_to_cpu(msg_q[1]);
if (idx)
memcpy(&hdev->hw.mac.supported , &msg_q[2],
memcpy(&hdev->hw.mac.supported, &msg_q[2],
sizeof(unsigned long));
else
memcpy(&hdev->hw.mac.advertising, &msg_q[2],
......
......@@ -182,7 +182,8 @@ struct tc_mqprio_qopt_offload {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0))
#define is_signed_type(type) (((type)(-1)) < (type)1)
#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type)))
#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 \
- is_signed_type(type)))
#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
#define type_min(T) ((T)((T)-type_max(T)-(T)1))
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册