提交 9228d6c1 编写于 作者: Y Yang Yingliang 提交者: Xie XiuQi

driver: hns3: update hns3 driver from driver team

driver inclusion
category: feature

-----------------------------------------

Based on dde21a4eedd17955f173f055e2d702dadb1e70ea
("performance optimizations")
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
上级 45cfdc31
......@@ -41,6 +41,11 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_SET_ALIVE, /* (VF -> PF) set alive state */
HCLGE_MBX_SET_MTU, /* (VF -> PF) set mtu */
HCLGE_MBX_GET_QID_IN_PF, /* (VF -> PF) get queue id in pf */
HCLGE_MBX_LINK_STAT_MODE, /* (PF -> VF) link mode has changed */
HCLGE_MBX_GET_LINK_MODE, /* (VF -> PF) get the link mode of pf */
HLCGE_MBX_PUSH_VLAN_INFO, /* (PF -> VF) push port base vlan */
HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */
};
/* below are per-VF mac-vlan subcodes */
......@@ -58,10 +63,12 @@ enum hclge_mbx_vlan_cfg_subcode {
HCLGE_MBX_VLAN_FILTER = 0, /* set vlan filter */
HCLGE_MBX_VLAN_TX_OFF_CFG, /* set tx side vlan offload */
HCLGE_MBX_VLAN_RX_OFF_CFG, /* set rx side vlan offload */
HCLGE_MBX_PORT_BASE_VLAN_CFG, /* set port base vlan configuration */
HCLGE_MBX_GET_PORT_BASE_VLAN_STATE,/* get port base vlan state */
};
#define HCLGE_MBX_MAX_MSG_SIZE 16
#define HCLGE_MBX_MAX_RESP_DATA_SIZE 8
#define HCLGE_MBX_MAX_RESP_DATA_SIZE 16
#define HCLGE_MBX_RING_MAP_BASIC_MSG_NUM 3
#define HCLGE_MBX_RING_NODE_VARIABLE_NUM 3
......
......@@ -146,6 +146,13 @@ enum hnae3_flr_state {
HNAE3_FLR_DONE,
};
/* State of the port-based (PF-pushed) VLAN configuration for a handle.
 * Used to tell a VF whether the PF changed its port base VLAN and how.
 */
enum hnae3_port_base_vlan_state {
HNAE3_PORT_BASE_VLAN_DISABLE,	/* no port base vlan in effect */
HNAE3_PORT_BASE_VLAN_ENABLE,	/* port base vlan newly enabled */
HNAE3_PORT_BASE_VLAN_MODIFY,	/* existing port base vlan changed */
HNAE3_PORT_BASE_VLAN_NOCHANGE,	/* configuration left as-is */
};
struct hnae3_vector_info {
u8 __iomem *io_addr;
int vector;
......@@ -459,7 +466,7 @@ struct hnae3_ae_ops {
bool (*get_hw_reset_stat)(struct hnae3_handle *handle);
bool (*ae_dev_resetting)(struct hnae3_handle *handle);
unsigned long (*ae_dev_reset_cnt)(struct hnae3_handle *handle);
int (*set_gro_en)(struct hnae3_handle *handle, int enable);
int (*set_gro_en)(struct hnae3_handle *handle, bool enable);
void (*enable_timer_task)(struct hnae3_handle *handle, bool enable);
int (*dbg_run_cmd)(struct hnae3_handle *handle, char *cmd_buf);
pci_ers_result_t (*handle_hw_ras_error)(struct hnae3_ae_dev *ae_dev);
......@@ -582,6 +589,8 @@ struct hnae3_handle {
u32 numa_node_mask; /* for multi-chip support */
enum hnae3_port_base_vlan_state port_base_vlan_state;
u8 netdev_flags;
struct dentry *hnae3_dbgfs;
......
......@@ -184,6 +184,8 @@ enum hns3_nic_state {
#define HNS3_TXD_MSS_S 0
#define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S)
#define HNS3_TX_LAST_SIZE_M 0xffff
#define HNS3_VECTOR_TX_IRQ BIT_ULL(0)
#define HNS3_VECTOR_RX_IRQ BIT_ULL(1)
......@@ -191,6 +193,7 @@ enum hns3_nic_state {
#define HNS3_VECTOR_INITED 1
#define HNS3_MAX_BD_SIZE 65535
#define HNS3_MAX_BD_SIZE_OFFSET 16
#define HNS3_MAX_BD_PER_FRAG 8
#define HNS3_MAX_BD_PER_PKT MAX_SKB_FRAGS
......@@ -441,11 +444,8 @@ struct hns3_nic_ring_data {
};
struct hns3_nic_ops {
int (*fill_desc)(struct hns3_enet_ring *ring, void *priv,
int size, int frag_end, enum hns_desc_type type);
int (*maybe_stop_tx)(struct sk_buff **out_skb,
int *bnum, struct hns3_enet_ring *ring);
void (*get_rxd_bnum)(u32 bnum_flag, int *out_bnum);
};
enum hns3_flow_level_range {
......
......@@ -470,8 +470,15 @@ static void hns3_get_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
struct hns3_nic_priv *priv = h->priv;
u64 *p = data;
if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
hns3_nic_resetting(netdev)) {
netdev_err(netdev, "dev resetting!");
return;
}
if (!h->ae_algo->ops->get_stats || !h->ae_algo->ops->update_stats) {
netdev_err(netdev, "could not get any statistics\n");
return;
......@@ -1077,7 +1084,7 @@ static void hns3_get_regs(struct net_device *netdev,
{
struct hnae3_handle *h = hns3_get_handle(netdev);
if (!h->ae_algo->ops->get_regs)
if (!h->ae_algo->ops->get_regs || !data)
return;
h->ae_algo->ops->get_regs(h, &cmd->version, data);
......@@ -1096,11 +1103,13 @@ static int hns3_set_phys_id(struct net_device *netdev,
struct ethtool_ops hns3vf_ethtool_ops = {
.get_drvinfo = hns3_get_drvinfo,
.get_link = hns3_get_link,
.get_ringparam = hns3_get_ringparam,
.set_ringparam = hns3_set_ringparam,
.get_strings = hns3_get_strings,
.get_ethtool_stats = hns3_get_stats,
.get_sset_count = hns3_get_sset_count,
.get_channels = hns3_get_channels,
.get_rxnfc = hns3_get_rxnfc,
.set_rxnfc = hns3_set_rxnfc,
.get_rxfh_key_size = hns3_get_rss_key_size,
......@@ -1108,10 +1117,8 @@ struct ethtool_ops hns3vf_ethtool_ops = {
.get_rxfh = hns3_get_rss,
.set_rxfh = hns3_set_rss,
.get_link_ksettings = hns3_get_link_ksettings,
.get_channels = hns3_get_channels,
.get_coalesce = hns3_get_coalesce,
.set_coalesce = hns3_set_coalesce,
.get_link = hns3_get_link,
.get_regs_len = hns3_get_regs_len,
.get_regs = hns3_get_regs,
};
......@@ -1127,6 +1134,8 @@ struct ethtool_ops hns3_ethtool_ops = {
.get_strings = hns3_get_strings,
.get_ethtool_stats = hns3_get_stats,
.get_sset_count = hns3_get_sset_count,
.get_channels = hns3_get_channels,
.set_channels = hns3_set_channels,
.get_rxnfc = hns3_get_rxnfc,
.set_rxnfc = hns3_set_rxnfc,
.get_rxfh_key_size = hns3_get_rss_key_size,
......@@ -1136,8 +1145,6 @@ struct ethtool_ops hns3_ethtool_ops = {
.get_link_ksettings = hns3_get_link_ksettings,
.set_link_ksettings = hns3_set_link_ksettings,
.nway_reset = hns3_nway_reset,
.get_channels = hns3_get_channels,
.set_channels = hns3_set_channels,
.get_coalesce = hns3_get_coalesce,
.set_coalesce = hns3_set_coalesce,
.get_regs_len = hns3_get_regs_len,
......
......@@ -184,6 +184,43 @@ static bool hclge_is_special_opcode(u16 opcode)
return false;
}
/* hclge_cmd_check_retval - collect hardware write-back results for a chain
 * of command descriptors.
 * @hw: pointer to the hw struct
 * @desc: caller's descriptor array; each entry is overwritten with the
 *        descriptor the hardware wrote back into the csq ring
 * @num: number of descriptors in the chain
 * @ntc: in/out csq ring index of the first descriptor; advanced (with
 *       wrap-around) past the chain on return
 *
 * Returns 0 on success or a negative errno mapped from the firmware
 * return code. NOTE: the returned value reflects the retval of the LAST
 * descriptor processed (matches the pre-existing behaviour of the send
 * path); hw->cmq.last_status is likewise updated per descriptor.
 */
static int hclge_cmd_check_retval(struct hclge_hw *hw,
				  struct hclge_desc *desc,
				  int num, int *ntc)
{
	bool is_special;
	u16 desc_ret;
	int handle;
	int retval = 0;

	/* whether the retval lives in desc[0] depends only on the opcode of
	 * the first descriptor, so hoist the check out of the loop
	 */
	is_special = hclge_is_special_opcode(le16_to_cpu(desc[0].opcode));

	for (handle = 0; handle < num; handle++) {
		/* copy the hardware write-back out of the csq ring */
		desc[handle] = hw->cmq.csq.desc[*ntc];

		if (likely(!is_special))
			desc_ret = le16_to_cpu(desc[handle].retval);
		else
			desc_ret = le16_to_cpu(desc[0].retval);

		switch (desc_ret) {
		case HCLGE_CMD_EXEC_SUCCESS:
			retval = 0;
			break;
		case HCLGE_CMD_NO_AUTH:
			retval = -EPERM;
			break;
		case HCLGE_CMD_NOT_SUPPORTED:
			retval = -EOPNOTSUPP;
			break;
		default:
			retval = -EIO;
			break;
		}

		hw->cmq.last_status = desc_ret;

		/* advance the ring index, wrapping at the ring size */
		if (++(*ntc) == hw->cmq.csq.desc_num)
			*ntc = 0;
	}

	return retval;
}
/**
* hclge_cmd_send - send command to command queue
* @hw: pointer to the hw struct
......@@ -201,7 +238,6 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
u32 timeout = 0;
int handle = 0;
int retval = 0;
u16 opcode, desc_ret;
int ntc;
spin_lock_bh(&hw->cmq.csq.lock);
......@@ -217,7 +253,6 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
* which will be use for hardware to write back
*/
ntc = hw->cmq.csq.next_to_use;
opcode = le16_to_cpu(desc[0].opcode);
while (handle < num) {
desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
*desc_to_use = desc[handle];
......@@ -248,31 +283,7 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
if (!complete) {
retval = -EAGAIN;
} else {
handle = 0;
while (handle < num) {
/* Get the result of hardware write back */
desc_to_use = &hw->cmq.csq.desc[ntc];
desc[handle] = *desc_to_use;
if (likely(!hclge_is_special_opcode(opcode)))
desc_ret = le16_to_cpu(desc[handle].retval);
else
desc_ret = le16_to_cpu(desc[0].retval);
if (desc_ret == HCLGE_CMD_EXEC_SUCCESS)
retval = 0;
else if (desc_ret == HCLGE_CMD_NO_AUTH)
retval = -EPERM;
else if (desc_ret == HCLGE_CMD_NOT_SUPPORTED)
retval = -EOPNOTSUPP;
else
retval = -EIO;
hw->cmq.last_status = desc_ret;
ntc++;
handle++;
if (ntc == hw->cmq.csq.desc_num)
ntc = 0;
}
retval = hclge_cmd_check_retval(hw, desc, num, &ntc);
}
/* Clean the command send queue */
......
......@@ -523,7 +523,7 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n", nq_to_qs_map->nq_id);
dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: %u\n",
dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: 0x%x\n",
nq_to_qs_map->qset_id);
cmd = HCLGE_OPC_TM_PG_WEIGHT;
......
......@@ -482,7 +482,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_rtc_reg[] = {
{true, "CGE_IGU_AFIFO_DFX_1"},
{true, "CGE_EGU_AFIFO_DFX_0"},
{true, "CGE_IGU_AFIFO_DFX_1"},
{true, "CGE_EGU_AFIFO_DFX_1"},
{false, "Reserved"},
{false, "Reserved"},
{false, "Reserved"},
......
......@@ -3,7 +3,7 @@
#include "hclge_err.h"
static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = {
const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = {
{ .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err" },
{ .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err" },
{ .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err" },
......@@ -16,7 +16,7 @@ static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = {
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = {
const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = {
{ .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err" },
{ .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err" },
{ .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err" },
......@@ -36,7 +36,7 @@ static const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = {
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = {
const struct hclge_hw_error hclge_tqp_int_ecc_int[] = {
{ .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err" },
{ .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err" },
{ .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err" },
......@@ -46,19 +46,19 @@ static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = {
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_msix_sram_ecc_int[] = {
const struct hclge_hw_error hclge_msix_sram_ecc_int[] = {
{ .int_msk = BIT(1), .msg = "msix_nic_ecc_mbit_err" },
{ .int_msk = BIT(3), .msg = "msix_rocee_ecc_mbit_err" },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_igu_int[] = {
const struct hclge_hw_error hclge_igu_int[] = {
{ .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err" },
{ .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err" },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = {
const struct hclge_hw_error hclge_igu_egu_tnl_int[] = {
{ .int_msk = BIT(0), .msg = "rx_buf_overflow" },
{ .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow" },
{ .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow" },
......@@ -68,12 +68,12 @@ static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = {
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ncsi_err_int[] = {
const struct hclge_hw_error hclge_ncsi_err_int[] = {
{ .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err" },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = {
const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = {
{ .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err" },
{ .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err" },
{ .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err" },
......@@ -112,13 +112,13 @@ static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = {
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ppp_pf_abnormal_int[] = {
const struct hclge_hw_error hclge_ppp_pf_abnormal_int[] = {
{ .int_msk = BIT(0), .msg = "tx_vlan_tag_err" },
{ .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err" },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st3[] = {
const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st3[] = {
{ .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err" },
{ .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err" },
{ .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err" },
......@@ -128,7 +128,7 @@ static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st3[] = {
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_tm_sch_rint[] = {
const struct hclge_hw_error hclge_tm_sch_rint[] = {
{ .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err" },
{ .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_err" },
{ .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_err" },
......@@ -173,7 +173,7 @@ static const struct hclge_hw_error hclge_tm_sch_rint[] = {
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_qcn_fifo_rint[] = {
const struct hclge_hw_error hclge_qcn_fifo_rint[] = {
{ .int_msk = BIT(0), .msg = "qcn_shap_gp0_sch_fifo_rd_err" },
{ .int_msk = BIT(1), .msg = "qcn_shap_gp0_sch_fifo_wr_err" },
{ .int_msk = BIT(2), .msg = "qcn_shap_gp1_sch_fifo_rd_err" },
......@@ -195,7 +195,7 @@ static const struct hclge_hw_error hclge_qcn_fifo_rint[] = {
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_qcn_ecc_rint[] = {
const struct hclge_hw_error hclge_qcn_ecc_rint[] = {
{ .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err" },
{ .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err" },
{ .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err" },
......@@ -210,7 +210,7 @@ static const struct hclge_hw_error hclge_qcn_ecc_rint[] = {
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_mac_afifo_tnl_int[] = {
const struct hclge_hw_error hclge_mac_afifo_tnl_int[] = {
{ .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err" },
{ .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err" },
{ .int_msk = BIT(2), .msg = "egu_lge_afifo_ecc_1bit_err" },
......@@ -228,7 +228,7 @@ static const struct hclge_hw_error hclge_mac_afifo_tnl_int[] = {
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[] = {
const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[] = {
{ .int_msk = BIT(13), .msg = "rpu_rx_pkt_bit32_ecc_mbit_err" },
{ .int_msk = BIT(14), .msg = "rpu_rx_pkt_bit33_ecc_mbit_err" },
{ .int_msk = BIT(15), .msg = "rpu_rx_pkt_bit34_ecc_mbit_err" },
......@@ -251,7 +251,7 @@ static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[] = {
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[] = {
const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[] = {
{ .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err" },
{ .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err" },
{ .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err" },
......@@ -259,7 +259,7 @@ static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[] = {
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ppu_pf_abnormal_int[] = {
const struct hclge_hw_error hclge_ppu_pf_abnormal_int[] = {
{ .int_msk = BIT(0), .msg = "over_8bd_no_fe" },
{ .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err" },
{ .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err" },
......@@ -269,7 +269,7 @@ static const struct hclge_hw_error hclge_ppu_pf_abnormal_int[] = {
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ssu_com_err_int[] = {
const struct hclge_hw_error hclge_ssu_com_err_int[] = {
{ .int_msk = BIT(0), .msg = "buf_sum_err" },
{ .int_msk = BIT(1), .msg = "ppp_mb_num_err" },
{ .int_msk = BIT(2), .msg = "ppp_mbid_err" },
......@@ -283,7 +283,43 @@ static const struct hclge_hw_error hclge_ssu_com_err_int[] = {
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ssu_port_based_err_int[] = {
/* SSU memory ECC multi-bit error table: one entry per SSU memory bank
 * (bits 0..31 of SSU_ECC_MULTI_BIT_INT_0). Looked up by hclge_log_error()
 * to translate a raised status bit into a human-readable message;
 * terminated by an all-zero sentinel entry.
 */
const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = {
{ .int_msk = BIT(0), .msg = "ssu_mem0_ecc_mbit_err" },
{ .int_msk = BIT(1), .msg = "ssu_mem1_ecc_mbit_err" },
{ .int_msk = BIT(2), .msg = "ssu_mem2_ecc_mbit_err" },
{ .int_msk = BIT(3), .msg = "ssu_mem3_ecc_mbit_err" },
{ .int_msk = BIT(4), .msg = "ssu_mem4_ecc_mbit_err" },
{ .int_msk = BIT(5), .msg = "ssu_mem5_ecc_mbit_err" },
{ .int_msk = BIT(6), .msg = "ssu_mem6_ecc_mbit_err" },
{ .int_msk = BIT(7), .msg = "ssu_mem7_ecc_mbit_err" },
{ .int_msk = BIT(8), .msg = "ssu_mem8_ecc_mbit_err" },
{ .int_msk = BIT(9), .msg = "ssu_mem9_ecc_mbit_err" },
{ .int_msk = BIT(10), .msg = "ssu_mem10_ecc_mbit_err" },
{ .int_msk = BIT(11), .msg = "ssu_mem11_ecc_mbit_err" },
{ .int_msk = BIT(12), .msg = "ssu_mem12_ecc_mbit_err" },
{ .int_msk = BIT(13), .msg = "ssu_mem13_ecc_mbit_err" },
{ .int_msk = BIT(14), .msg = "ssu_mem14_ecc_mbit_err" },
{ .int_msk = BIT(15), .msg = "ssu_mem15_ecc_mbit_err" },
{ .int_msk = BIT(16), .msg = "ssu_mem16_ecc_mbit_err" },
{ .int_msk = BIT(17), .msg = "ssu_mem17_ecc_mbit_err" },
{ .int_msk = BIT(18), .msg = "ssu_mem18_ecc_mbit_err" },
{ .int_msk = BIT(19), .msg = "ssu_mem19_ecc_mbit_err" },
{ .int_msk = BIT(20), .msg = "ssu_mem20_ecc_mbit_err" },
{ .int_msk = BIT(21), .msg = "ssu_mem21_ecc_mbit_err" },
{ .int_msk = BIT(22), .msg = "ssu_mem22_ecc_mbit_err" },
{ .int_msk = BIT(23), .msg = "ssu_mem23_ecc_mbit_err" },
{ .int_msk = BIT(24), .msg = "ssu_mem24_ecc_mbit_err" },
{ .int_msk = BIT(25), .msg = "ssu_mem25_ecc_mbit_err" },
{ .int_msk = BIT(26), .msg = "ssu_mem26_ecc_mbit_err" },
{ .int_msk = BIT(27), .msg = "ssu_mem27_ecc_mbit_err" },
{ .int_msk = BIT(28), .msg = "ssu_mem28_ecc_mbit_err" },
{ .int_msk = BIT(29), .msg = "ssu_mem29_ecc_mbit_err" },
{ .int_msk = BIT(30), .msg = "ssu_mem30_ecc_mbit_err" },
{ .int_msk = BIT(31), .msg = "ssu_mem31_ecc_mbit_err" },
{ /* sentinel */ }
};
const struct hclge_hw_error hclge_ssu_port_based_err_int[] = {
{ .int_msk = BIT(0), .msg = "roc_pkt_without_key_port" },
{ .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port" },
{ .int_msk = BIT(2), .msg = "igu_pkt_without_key_port" },
......@@ -300,7 +336,7 @@ static const struct hclge_hw_error hclge_ssu_port_based_err_int[] = {
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ssu_fifo_overflow_int[] = {
const struct hclge_hw_error hclge_ssu_fifo_overflow_int[] = {
{ .int_msk = BIT(0), .msg = "ig_mac_inf_int" },
{ .int_msk = BIT(1), .msg = "ig_host_inf_int" },
{ .int_msk = BIT(2), .msg = "ig_roc_buf_int" },
......@@ -328,7 +364,7 @@ static const struct hclge_hw_error hclge_ssu_fifo_overflow_int[] = {
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ssu_ets_tcg_int[] = {
const struct hclge_hw_error hclge_ssu_ets_tcg_int[] = {
{ .int_msk = BIT(0), .msg = "ets_rd_int_rx_tcg" },
{ .int_msk = BIT(1), .msg = "ets_wr_int_rx_tcg" },
{ .int_msk = BIT(2), .msg = "ets_rd_int_tx_tcg" },
......@@ -336,14 +372,14 @@ static const struct hclge_hw_error hclge_ssu_ets_tcg_int[] = {
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ssu_port_based_pf_int[] = {
const struct hclge_hw_error hclge_ssu_port_based_pf_int[] = {
{ .int_msk = BIT(0), .msg = "roc_pkt_without_key_port" },
{ .int_msk = BIT(9), .msg = "low_water_line_err_port" },
{ .int_msk = BIT(10), .msg = "hi_water_line_err_port" },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = {
const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = {
{ .int_msk = 0, .msg = "rocee qmm ovf: sgid invalid err" },
{ .int_msk = 0x4, .msg = "rocee qmm ovf: sgid ovf err" },
{ .int_msk = 0x8, .msg = "rocee qmm ovf: smac invalid err" },
......@@ -367,9 +403,9 @@ static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = {
{ /* sentinel */ }
};
static void hclge_log_error(struct device *dev, char *reg,
const struct hclge_hw_error *err,
u32 err_sts)
void hclge_log_error(struct device *dev, char *reg,
const struct hclge_hw_error *err,
u32 err_sts)
{
while (err->msg) {
if (err->int_msk & err_sts)
......@@ -379,6 +415,49 @@ static void hclge_log_error(struct device *dev, char *reg,
}
}
/* hclge_query_error - issue a multi-descriptor read command to query
 * error/interrupt status registers.
 * @hdev: pointer to the hclge device
 * @desc: descriptor array (at least @num entries) to receive the results
 * @opcode: firmware command opcode to issue
 * @num: number of descriptors in the chain
 *
 * Returns 0 on success, negative errno otherwise.
 */
int hclge_query_error(struct hclge_dev *hdev, struct hclge_desc *desc,
		      enum hclge_opcode_type opcode, int num)
{
	struct hclge_desc *first = desc;

	/* build a read command and mark it as the head of a chain */
	hclge_cmd_setup_basic_desc(first, opcode, true);
	first->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	return hclge_cmd_send(&hdev->hw, first, num);
}
/* hclge_clear_error - resend a previously queried descriptor chain as a
 * write command, which clears the error status it reported.
 * @hdev: pointer to the hclge device
 * @desc: descriptor array previously filled by a query
 * @num: number of descriptors in the chain
 *
 * Returns 0 on success, negative errno otherwise.
 */
int hclge_clear_error(struct hclge_dev *hdev, struct hclge_desc *desc, int num)
{
	struct hclge_desc *head = desc;

	/* reuse the chain, this time as a write (is_read = false) */
	hclge_cmd_reuse_desc(head, false);
	head->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

	return hclge_cmd_send(&hdev->hw, head, num);
}
/* hclge_query_bd_num - query how many buffer descriptors the mpf/pf
 * interrupt status registers occupy and allocate a descriptor array
 * large enough for either.
 * @hdev: pointer to the hclge device
 * @bd_num: out-param filled with the mpf, pf and max bd counts
 * @opcode: firmware opcode that returns the bd counts
 *
 * Returns a kcalloc'ed array of @bd_num->max_bd_num descriptors that the
 * CALLER must kfree(), or NULL on failure (command error or OOM).
 */
struct hclge_desc *hclge_query_bd_num(struct hclge_dev *hdev,
				      struct hclge_bd_num *bd_num,
				      enum hclge_opcode_type opcode)
{
	struct device *dev = &hdev->pdev->dev;
	struct hclge_desc desc_bd;
	int ret;

	/* query the number of registers in the int status */
	hclge_cmd_setup_basic_desc(&desc_bd, opcode, true);
	ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1);
	if (ret) {
		dev_err(dev, "fail(%d) to query int status bd num\n", ret);
		return NULL;
	}

	bd_num->mpf_bd_num = le32_to_cpu(desc_bd.data[0]);
	bd_num->pf_bd_num = le32_to_cpu(desc_bd.data[1]);
	/* reuse the already-converted values instead of converting twice */
	bd_num->max_bd_num = max_t(u32, bd_num->mpf_bd_num,
				   bd_num->pf_bd_num);

	return kcalloc(bd_num->max_bd_num, sizeof(struct hclge_desc),
		       GFP_KERNEL);
}
/* hclge_cmd_query_error: read the error information
* @hdev: pointer to struct hclge_dev
* @desc: descriptor for describing the command
......@@ -793,11 +872,8 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
int ret;
/* query all main PF RAS errors */
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_MPF_RAS_INT,
true);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
ret = hclge_query_error(hdev, &desc[0], HCLGE_QUERY_CLEAR_MPF_RAS_INT,
num);
if (ret) {
dev_err(dev, "query all mpf ras int cmd failed (%d)\n", ret);
return ret;
......@@ -841,13 +917,15 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
desc_data = (__le32 *)&desc[2];
status = le32_to_cpu(*(desc_data + 2));
if (status) {
dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_0 ssu_ecc_mbit_int[31:0]\n");
hclge_log_error(dev, "SSU_ECC_MULTI_BIT_INT_0",
&hclge_ssu_mem_ecc_err_int[0], status);
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
}
status = le32_to_cpu(*(desc_data + 3)) & BIT(0);
if (status) {
dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_ecc_mbit_int[32]\n");
dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n",
status);
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
}
......@@ -935,10 +1013,7 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
}
/* clear all main PF RAS errors */
hclge_cmd_reuse_desc(&desc[0], false);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
ret = hclge_clear_error(hdev, &desc[0], num);
if (ret)
dev_err(dev, "clear all mpf ras int cmd failed (%d)\n", ret);
......@@ -964,11 +1039,8 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
int ret;
/* query all PF RAS errors */
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_PF_RAS_INT,
true);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
ret = hclge_query_error(hdev, &desc[0], HCLGE_QUERY_CLEAR_PF_RAS_INT,
num);
if (ret) {
dev_err(dev, "query all pf ras int cmd failed (%d)\n", ret);
return ret;
......@@ -1003,11 +1075,15 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
hclge_log_error(dev, "IGU_EGU_TNL_INT_STS",
&hclge_igu_egu_tnl_int[0], status);
/* clear all PF RAS errors */
hclge_cmd_reuse_desc(&desc[0], false);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
/* log PPU(RCB) errors */
desc_data = (__le32 *)&desc[3];
status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_RAS_MASK;
if (status)
hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0",
&hclge_ppu_pf_abnormal_int[0], status);
ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
/* clear all PF RAS errors */
ret = hclge_clear_error(hdev, &desc[0], num);
if (ret)
dev_err(dev, "clear all pf ras int cmd failed (%d)\n", ret);
......@@ -1016,40 +1092,71 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
static int hclge_handle_all_ras_errors(struct hclge_dev *hdev)
{
struct device *dev = &hdev->pdev->dev;
u32 mpf_bd_num, pf_bd_num, bd_num;
struct hclge_desc desc_bd;
struct hclge_bd_num bd_num;
struct hclge_desc *desc;
int ret;
/* query the number of registers in the RAS int status */
hclge_cmd_setup_basic_desc(&desc_bd, HCLGE_QUERY_RAS_INT_STS_BD_NUM,
true);
ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1);
desc = hclge_query_bd_num(hdev, &bd_num,
HCLGE_QUERY_RAS_INT_STS_BD_NUM);
if (!desc)
return -ENOMEM;
/* handle all main PF RAS errors */
ret = hclge_handle_mpf_ras_error(hdev, desc, bd_num.mpf_bd_num);
if (ret) {
dev_err(dev, "fail(%d) to query ras int status bd num\n", ret);
kfree(desc);
return ret;
}
mpf_bd_num = le32_to_cpu(desc_bd.data[0]);
pf_bd_num = le32_to_cpu(desc_bd.data[1]);
bd_num = max_t(u32, mpf_bd_num, pf_bd_num);
memset(desc, 0, bd_num.max_bd_num * sizeof(struct hclge_desc));
desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
/* handle all PF RAS errors */
ret = hclge_handle_pf_ras_error(hdev, desc, bd_num.pf_bd_num);
kfree(desc);
return ret;
}
int hclge_clear_all_ras_errors(struct hclge_dev *hdev)
{
struct hclge_bd_num bd_num;
struct hclge_desc *desc;
int ret;
/* query the number of registers in the RAS int status */
desc = hclge_query_bd_num(hdev, &bd_num,
HCLGE_QUERY_RAS_INT_STS_BD_NUM);
if (!desc)
return -ENOMEM;
/* handle all main PF RAS errors */
ret = hclge_handle_mpf_ras_error(hdev, desc, mpf_bd_num);
/* query all main PF RAS errors */
ret = hclge_query_error(hdev, desc, HCLGE_QUERY_CLEAR_MPF_RAS_INT,
bd_num.mpf_bd_num);
if (ret) {
kfree(desc);
return ret;
}
memset(desc, 0, bd_num * sizeof(struct hclge_desc));
/* handle all PF RAS errors */
ret = hclge_handle_pf_ras_error(hdev, desc, pf_bd_num);
kfree(desc);
/* clear all main PF RAS errors */
ret = hclge_clear_error(hdev, desc, bd_num.mpf_bd_num);
if (ret) {
kfree(desc);
return ret;
}
memset(desc, 0, bd_num.max_bd_num * sizeof(struct hclge_desc));
/* query all PF RAS errors */
ret = hclge_query_error(hdev, desc, HCLGE_QUERY_CLEAR_PF_RAS_INT,
bd_num.pf_bd_num);
if (ret) {
kfree(desc);
return ret;
}
/* clear all PF RAS errors */
ret = hclge_clear_error(hdev, desc, bd_num.pf_bd_num);
kfree(desc);
return ret;
}
......@@ -1184,7 +1291,7 @@ static int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en)
return ret;
}
static int hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev)
int hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev)
{
struct hclge_dev *hdev = ae_dev->priv;
......
......@@ -79,6 +79,7 @@
#define HCLGE_PPP_MPF_INT_ST3_MASK GENMASK(5, 0)
#define HCLGE_PPU_MPF_INT_ST3_MASK GENMASK(7, 0)
#define HCLGE_PPU_MPF_INT_ST2_MSIX_MASK GENMASK(29, 28)
#define HCLGE_PPU_PF_INT_RAS_MASK 0x18
#define HCLGE_PPU_PF_INT_MSIX_MASK 0x27
#define HCLGE_QCN_FIFO_INT_MASK GENMASK(17, 0)
#define HCLGE_QCN_ECC_INT_MASK GENMASK(21, 0)
......@@ -113,7 +114,50 @@ struct hclge_hw_error {
const char *msg;
};
/* Buffer-descriptor counts returned by the RAS/MSI-X int status bd-num
 * query; filled by hclge_query_bd_num().
 */
struct hclge_bd_num {
u32 mpf_bd_num; /* bd count for main-PF int status registers */
u32 pf_bd_num; /* bd count for per-PF int status registers */
u32 max_bd_num; /* max of the two; sizes the shared desc array */
};
extern const struct hclge_hw_error hclge_imp_tcm_ecc_int[];
extern const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[];
extern const struct hclge_hw_error hclge_tqp_int_ecc_int[];
extern const struct hclge_hw_error hclge_msix_sram_ecc_int[];
extern const struct hclge_hw_error hclge_igu_int[];
extern const struct hclge_hw_error hclge_igu_egu_tnl_int[];
extern const struct hclge_hw_error hclge_ncsi_err_int[];
extern const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[];
extern const struct hclge_hw_error hclge_ppp_pf_abnormal_int[];
extern const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st3[];
extern const struct hclge_hw_error hclge_tm_sch_rint[];
extern const struct hclge_hw_error hclge_qcn_fifo_rint[];
extern const struct hclge_hw_error hclge_qcn_ecc_rint[];
extern const struct hclge_hw_error hclge_mac_afifo_tnl_int[];
extern const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[];
extern const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[];
extern const struct hclge_hw_error hclge_ppu_pf_abnormal_int[];
extern const struct hclge_hw_error hclge_ssu_com_err_int[];
extern const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[];
extern const struct hclge_hw_error hclge_ssu_port_based_err_int[];
extern const struct hclge_hw_error hclge_ssu_fifo_overflow_int[];
extern const struct hclge_hw_error hclge_ssu_ets_tcg_int[];
extern const struct hclge_hw_error hclge_ssu_port_based_pf_int[];
extern const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[];
int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state);
int hclge_clear_all_ras_errors(struct hclge_dev *hdev);
pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev);
void hclge_handle_hw_msix_error(struct hclge_dev *hdev);
int hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev);
void hclge_log_error(struct device *dev, char *reg,
const struct hclge_hw_error *err,
u32 err_sts);
int hclge_query_error(struct hclge_dev *hdev, struct hclge_desc *desc,
enum hclge_opcode_type opcode, int num);
int hclge_clear_error(struct hclge_dev *hdev, struct hclge_desc *desc, int num);
struct hclge_desc *hclge_query_bd_num(struct hclge_dev *hdev,
struct hclge_bd_num *bd_num,
enum hclge_opcode_type opcode);
#endif
......@@ -579,6 +579,24 @@ struct hclge_fd_ad_data {
u16 rule_id;
};
/* One MAC address stored on behalf of a VF so the PF can restore or
 * remove it (e.g. across VF reset/FLR).
 */
struct hclge_vport_mac_addr_cfg {
struct list_head node; /* entry in the per-PF uc/mc mac list */
int vport_id; /* owning vport (VF) index */
int hd_tbl_status; /* whether the addr is written to the hw table */
u8 mac_addr[ETH_ALEN];
};
/* Selects which stored table (unicast or multicast) an operation acts on */
enum HCLGE_MAC_ADDR_TYPE {
HCLGE_MAC_ADDR_UC,
HCLGE_MAC_ADDR_MC
};
/* One VLAN id stored on behalf of a VF, mirroring the hw vlan filter */
struct hclge_vport_vlan_cfg {
struct list_head node; /* entry in the vport's vlan_list */
int hd_tbl_status; /* whether the vlan is written to the hw table */
u16 vlan_id;
};
/* For each bit of TCAM entry, it uses a pair of 'x' and
* 'y' to indicate which value to match, like below:
* ----------------------------------
......@@ -719,6 +737,10 @@ struct hclge_dev {
/* unicast mac vlan space shared by PF and its VFs */
u16 share_umv_size;
struct mutex umv_mutex; /* protect share_umv_size */
struct mutex vport_cfg_mutex; /* Protect stored vf table */
struct list_head uc_mac_list; /* Store VF unicast table */
struct list_head mc_mac_list; /* Store VF multicast table */
};
/* VPort level vlan tag configuration for TX direction */
......@@ -735,10 +757,11 @@ struct hclge_tx_vtag_cfg {
/* VPort level vlan tag configuration for RX direction */
struct hclge_rx_vtag_cfg {
bool strip_tag1_en; /* Whether strip inner vlan tag */
bool strip_tag2_en; /* Whether strip outer vlan tag */
bool vlan1_vlan_prionly;/* Inner VLAN Tag up to descriptor Enable */
bool vlan2_vlan_prionly;/* Outer VLAN Tag up to descriptor Enable */
u8 rx_vlan_offload_en; /* Whether enable rx vlan offload */
u8 strip_tag1_en; /* Whether strip inner vlan tag */
u8 strip_tag2_en; /* Whether strip outer vlan tag */
u8 vlan1_vlan_prionly; /* Inner VLAN Tag up to descriptor Enable */
u8 vlan2_vlan_prionly; /* Outer VLAN Tag up to descriptor Enable */
};
struct hclge_rss_tuple_cfg {
......@@ -757,6 +780,17 @@ enum HCLGE_VPORT_STATE {
HCLGE_VPORT_STATE_MAX
};
/* VLAN tag description used for port-based VLAN configuration */
struct hclge_vlan_info {
u16 vlan_proto; /* so far only 802.1Q is supported */
u16 qos; /* priority (PCP) bits */
u16 vlan_tag; /* VLAN id */
};
/* Per-vport port-based VLAN configuration pushed from PF to VF */
struct hclge_port_base_vlan_config {
u16 state; /* one of hnae3_port_base_vlan_state */
struct hclge_vlan_info vlan_info;
};
struct hclge_vport {
u16 alloc_tqps; /* Allocated Tx/Rx queues */
......@@ -773,6 +807,8 @@ struct hclge_vport {
u16 bw_limit; /* VSI BW Limit (0 = disabled) */
u8 dwrr;
struct list_head vlan_list; /* Store VF vlan table */
struct hclge_port_base_vlan_config port_base_vlan_cfg;
struct hclge_tx_vtag_cfg txvlan_cfg;
struct hclge_rx_vtag_cfg rxvlan_cfg;
......@@ -839,4 +875,17 @@ int hclge_vport_start(struct hclge_vport *vport);
void hclge_vport_stop(struct hclge_vport *vport);
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id);
void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
enum HCLGE_MAC_ADDR_TYPE mac_type);
void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
bool is_write_tbl,
enum HCLGE_MAC_ADDR_TYPE mac_type);
void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
enum HCLGE_MAC_ADDR_TYPE mac_type);
void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list);
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
struct hclge_vlan_info *vlan_info);
int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
u16 state, u16 vlan_tag, u16 qos,
u16 vlan_proto);
#endif
......@@ -224,12 +224,24 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
hclge_rm_uc_addr_common(vport, old_addr);
status = hclge_add_uc_addr_common(vport, mac_addr);
if (status)
if (status) {
hclge_add_uc_addr_common(vport, old_addr);
} else {
hclge_rm_vport_mac_table(vport, old_addr,
false, HCLGE_MAC_ADDR_UC);
hclge_add_vport_mac_table(vport, mac_addr,
HCLGE_MAC_ADDR_UC);
}
} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_ADD) {
status = hclge_add_uc_addr_common(vport, mac_addr);
if (!status)
hclge_add_vport_mac_table(vport, mac_addr,
HCLGE_MAC_ADDR_UC);
} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_REMOVE) {
status = hclge_rm_uc_addr_common(vport, mac_addr);
if (!status)
hclge_rm_vport_mac_table(vport, mac_addr,
false, HCLGE_MAC_ADDR_UC);
} else {
dev_err(&hdev->pdev->dev,
"failed to set unicast mac addr, unknown subcode %d\n",
......@@ -255,8 +267,14 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_ADD) {
status = hclge_add_mc_addr_common(vport, mac_addr);
if (!status)
hclge_add_vport_mac_table(vport, mac_addr,
HCLGE_MAC_ADDR_MC);
} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
status = hclge_rm_mc_addr_common(vport, mac_addr);
if (!status)
hclge_rm_vport_mac_table(vport, mac_addr,
false, HCLGE_MAC_ADDR_MC);
} else {
dev_err(&hdev->pdev->dev,
"failed to set mcast mac addr, unknown subcode %d\n",
......@@ -271,9 +289,24 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
return 0;
}
/* hclge_push_vf_port_base_vlan_info - push port based vlan info to a VF
 * @vport: vport used to send the mailbox message
 * @vfid: destination VF id
 * @state: port based vlan state (enum hnae3_port_base_vlan_state)
 * @vlan_tag: vlan id of the port based vlan
 * @qos: priority of the port based vlan
 * @vlan_proto: vlan protocol (so far 802.1Q only)
 *
 * Packs the four 16-bit fields into an 8-byte payload at offsets 0/2/4/6
 * and sends it to the VF with opcode HLCGE_MBX_PUSH_VLAN_INFO.
 */
int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
				      u16 state, u16 vlan_tag, u16 qos,
				      u16 vlan_proto)
{
#define MSG_DATA_SIZE	8

	u8 msg_data[MSG_DATA_SIZE];
	u16 fields[] = { state, vlan_tag, qos, vlan_proto };

	/* a single copy of the u16 array yields the same wire layout as
	 * four separate copies at offsets 0, 2, 4 and 6
	 */
	memcpy(msg_data, fields, sizeof(msg_data));

	return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
				  HLCGE_MBX_PUSH_VLAN_INFO, vfid);
}
static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req,
bool gen_resp)
struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
int status = 0;
......@@ -292,11 +325,24 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
bool en = mbx_req->msg[2] ? true : false;
status = hclge_en_hw_strip_rxvtag(handle, en);
} else if (mbx_req->msg[1] == HCLGE_MBX_PORT_BASE_VLAN_CFG) {
struct hclge_vlan_info vlan_info;
u16 state;
memcpy(&state, &mbx_req->msg[2], sizeof(u16));
memcpy(&vlan_info.vlan_tag, &mbx_req->msg[4], sizeof(u16));
memcpy(&vlan_info.qos, &mbx_req->msg[6], sizeof(u16));
memcpy(&vlan_info.vlan_proto, &mbx_req->msg[8], sizeof(u16));
status = hclge_update_port_base_vlan_cfg(vport, state,
&vlan_info);
} else if (mbx_req->msg[1] == HCLGE_MBX_GET_PORT_BASE_VLAN_STATE) {
u8 state;
state = vport->port_base_vlan_cfg.state;
status = hclge_gen_resp_to_vf(vport, mbx_req, 0, &state,
sizeof(u8));
}
if (gen_resp)
status = hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
return status;
}
......@@ -369,16 +415,19 @@ static int hclge_get_link_info(struct hclge_vport *vport,
{
struct hclge_dev *hdev = vport->back;
u16 link_status;
u8 msg_data[8];
u8 msg_data[10];
u16 media_type;
u8 dest_vfid;
u16 duplex;
/* mac.link can only be 0 or 1 */
link_status = (u16)hdev->hw.mac.link;
duplex = hdev->hw.mac.duplex;
media_type = hdev->hw.mac.media_type;
memcpy(&msg_data[0], &link_status, sizeof(u16));
memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32));
memcpy(&msg_data[6], &duplex, sizeof(u16));
memcpy(&msg_data[8], &media_type, sizeof(u16));
dest_vfid = mbx_req->mbx_src_vfid;
/* send this requested info to VF */
......@@ -386,6 +435,29 @@ static int hclge_get_link_info(struct hclge_vport *vport,
HCLGE_MBX_LINK_STAT_CHANGE, dest_vfid);
}
/* hclge_get_vf_link_mode - answer a VF query for the PF's link modes
 * @vport: vport the request arrived on
 * @mbx_req: mailbox request; msg[2] selects supported vs advertising
 *
 * Sends either the supported or the advertising link-mode word back to
 * the requesting VF via HCLGE_MBX_LINK_STAT_MODE. Byte 0 of the reply
 * echoes the selector so the VF knows which word it received.
 */
static void hclge_get_vf_link_mode(struct hclge_vport *vport,
				   struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
#define HCLGE_SUPPORTED	1
	struct hclge_dev *hdev = vport->back;
	unsigned long advertising;
	unsigned long supported;
	unsigned long send_data;
	/* zero-initialize: byte 1 (and bytes 6-9 when sizeof(long) == 4) is
	 * never written below, and the whole buffer goes out to the VF, so
	 * an uninitialized buffer would leak PF stack memory.
	 */
	u8 msg_data[10] = {0};
	u8 dest_vfid;

	advertising = hdev->hw.mac.advertising[0];
	supported = hdev->hw.mac.supported[0];
	dest_vfid = mbx_req->mbx_src_vfid;
	msg_data[0] = mbx_req->msg[2];

	send_data = msg_data[0] == HCLGE_SUPPORTED ? supported : advertising;

	memcpy(&msg_data[2], &send_data, sizeof(unsigned long));
	hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
			   HCLGE_MBX_LINK_STAT_MODE, dest_vfid);
}
static void hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
......@@ -443,6 +515,24 @@ static int hclge_get_queue_id_in_pf(struct hclge_vport *vport,
return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data, 2);
}
static int hclge_get_rss_key(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
#define HCLGE_RSS_MBX_RESP_LEN 8
u8 resp_data[HCLGE_RSS_MBX_RESP_LEN];
struct hclge_dev *hdev = vport->back;
u8 index;
index = mbx_req->msg[2];
memcpy(&resp_data[0],
&hdev->vport[0].rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
HCLGE_RSS_MBX_RESP_LEN);
return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
HCLGE_RSS_MBX_RESP_LEN);
}
static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
{
u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG);
......@@ -514,7 +604,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
ret);
break;
case HCLGE_MBX_SET_VLAN:
ret = hclge_set_vf_vlan_cfg(vport, req, false);
ret = hclge_set_vf_vlan_cfg(vport, req);
if (ret)
dev_err(&hdev->pdev->dev,
"PF failed(%d) to config VF's VLAN\n",
......@@ -578,6 +668,22 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
"PF failed(%d) to get qid for VF\n",
ret);
break;
case HCLGE_MBX_GET_VF_FLR_STATUS:
hclge_rm_vport_all_mac_table(vport, true,
HCLGE_MAC_ADDR_UC);
hclge_rm_vport_all_mac_table(vport, true,
HCLGE_MAC_ADDR_MC);
hclge_rm_vport_all_vlan_table(vport, true);
break;
case HCLGE_MBX_GET_RSS_KEY:
ret = hclge_get_rss_key(vport, req);
if (ret)
dev_err(&hdev->pdev->dev,
"PF fail(%d) to set mtu\n", ret);
break;
case HCLGE_MBX_GET_LINK_MODE:
hclge_get_vf_link_mode(vport, req);
break;
default:
dev_err(&hdev->pdev->dev,
"un-supported mailbox message, code = %d\n",
......
......@@ -209,6 +209,27 @@ static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
return 0;
}
/* hclgevf_get_port_base_vlan_filter_state - query the PF for the port
 * based vlan state and cache it in the nic handle
 * @hdev: the VF device
 *
 * Return: 0 on success, otherwise the mailbox error code.
 */
static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	u8 resp_msg;
	int ret;

	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				   HCLGE_MBX_GET_PORT_BASE_VLAN_STATE,
				   NULL, 0, true, &resp_msg, sizeof(u8));
	if (!ret) {
		nic->port_base_vlan_state = resp_msg;
		return 0;
	}

	dev_err(&hdev->pdev->dev,
		"VF request to get port base vlan state failed %d",
		ret);
	return ret;
}
static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN 6
......@@ -368,6 +389,21 @@ void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
}
}
/* hclgevf_update_link_mode - ask the PF for both link-mode words
 * @hdev: the VF device
 *
 * Sends two fire-and-forget queries (advertising, then supported); the
 * PF replies asynchronously with HCLGE_MBX_LINK_STAT_MODE messages.
 */
void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
{
#define HCLGEVF_ADVERTISING 0
#define HCLGEVF_SUPPORTED 1
	u8 resp_msg;
	u8 query = HCLGEVF_ADVERTISING;

	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &query,
			     sizeof(u8), false, &resp_msg, sizeof(u8));

	query = HCLGEVF_SUPPORTED;
	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &query,
			     sizeof(u8), false, &resp_msg, sizeof(u8));
}
static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
struct hnae3_handle *nic = &hdev->nic;
......@@ -569,12 +605,50 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
return status;
}
/* for revision 0x20, vf shared the same rss config with pf */
static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RSS_MBX_RESP_LEN	8

	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 chunk[HCLGEVF_RSS_MBX_RESP_LEN];
	u16 remain = HCLGEVF_RSS_KEY_SIZE;
	u8 *dst = rss_cfg->rss_hash_key;
	u8 idx = 0;
	int ret;

	/* The key is fetched 8 bytes per mailbox round trip; the final
	 * chunk may contain fewer valid bytes than the response length.
	 */
	while (remain) {
		u16 copy_len = remain < HCLGEVF_RSS_MBX_RESP_LEN ?
			       remain : HCLGEVF_RSS_MBX_RESP_LEN;

		ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_RSS_KEY, 0,
					   &idx, sizeof(idx), true, chunk,
					   HCLGEVF_RSS_MBX_RESP_LEN);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"VF get rss hash key from PF failed, ret=%d",
				ret);
			return ret;
		}

		memcpy(dst, chunk, copy_len);
		dst += copy_len;
		remain -= copy_len;
		idx++;
	}

	return 0;
}
static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
u8 *hfunc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
int i;
int i, ret;
if (handle->pdev->revision >= HNAE3_REVISION_ID_21) {
/* Get hash algorithm */
......@@ -596,6 +670,16 @@ static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
if (key)
memcpy(key, rss_cfg->rss_hash_key,
HCLGEVF_RSS_KEY_SIZE);
} else {
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
if (key) {
ret = hclgevf_get_rss_hash_key(hdev);
if (ret)
return ret;
memcpy(key, rss_cfg->rss_hash_key,
HCLGEVF_RSS_KEY_SIZE);
}
}
if (indir)
......@@ -1650,6 +1734,8 @@ static void hclgevf_service_task(struct work_struct *work)
*/
hclgevf_request_link_info(hdev);
hclgevf_update_link_mode(hdev);
hclgevf_deferred_task_schedule(hdev);
clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
......@@ -1730,7 +1816,10 @@ static int hclgevf_configure(struct hclgevf_dev *hdev)
{
int ret;
hdev->hw.mac.media_type = HNAE3_MEDIA_TYPE_NONE;
/* get current port base vlan state from PF */
ret = hclgevf_get_port_base_vlan_filter_state(hdev);
if (ret)
return ret;
/* get queue configuration from PF */
ret = hclgevf_get_queue_info(hdev);
......@@ -1885,6 +1974,8 @@ static int hclgevf_ae_start(struct hnae3_handle *handle)
hclgevf_request_link_info(hdev);
hclgevf_update_link_mode(hdev);
clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
return 0;
......@@ -2067,9 +2158,10 @@ static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
hclgevf_free_vector(hdev, 0);
}
static int hclgevf_init_nic_client_instance(struct hnae3_client *client,
struct hclgevf_dev *hdev)
static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
struct hnae3_client *client)
{
struct hclgevf_dev *hdev = ae_dev->priv;
int rst_cnt = hdev->reset_count;
int ret = 0;
......@@ -2083,10 +2175,35 @@ static int hclgevf_init_nic_client_instance(struct hnae3_client *client,
clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
client->ops->uninit_instance(&hdev->nic, 0);
ret = -EBUSY;
return -EBUSY;
}
return ret;
hnae3_set_client_init_flag(client, ae_dev, 1);
return 0;
}
/* hclgevf_init_roce_client_instance - bring up the RoCE client instance
 * @ae_dev: the ae device
 * @client: the RoCE client being initialized
 *
 * A no-op (returning 0) until the hardware supports RoCE and both the
 * NIC and RoCE clients have registered.
 *
 * Return: 0 on success or nothing-to-do, otherwise an error code.
 */
static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
					     struct hnae3_client *client)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	if (!hdev->nic_client || !hdev->roce_client ||
	    !hnae3_dev_roce_supported(hdev))
		return 0;

	ret = hclgevf_init_roce_base_info(hdev);
	if (!ret)
		ret = client->ops->init_instance(&hdev->roce);
	if (ret)
		return ret;

	hnae3_set_client_init_flag(client, ae_dev, 1);

	return 0;
}
static int hclgevf_init_client_instance(struct hnae3_client *client,
......@@ -2100,25 +2217,15 @@ static int hclgevf_init_client_instance(struct hnae3_client *client,
hdev->nic_client = client;
hdev->nic.client = client;
ret = hclgevf_init_nic_client_instance(client, hdev);
ret = hclgevf_init_nic_client_instance(ae_dev, client);
if (ret)
goto clear_nic;
hnae3_set_client_init_flag(client, ae_dev, 1);
if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
struct hnae3_client *rc = hdev->roce_client;
ret = hclgevf_init_roce_base_info(hdev);
if (ret)
goto clear_roce;
ret = rc->ops->init_instance(&hdev->roce);
if (ret)
goto clear_roce;
ret = hclgevf_init_roce_client_instance(ae_dev,
hdev->roce_client);
if (ret)
goto clear_roce;
hnae3_set_client_init_flag(hdev->roce_client, ae_dev,
1);
}
break;
case HNAE3_CLIENT_UNIC:
hdev->nic_client = client;
......@@ -2136,17 +2243,10 @@ static int hclgevf_init_client_instance(struct hnae3_client *client,
hdev->roce.client = client;
}
if (hdev->roce_client && hdev->nic_client) {
ret = hclgevf_init_roce_base_info(hdev);
if (ret)
goto clear_roce;
ret = client->ops->init_instance(&hdev->roce);
if (ret)
goto clear_roce;
}
ret = hclgevf_init_roce_client_instance(ae_dev, client);
if (ret)
goto clear_roce;
hnae3_set_client_init_flag(client, ae_dev, 1);
break;
default:
return -EINVAL;
......@@ -2589,7 +2689,7 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
hdev->hw.mac.duplex = duplex;
}
static int hclgevf_gro_en(struct hnae3_handle *handle, int enable)
static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
......@@ -2605,6 +2705,16 @@ static void hclgevf_get_media_type(struct hnae3_handle *handle,
*media_type = hdev->hw.mac.media_type;
}
/* hclgevf_get_link_mode - report the cached link mode words
 * @handle: hnae3 handle of this VF
 * @supported: filled with the supported link-mode bitmap
 * @advertising: filled with the advertised link-mode bitmap
 *
 * The cached words are refreshed asynchronously by the PF through
 * HCLGE_MBX_LINK_STAT_MODE mailbox messages.
 */
static void hclgevf_get_link_mode(struct hnae3_handle *handle,
				  unsigned long *supported,
				  unsigned long *advertising)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*advertising = hdev->hw.mac.advertising;
	*supported = hdev->hw.mac.supported;
}
static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
......@@ -2691,6 +2801,31 @@ static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
}
}
/* hclgevf_update_port_base_vlan_info - apply a port based vlan update
 * pushed by the PF
 * @hdev: the VF device
 * @state: new port based vlan state (enum hnae3_port_base_vlan_state)
 * @port_base_vlan_info: raw vlan info payload forwarded back to the PF
 * @data_size: length of @port_base_vlan_info in bytes
 *
 * The client is taken down around the update so the stack sees a
 * consistent vlan configuration.
 */
void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
					u8 *port_base_vlan_info, u8 data_size)
{
	struct hnae3_handle *nic = &hdev->nic;

	rtnl_lock();
	hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	rtnl_unlock();

	/* send msg to pf and wait update port base vlan info */
	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
			     HCLGE_MBX_PORT_BASE_VLAN_CFG,
			     port_base_vlan_info, data_size,
			     false, NULL, 0);

	/* any state other than DISABLE is tracked as ENABLE */
	nic->port_base_vlan_state = (state == HNAE3_PORT_BASE_VLAN_DISABLE) ?
				    HNAE3_PORT_BASE_VLAN_DISABLE :
				    HNAE3_PORT_BASE_VLAN_ENABLE;

	rtnl_lock();
	hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
	rtnl_unlock();
}
static const struct hnae3_ae_ops hclgevf_ops = {
.init_ae_dev = hclgevf_init_ae_dev,
.uninit_ae_dev = hclgevf_uninit_ae_dev,
......@@ -2743,6 +2878,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
.set_mtu = hclgevf_set_mtu,
.get_global_queue_id = hclgevf_get_qid_global,
.get_link_mode = hclgevf_get_link_mode,
};
static struct hnae3_ae_algo ae_algovf = {
......
......@@ -93,6 +93,8 @@ struct hclgevf_mac {
int link;
u8 duplex;
u32 speed;
u64 supported;
u64 advertising;
};
struct hclgevf_hw {
......@@ -236,4 +238,7 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
u8 duplex);
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev);
void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev);
void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
u8 *port_base_vlan_info, u8 data_size);
#endif
......@@ -197,6 +197,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
break;
case HCLGE_MBX_LINK_STAT_CHANGE:
case HCLGE_MBX_ASSERTING_RESET:
case HCLGE_MBX_LINK_STAT_MODE:
case HLCGE_MBX_PUSH_VLAN_INFO:
/* set this mbx event as pending. This is required as we
* might loose interrupt event when mbx task is busy
* handling. This shall be cleared when mbx task just
......@@ -242,11 +244,12 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
{
enum hnae3_reset_type reset_type;
u16 link_status;
u16 *msg_q;
u16 link_status, state;
u16 *msg_q, *vlan_info;
u8 duplex;
u32 speed;
u32 tail;
u8 idx;
/* we can safely clear it now as we are at start of the async message
* processing
......@@ -270,11 +273,22 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
link_status = le16_to_cpu(msg_q[1]);
memcpy(&speed, &msg_q[2], sizeof(speed));
duplex = (u8)le16_to_cpu(msg_q[4]);
hdev->hw.mac.media_type = (u8)le16_to_cpu(msg_q[5]);
/* update upper layer with new link link status */
hclgevf_update_link_status(hdev, link_status);
hclgevf_update_speed_duplex(hdev, speed, duplex);
break;
case HCLGE_MBX_LINK_STAT_MODE:
idx = (u8)le16_to_cpu(msg_q[1]);
if (idx)
memcpy(&hdev->hw.mac.supported , &msg_q[2],
sizeof(unsigned long));
else
memcpy(&hdev->hw.mac.advertising, &msg_q[2],
sizeof(unsigned long));
break;
case HCLGE_MBX_ASSERTING_RESET:
/* PF has asserted reset hence VF should go in pending
......@@ -288,6 +302,14 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
hclgevf_reset_task_schedule(hdev);
break;
case HLCGE_MBX_PUSH_VLAN_INFO:
state = le16_to_cpu(msg_q[1]);
vlan_info = &msg_q[1];
hclgevf_update_port_base_vlan_info(hdev, state,
(u8 *)vlan_info,
8);
break;
default:
dev_err(&hdev->pdev->dev,
"fetched unsupported(%d) message from arq\n",
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册