Unverified commit ac94e68b, authored by openeuler-ci-bot, committed by Gitee

!525 net: hns3: add support for storage product customization requirements

Merge Pull Request from: @svishen 
 
This PR adds a customization code framework to the hns3 driver to support storage product customization requirements. The following functions are supported:

1. Add support for checking whether a network port device is an HNS3 device
2. Add support for clearing MAC statistics
3. Add support for configuring function-level interrupt affinity
4. Add support for PFC storm detection and suppression (a usage sketch covering items 3 and 4 follows this list)
5. Add support for querying extended port information
6. Add support for fast reporting of faulty nodes
7. Add support for getting/setting the 1D torus parameters
8. Add customized exception handling interfaces
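
For reference, the sketch below shows how a storage-product module might consume the interrupt-affinity and PFC storm helpers exported by this PR. The nic_*() calls and HNS3_PFC_STORM_* constants are the interfaces added in hns3_ext.h below; the module context, timing values and CPU mask are illustrative assumptions only.

```c
/* Hypothetical consumer of the helpers exported by this PR.
 * nic_netdev_match_check(), nic_set_pfc_storm_para() and nic_set_cpu_affinity()
 * are defined in hns3_ext.c below; all parameter values here are examples.
 */
#include <linux/cpumask.h>
#include <linux/netdevice.h>
#include "hns3_ext.h"

static int storage_setup_hns3_port(struct net_device *ndev)
{
        cpumask_t mask;
        int ret;

        /* operate only on HNS3 PF network devices */
        ret = nic_netdev_match_check(ndev);
        if (ret)
                return ret;

        /* enable RX PFC storm detection: 100 ms window, 3 hits, 1000 ms recovery */
        ret = nic_set_pfc_storm_para(ndev, HNS3_PFC_STORM_PARA_DIR_RX,
                                     HNS3_PFC_STORM_PARA_ENABLE, 100, 3, 1000);
        if (ret)
                return ret;

        /* pin this function's queue interrupt vectors to CPUs 0-3 */
        cpumask_clear(&mask);
        cpumask_set_cpu(0, &mask);
        cpumask_set_cpu(1, &mask);
        cpumask_set_cpu(2, &mask);
        cpumask_set_cpu(3, &mask);
        return nic_set_cpu_affinity(ndev, &mask);
}
```

nic_clean_stats64() and the nic_get_*() query helpers below follow the same calling convention on an HNS3 PF netdev.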

issue:
https://gitee.com/openeuler/kernel/issues/I6QRKC 
 
Link: https://gitee.com/openeuler/kernel/pulls/525

Reviewed-by: Yue Haibing <yuehaibing@huawei.com> 
Reviewed-by: Jialin Zhang <zhangjialin11@huawei.com> 
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com> 
......@@ -12,6 +12,7 @@ obj-$(CONFIG_HNS3) += hnae3.o
obj-$(CONFIG_HNS3_ENET) += hns3.o
hns3-objs = hns3_enet.o hns3_ethtool.o hns3_debugfs.o
hns3-objs += hns3_ext.o
hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o
......@@ -24,6 +25,6 @@ obj-$(CONFIG_HNS3_HCLGE) += hclge.o
hclge-objs = hns3pf/hclge_main.o hns3pf/hclge_mdio.o hns3pf/hclge_tm.o hns3pf/hclge_sysfs.o \
hns3pf/hclge_mbx.o hns3pf/hclge_err.o hns3pf/hclge_debugfs.o hns3pf/hclge_ptp.o hns3pf/hclge_devlink.o \
hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o
hclge-objs += hns3pf/hclge_ext.o
hclge-$(CONFIG_HNS3_DCB) += hns3pf/hclge_dcb.o
......@@ -103,6 +103,7 @@ enum HNAE3_DEV_CAP_BITS {
HNAE3_DEV_SUPPORT_LANE_NUM_B,
HNAE3_DEV_SUPPORT_WOL_B,
HNAE3_DEV_SUPPORT_VF_FAULT_B,
HNAE3_DEV_SUPPORT_NOTIFY_PKT_B,
};
#define hnae3_ae_dev_fd_supported(ae_dev) \
......@@ -174,6 +175,9 @@ enum HNAE3_DEV_CAP_BITS {
#define hnae3_ae_dev_vf_fault_supported(ae_dev) \
test_bit(HNAE3_DEV_SUPPORT_VF_FAULT_B, (ae_dev)->caps)
#define hnae3_ae_dev_notify_pkt_supported(ae_dev) \
test_bit(HNAE3_DEV_SUPPORT_NOTIFY_PKT_B, (ae_dev)->caps)
enum HNAE3_PF_CAP_BITS {
HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0,
};
......@@ -785,6 +789,8 @@ struct hnae3_ae_ops {
struct ethtool_wolinfo *wol);
int (*set_wol)(struct hnae3_handle *handle,
struct ethtool_wolinfo *wol);
int (*priv_ops)(struct hnae3_handle *handle, int opcode,
void *data, size_t length);
};
struct hnae3_dcb_ops {
......
/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright (c) 2023 Hisilicon Limited.
#ifndef __HNAE3_EXT_H
#define __HNAE3_EXT_H
enum hnae3_event_type_custom {
HNAE3_VF_RESET_CUSTOM,
HNAE3_VF_FUNC_RESET_CUSTOM,
HNAE3_VF_PF_FUNC_RESET_CUSTOM,
HNAE3_VF_FULL_RESET_CUSTOM,
HNAE3_FLR_RESET_CUSTOM,
HNAE3_FUNC_RESET_CUSTOM,
HNAE3_GLOBAL_RESET_CUSTOM,
HNAE3_IMP_RESET_CUSTOM,
HNAE3_UNKNOWN_RESET_CUSTOM,
HNAE3_NONE_RESET_CUSTOM,
HNAE3_PORT_FAULT,
HNAE3_RESET_DONE_CUSTOM,
HNAE3_FUNC_RESET_FAIL_CUSTOM,
HNAE3_GLOBAL_RESET_FAIL_CUSTOM,
HNAE3_IMP_RESET_FAIL_CUSTOM,
HNAE3_PPU_POISON_CUSTOM,
HNAE3_IMP_RD_POISON_CUSTOM,
HNAE3_INVALID_EVENT_CUSTOM,
};
enum hnae3_ext_opcode {
HNAE3_EXT_OPC_RESET,
HNAE3_EXT_OPC_EVENT_CALLBACK,
HNAE3_EXT_OPC_GET_PFC_STORM_PARA,
HNAE3_EXT_OPC_SET_PFC_STORM_PARA,
HNAE3_EXT_OPC_SET_NOTIFY_PARAM,
HNAE3_EXT_OPC_SET_NOTIFY_START,
HNAE3_EXT_OPC_SET_TORUS_PARAM,
HNAE3_EXT_OPC_GET_TORUS_PARAM,
HNAE3_EXT_OPC_CLEAN_STATS64,
HNAE3_EXT_OPC_GET_PORT_EXT_ID_INFO,
HNAE3_EXT_OPC_GET_PORT_EXT_NUM_INFO,
HNAE3_EXT_OPC_GET_PORT_NUM,
};
struct hnae3_pfc_storm_para {
u32 dir;
u32 enable;
u32 period_ms;
u32 times;
u32 recovery_period_ms;
};
struct hnae3_notify_pkt_param {
u32 ipg; /* inter-packet gap for sending, in clock cycles */
u16 num; /* number of packets to send */
u8 enable; /* send enable: 0 = disable, 1 = enable */
u8 init; /* initialization flag; callers do not need to set it */
u8 data[64]; /* notify packet data */
};
struct hnae3_torus_param {
u32 enable; /* 1D torus mode enable */
u32 mac_id; /* MAC id of the port */
u8 is_node0; /* whether the current node is node 0 */
};
struct hane3_port_ext_id_info {
u32 chip_id;
u32 mac_id;
u32 io_die_id;
};
struct hane3_port_ext_num_info {
u32 chip_num;
u32 io_die_num;
};
#endif
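
Each opcode in enum hnae3_ext_opcode is tied to one of the payload structures above: callers pass a pointer to that structure and its exact sizeof() through the generic priv_ops hook, and the PF-side handler rejects any other length. A minimal sketch of that contract, assuming the hnae3.h definitions are in scope and using the RX direction as an example value:

```c
/* Sketch of the opcode/payload contract behind the priv_ops hook.
 * HNAE3_EXT_OPC_GET_PFC_STORM_PARA expects a struct hnae3_pfc_storm_para
 * together with its exact size; other lengths are rejected with -EINVAL.
 */
static int example_query_rx_pfc_storm(struct hnae3_handle *h,
                                      struct hnae3_pfc_storm_para *para)
{
        if (!h->ae_algo->ops->priv_ops)
                return -EOPNOTSUPP;

        para->dir = 0; /* 0 selects the RX direction */
        return h->ae_algo->ops->priv_ops(h, HNAE3_EXT_OPC_GET_PFC_STORM_PARA,
                                         para, sizeof(*para));
}
```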
......@@ -156,6 +156,7 @@ static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = {
{HCLGE_COMM_CAP_LANE_NUM_B, HNAE3_DEV_SUPPORT_LANE_NUM_B},
{HCLGE_COMM_CAP_WOL_B, HNAE3_DEV_SUPPORT_WOL_B},
{HCLGE_COMM_CAP_VF_FAULT_B, HNAE3_DEV_SUPPORT_VF_FAULT_B},
{HCLGE_COMM_CAP_NOTIFY_PKT_B, HNAE3_DEV_SUPPORT_NOTIFY_PKT_B},
};
static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
......
......@@ -348,6 +348,7 @@ enum HCLGE_COMM_CAP_BITS {
HCLGE_COMM_CAP_VF_FAULT_B = 26,
HCLGE_COMM_CAP_LANE_NUM_B = 27,
HCLGE_COMM_CAP_WOL_B = 28,
HCLGE_COMM_CAP_NOTIFY_PKT_B = 29,
};
enum HCLGE_COMM_API_CAP_BITS {
......
......@@ -24,6 +24,7 @@
#include <net/geneve.h>
#include "hnae3.h"
#include "hnae3_ext.h"
#include "hns3_enet.h"
/* All hns3 tracepoints are defined by the include below, which
* must be included exactly once across the whole kernel with
......@@ -6002,12 +6003,16 @@ static void hns3_process_hw_error(struct hnae3_handle *handle,
if (hns3_hw_err[i].type == type) {
dev_err(&handle->pdev->dev, "Detected %s!\n",
hns3_hw_err[i].msg);
if (handle->ae_algo->ops->priv_ops)
handle->ae_algo->ops->priv_ops(handle,
HNAE3_EXT_OPC_EVENT_CALLBACK, &type,
sizeof(type));
break;
}
}
}
static const struct hnae3_client_ops client_ops = {
const struct hnae3_client_ops client_ops = {
.init_instance = hns3_client_init,
.uninit_instance = hns3_client_uninit,
.link_status_change = hns3_link_status_change,
......
// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2023 Hisilicon Limited.
#include "hns3_ext.h"
int nic_netdev_match_check(struct net_device *ndev)
{
#define HNS3_DRIVER_NAME_LEN 5
struct ethtool_drvinfo drv_info;
struct hnae3_handle *h;
if (!ndev || !ndev->ethtool_ops ||
!ndev->ethtool_ops->get_drvinfo)
return -EINVAL;
ndev->ethtool_ops->get_drvinfo(ndev, &drv_info);
if (strncmp(drv_info.driver, "hns3", HNS3_DRIVER_NAME_LEN))
return -EINVAL;
h = hns3_get_handle(ndev);
if (h->flags & HNAE3_SUPPORT_VF)
return -EINVAL;
return 0;
}
EXPORT_SYMBOL(nic_netdev_match_check);
static int nic_invoke_pri_ops(struct net_device *ndev, int opcode,
void *data, size_t length)
{
struct hnae3_handle *h;
int ret;
if ((!data && length) || (data && !length)) {
netdev_err(ndev, "failed to check data and length");
return -EINVAL;
}
if (nic_netdev_match_check(ndev))
return -ENODEV;
h = hns3_get_handle(ndev);
if (!h->ae_algo->ops->priv_ops)
return -EOPNOTSUPP;
ret = h->ae_algo->ops->priv_ops(h, opcode, data, length);
if (ret)
netdev_err(ndev,
"failed to invoke pri ops, opcode = %#x, ret = %d\n",
opcode, ret);
return ret;
}
void nic_chip_recover_handler(struct net_device *ndev,
enum hnae3_event_type_custom event_t)
{
dev_info(&ndev->dev, "reset type is %d!!\n", event_t);
if (event_t == HNAE3_PPU_POISON_CUSTOM)
event_t = HNAE3_FUNC_RESET_CUSTOM;
if (event_t != HNAE3_FUNC_RESET_CUSTOM &&
event_t != HNAE3_GLOBAL_RESET_CUSTOM &&
event_t != HNAE3_IMP_RESET_CUSTOM) {
dev_err(&ndev->dev, "reset type err!!\n");
return;
}
nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_RESET, &event_t, sizeof(event_t));
}
EXPORT_SYMBOL(nic_chip_recover_handler);
static int nic_check_pfc_storm_para(int dir, int enable, int period_ms,
int times, int recovery_period_ms)
{
if ((dir != HNS3_PFC_STORM_PARA_DIR_RX &&
dir != HNS3_PFC_STORM_PARA_DIR_TX) ||
(enable != HNS3_PFC_STORM_PARA_DISABLE &&
enable != HNS3_PFC_STORM_PARA_ENABLE))
return -EINVAL;
if (period_ms < HNS3_PFC_STORM_PARA_PERIOD_MIN ||
period_ms > HNS3_PFC_STORM_PARA_PERIOD_MAX ||
recovery_period_ms < HNS3_PFC_STORM_PARA_PERIOD_MIN ||
recovery_period_ms > HNS3_PFC_STORM_PARA_PERIOD_MAX ||
times <= 0)
return -EINVAL;
return 0;
}
int nic_set_pfc_storm_para(struct net_device *ndev, int dir, int enable,
int period_ms, int times, int recovery_period_ms)
{
struct hnae3_pfc_storm_para para;
if (nic_check_pfc_storm_para(dir, enable, period_ms, times,
recovery_period_ms)) {
dev_err(&ndev->dev,
"set pfc storm para failed because invalid input param.\n");
return -EINVAL;
}
para.dir = dir;
para.enable = enable;
para.period_ms = period_ms;
para.times = times;
para.recovery_period_ms = recovery_period_ms;
return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_SET_PFC_STORM_PARA,
&para, sizeof(para));
}
EXPORT_SYMBOL(nic_set_pfc_storm_para);
int nic_get_pfc_storm_para(struct net_device *ndev, int dir, int *enable,
int *period_ms, int *times, int *recovery_period_ms)
{
struct hnae3_pfc_storm_para para;
int ret;
if (!enable || !period_ms || !times || !recovery_period_ms ||
(dir != HNS3_PFC_STORM_PARA_DIR_RX &&
dir != HNS3_PFC_STORM_PARA_DIR_TX)) {
dev_err(&ndev->dev,
"get pfc storm para failed because invalid input param.\n");
return -EINVAL;
}
para.dir = dir;
ret = nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_GET_PFC_STORM_PARA,
&para, sizeof(para));
if (ret)
return ret;
*enable = para.enable;
*period_ms = para.period_ms;
*times = para.times;
*recovery_period_ms = para.recovery_period_ms;
return 0;
}
EXPORT_SYMBOL(nic_get_pfc_storm_para);
int nic_set_notify_pkt_param(struct net_device *ndev,
struct hnae3_notify_pkt_param *param)
{
return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_SET_NOTIFY_PARAM,
param, sizeof(*param));
}
EXPORT_SYMBOL(nic_set_notify_pkt_param);
int nic_set_notify_pkt_start(struct net_device *ndev)
{
return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_SET_NOTIFY_START, NULL, 0);
}
EXPORT_SYMBOL(nic_set_notify_pkt_start);
int nic_set_torus_param(struct net_device *ndev, struct hnae3_torus_param *param)
{
if (!param || (param->enable != 0 && param->enable != 1))
return -EINVAL;
return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_SET_TORUS_PARAM,
param, sizeof(*param));
}
EXPORT_SYMBOL(nic_set_torus_param);
int nic_get_torus_param(struct net_device *ndev, struct hnae3_torus_param *param)
{
return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_GET_TORUS_PARAM,
param, sizeof(*param));
}
EXPORT_SYMBOL(nic_get_torus_param);
int nic_clean_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats)
{
struct hnae3_knic_private_info *kinfo;
struct hns3_enet_ring *ring;
struct hns3_nic_priv *priv;
struct hnae3_handle *h;
int i, ret;
priv = netdev_priv(ndev);
h = hns3_get_handle(ndev);
kinfo = &h->kinfo;
rtnl_lock();
if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) {
ret = -EBUSY;
goto end_unlock;
}
ret = nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_CLEAN_STATS64,
NULL, 0);
if (ret)
goto end_unlock;
for (i = 0; i < kinfo->num_tqps; i++) {
ring = &priv->ring[i];
memset(&ring->stats, 0, sizeof(struct ring_stats));
ring = &priv->ring[i + kinfo->num_tqps];
memset(&ring->stats, 0, sizeof(struct ring_stats));
}
memset(&ndev->stats, 0, sizeof(struct net_device_stats));
netdev_info(ndev, "clean stats succ\n");
end_unlock:
rtnl_unlock();
return ret;
}
EXPORT_SYMBOL(nic_clean_stats64);
int nic_set_cpu_affinity(struct net_device *ndev, cpumask_t *affinity_mask)
{
struct hns3_enet_tqp_vector *tqp_vector;
struct hns3_nic_priv *priv;
int ret = 0;
u16 i;
if (!ndev || !affinity_mask) {
netdev_err(ndev,
"Invalid input param when set ethernet cpu affinity\n");
return -EINVAL;
}
if (nic_netdev_match_check(ndev))
return -ENODEV;
priv = netdev_priv(ndev);
rtnl_lock();
if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) {
ret = -EBUSY;
goto err_unlock;
}
for (i = 0; i < priv->vector_num; i++) {
tqp_vector = &priv->tqp_vector[i];
if (tqp_vector->irq_init_flag != HNS3_VECTOR_INITED)
continue;
tqp_vector->affinity_mask = *affinity_mask;
ret = irq_set_affinity_hint(tqp_vector->vector_irq, NULL);
if (ret) {
netdev_err(ndev,
"failed to reset affinity hint, ret = %d\n", ret);
goto err_unlock;
}
ret = irq_set_affinity_hint(tqp_vector->vector_irq,
&tqp_vector->affinity_mask);
if (ret) {
netdev_err(ndev,
"failed to set affinity hint, ret = %d\n", ret);
goto err_unlock;
}
}
netdev_info(ndev, "set nic cpu affinity %*pb succeed\n",
cpumask_pr_args(affinity_mask));
err_unlock:
rtnl_unlock();
return ret;
}
EXPORT_SYMBOL(nic_set_cpu_affinity);
static int nic_get_ext_id_info(struct net_device *ndev,
struct hane3_port_ext_id_info *id_info)
{
return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_GET_PORT_EXT_ID_INFO,
id_info, sizeof(*id_info));
}
int nic_get_chipid(struct net_device *ndev, u32 *chip_id)
{
struct hane3_port_ext_id_info info;
int ret;
if (!chip_id)
return -EINVAL;
ret = nic_get_ext_id_info(ndev, &info);
if (ret)
return ret;
*chip_id = info.chip_id;
return 0;
}
EXPORT_SYMBOL(nic_get_chipid);
int nic_get_mac_id(struct net_device *ndev, u32 *mac_id)
{
struct hane3_port_ext_id_info info;
int ret;
if (!mac_id)
return -EINVAL;
ret = nic_get_ext_id_info(ndev, &info);
if (ret)
return ret;
*mac_id = info.mac_id;
return 0;
}
EXPORT_SYMBOL(nic_get_mac_id);
int nic_get_io_die_id(struct net_device *ndev, u32 *io_die_id)
{
struct hane3_port_ext_id_info info;
int ret;
if (!io_die_id)
return -EINVAL;
ret = nic_get_ext_id_info(ndev, &info);
if (ret)
return ret;
*io_die_id = info.io_die_id;
return 0;
}
EXPORT_SYMBOL(nic_get_io_die_id);
static int nic_get_ext_num_info(struct net_device *ndev,
struct hane3_port_ext_num_info *num_info)
{
return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_GET_PORT_EXT_NUM_INFO,
num_info, sizeof(*num_info));
}
int nic_get_chip_num(struct net_device *ndev, u32 *chip_num)
{
struct hane3_port_ext_num_info info;
int ret;
if (!chip_num)
return -EINVAL;
ret = nic_get_ext_num_info(ndev, &info);
if (ret)
return ret;
*chip_num = info.chip_num;
return 0;
}
EXPORT_SYMBOL(nic_get_chip_num);
int nic_get_io_die_num(struct net_device *ndev, u32 *io_die_num)
{
struct hane3_port_ext_num_info info;
int ret;
if (!io_die_num)
return -EINVAL;
ret = nic_get_ext_num_info(ndev, &info);
if (ret)
return ret;
*io_die_num = info.io_die_num;
return 0;
}
EXPORT_SYMBOL(nic_get_io_die_num);
int nic_get_port_num_of_die(struct net_device *ndev, u32 *port_num)
{
return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_GET_PORT_NUM,
port_num, sizeof(*port_num));
}
EXPORT_SYMBOL(nic_get_port_num_of_die);
int nic_get_port_num_per_chip(struct net_device *ndev, u32 *port_num)
{
return nic_get_port_num_of_die(ndev, port_num);
}
EXPORT_SYMBOL(nic_get_port_num_per_chip);
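
To show how the fast fault-reporting path is consumed, here is a hedged sketch of a callback registered through nic_register_event() (declared in hclge_ext.h further below). The choice of events to recover from is an illustrative policy, reusing nic_chip_recover_handler() from the file above.

```c
/* Hypothetical fault-event consumer: register a callback and feed
 * recoverable reset events back through nic_chip_recover_handler().
 * The event selection below is an example policy, not a requirement.
 */
static void storage_hns3_event_handler(struct net_device *netdev,
                                       enum hnae3_event_type_custom event)
{
        switch (event) {
        case HNAE3_FUNC_RESET_CUSTOM:
        case HNAE3_GLOBAL_RESET_CUSTOM:
        case HNAE3_IMP_RESET_CUSTOM:
        case HNAE3_PPU_POISON_CUSTOM:
                /* ask the driver to schedule the corresponding reset */
                nic_chip_recover_handler(netdev, event);
                break;
        default:
                netdev_warn(netdev, "unhandled hns3 event %d\n", event);
                break;
        }
}

static int storage_hns3_register_events(void)
{
        return nic_register_event(storage_hns3_event_handler);
}
```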
/* SPDX-License-Identifier: GPL-2.0+ */
/* Copyright (c) 2023 Hisilicon Limited. */
#ifndef __HNS3_EXT_H
#define __HNS3_EXT_H
#include <linux/types.h>
#include "hns3_enet.h"
#include "hnae3_ext.h"
#define HNS3_PFC_STORM_PARA_DIR_RX 0
#define HNS3_PFC_STORM_PARA_DIR_TX 1
#define HNS3_PFC_STORM_PARA_DISABLE 0
#define HNS3_PFC_STORM_PARA_ENABLE 1
#define HNS3_PFC_STORM_PARA_PERIOD_MIN 5
#define HNS3_PFC_STORM_PARA_PERIOD_MAX 2000
int nic_netdev_match_check(struct net_device *netdev);
void nic_chip_recover_handler(struct net_device *ndev,
enum hnae3_event_type_custom event_t);
int nic_set_pfc_storm_para(struct net_device *ndev, int dir, int enable,
int period_ms, int times, int recovery_period_ms);
int nic_get_pfc_storm_para(struct net_device *ndev, int dir, int *enable,
int *period_ms, int *times, int *recovery_period_ms);
int nic_set_notify_pkt_param(struct net_device *ndev,
struct hnae3_notify_pkt_param *param);
int nic_set_notify_pkt_start(struct net_device *ndev);
int nic_set_torus_param(struct net_device *ndev, struct hnae3_torus_param *param);
int nic_get_torus_param(struct net_device *ndev, struct hnae3_torus_param *param);
int nic_clean_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats);
int nic_set_cpu_affinity(struct net_device *ndev, cpumask_t *affinity_mask);
int nic_get_chipid(struct net_device *ndev, u32 *chip_id);
int nic_get_mac_id(struct net_device *ndev, u32 *mac_id);
int nic_get_io_die_id(struct net_device *ndev, u32 *io_die_id);
int nic_get_chip_num(struct net_device *ndev, u32 *chip_num);
int nic_get_io_die_num(struct net_device *ndev, u32 *io_die_num);
int nic_get_port_num_of_die(struct net_device *ndev, u32 *port_num);
int nic_get_port_num_per_chip(struct net_device *ndev, u32 *port_num);
#endif
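
The notify-packet interface declared above is a two-step flow: configure the packet content once with nic_set_notify_pkt_param(), then trigger sending with nic_set_notify_pkt_start(). A minimal sketch with illustrative payload and timing values:

```c
/* Sketch of the notify-packet flow: set the parameters, then start sending.
 * The ipg, packet count and payload byte below are example values only.
 */
static int storage_send_notify_pkt(struct net_device *ndev)
{
        struct hnae3_notify_pkt_param param = {
                .ipg = 1000,    /* inter-packet gap, in clock cycles */
                .num = 8,       /* number of packets to send */
                .enable = 1,    /* enable sending */
        };
        int ret;

        param.data[0] = 0xab;   /* example payload byte; the rest stays zero */

        ret = nic_set_notify_pkt_param(ndev, &param);
        if (ret)
                return ret;

        return nic_set_notify_pkt_start(ndev);
}
```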
// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2023 Hisilicon Limited.
#include "hclge_main.h"
#include "hnae3.h"
#include "hnae3_ext.h"
#include "hclge_cmd.h"
#include "hclge_ext.h"
static nic_event_fn_t nic_event_call;
/* Use a mutex so that the nic_event_call pointer stays valid while it is
 * being invoked, avoiding a NULL pointer dereference if the callback is
 * unregistered concurrently.
 */
static DEFINE_MUTEX(hclge_nic_event_lock);
static int hclge_set_pfc_storm_para(struct hclge_dev *hdev, void *data,
size_t length)
{
struct hclge_pfc_storm_para_cmd *para_cmd;
struct hnae3_pfc_storm_para *para;
struct hclge_desc desc;
int ret;
if (length != sizeof(struct hnae3_pfc_storm_para))
return -EINVAL;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PAUSE_STORM_PARA,
false);
para = (struct hnae3_pfc_storm_para *)data;
para_cmd = (struct hclge_pfc_storm_para_cmd *)desc.data;
para_cmd->dir = cpu_to_le32(para->dir);
para_cmd->enable = cpu_to_le32(para->enable);
para_cmd->period_ms = cpu_to_le32(para->period_ms);
para_cmd->times = cpu_to_le32(para->times);
para_cmd->recovery_period_ms = cpu_to_le32(para->recovery_period_ms);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(&hdev->pdev->dev,
"failed to set pfc storm para, ret = %d\n", ret);
return ret;
}
static int hclge_get_pfc_storm_para(struct hclge_dev *hdev, void *data,
size_t length)
{
struct hclge_pfc_storm_para_cmd *para_cmd;
struct hnae3_pfc_storm_para *para;
struct hclge_desc desc;
int ret;
if (length != sizeof(struct hnae3_pfc_storm_para))
return -EINVAL;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PAUSE_STORM_PARA, true);
para = (struct hnae3_pfc_storm_para *)data;
para_cmd = (struct hclge_pfc_storm_para_cmd *)desc.data;
para_cmd->dir = cpu_to_le32(para->dir);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to get pfc storm para, ret = %d\n", ret);
return ret;
}
para->enable = le32_to_cpu(para_cmd->enable);
para->period_ms = le32_to_cpu(para_cmd->period_ms);
para->times = le32_to_cpu(para_cmd->times);
para->recovery_period_ms = le32_to_cpu(para_cmd->recovery_period_ms);
return 0;
}
static int hclge_notify_packet_para_cmd_send(struct hclge_dev *hdev,
struct hclge_notify_pkt_param_cmd *param_cmd)
{
#define HCLGE_NOTIFY_PKT_DESC_NUM 4
struct hclge_desc desc[HCLGE_NOTIFY_PKT_DESC_NUM];
u32 i, desc_data_len;
desc_data_len = ARRAY_SIZE(desc[0].data);
for (i = 0; i < HCLGE_NOTIFY_PKT_DESC_NUM; i++) {
hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_SET_NOTIFY_PKT,
false);
if (i != HCLGE_NOTIFY_PKT_DESC_NUM - 1)
desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
}
for (i = 0; i < HCLGE_NOTIFY_PKT_DESC_NUM * desc_data_len; i++)
desc[i / desc_data_len].data[i % desc_data_len] =
*((__le32 *)param_cmd + i);
return hclge_cmd_send(&hdev->hw, desc, HCLGE_NOTIFY_PKT_DESC_NUM);
}
static int hclge_set_notify_packet_para(struct hclge_dev *hdev,
void *data, size_t length)
{
struct hnae3_notify_pkt_param *param = (struct hnae3_notify_pkt_param *)data;
struct hclge_notify_pkt_param_cmd param_cmd;
u32 i, pkt_cfg = 0;
int ret;
if (length != sizeof(struct hnae3_notify_pkt_param))
return -EINVAL;
if (!hnae3_ae_dev_notify_pkt_supported(hdev->ae_dev))
return -EOPNOTSUPP;
if (param->enable)
pkt_cfg = HCLGE_NOTIFY_PARA_CFG_PKT_EN;
hnae3_set_field(pkt_cfg, HCLGE_NOTIFY_PARA_CFG_PKT_NUM_M,
HCLGE_NOTIFY_PARA_CFG_PKT_NUM_S, param->num);
param_cmd.cfg = cpu_to_le32(pkt_cfg);
param_cmd.ipg = cpu_to_le32(param->ipg);
for (i = 0; i < ARRAY_SIZE(param_cmd.data); i++)
param_cmd.data[i] = cpu_to_le32(*((u32 *)param->data + i));
hnae3_set_bit(param_cmd.vld_cfg, 0, 1);
hnae3_set_bit(param_cmd.vld_ipg, 0, 1);
hnae3_set_bit(param_cmd.vld_data, 0, 1);
ret = hclge_notify_packet_para_cmd_send(hdev, &param_cmd);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to set notify packet content, ret = %d\n", ret);
return ret;
}
param->init = 1;
memcpy(&hdev->notify_param, param, sizeof(*param));
return 0;
}
static int hclge_set_notify_packet_start(struct hclge_dev *hdev,
void *data, size_t length)
{
u32 pkt_cfg = HCLGE_NOTIFY_PARA_CFG_START_EN;
struct hclge_notify_pkt_param_cmd param_cmd;
int ret;
if (!hnae3_ae_dev_notify_pkt_supported(hdev->ae_dev))
return -EOPNOTSUPP;
memset(&param_cmd, 0, sizeof(param_cmd));
param_cmd.cfg = cpu_to_le32(pkt_cfg);
hnae3_set_bit(param_cmd.vld_cfg, 0, 1);
ret = hclge_notify_packet_para_cmd_send(hdev, &param_cmd);
if (ret)
dev_err(&hdev->pdev->dev,
"failed to send notify packet, ret = %d\n", ret);
return ret;
}
static int hclge_torus_cfg_switch(struct hclge_dev *hdev, bool is_rocee,
bool enabled)
{
struct hclge_mac_vlan_switch_cmd *req;
struct hclge_desc desc;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SWITCH_PARAM, true);
req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
req->roce_sel = is_rocee ? 1 : 0;
/* set 0 to let firmware choose current function */
req->func_id = 0;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to get switch param, ret = %d\n", ret);
return ret;
}
hnae3_set_bit(req->switch_param, HCLGE_SWITCH_ALW_LPBK_B, 1);
hnae3_set_bit(req->switch_param, HCLGE_SWITCH_ALW_LCL_LPBK_B, 0);
hnae3_set_bit(req->switch_param, HCLGE_SWITCH_ANTI_SPOOF_B, enabled);
if (!is_rocee)
hnae3_set_bit(req->switch_param, HCLGE_SWITCH_ALW_DST_OVRD_B,
enabled);
hclge_comm_cmd_reuse_desc(&desc, false);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(&hdev->pdev->dev,
"failed to set switch param, ret = %d\n", ret);
return ret;
}
static int hclge_torus_cfg_vlan_filter(struct hclge_dev *hdev,
bool enabled)
{
struct hclge_vlan_filter_ctrl_cmd *req;
struct hclge_desc desc;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_VLAN_FILTER, true);
req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
req->vlan_type = HCLGE_FILTER_TYPE_PORT;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to get torus vlan filter, ret = %d\n", ret);
return ret;
}
hnae3_set_bit(req->vlan_fe, HCLGE_VLAN_FE_NIC_INGRESS, !enabled);
hnae3_set_bit(req->vlan_fe, HCLGE_VLAN_FE_ROCEE_INGRESS, !enabled);
req->vlan_type = HCLGE_FILTER_TYPE_PORT;
hclge_comm_cmd_reuse_desc(&desc, false);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(&hdev->pdev->dev,
"failed to set torus vlan filter, ret = %d\n", ret);
return ret;
}
static int hclge_torus_cfg(struct hclge_dev *hdev,
struct hnae3_torus_param *param)
{
struct hclge_torus_cfg_cmd *req;
struct hclge_desc desc;
u32 lan_fwd_tc_cfg = 0;
u32 lan_port_pair = 0;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_1D_TORUS, true);
req = (struct hclge_torus_cfg_cmd *)desc.data;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to get torus config, ret = %d\n", ret);
return ret;
}
req->lan_port_pair = cpu_to_le32(param->mac_id &
HCLGE_TORUS_MAC_ID_MASK);
hnae3_set_bit(lan_port_pair, HCLGE_UC_LAN_PAIR_EN, 1);
hnae3_set_bit(lan_port_pair, HCLGE_MC_BC_LAN_PAIR_EN, 1);
hnae3_set_bit(lan_port_pair, HCLGE_LLDP_LAN_PAIR_EN, 1);
hnae3_set_bit(lan_port_pair, HCLGE_TC2VLANPRI_MAPPING_EN, 1);
hnae3_set_bit(lan_port_pair, HCLGE_TORUS_LPBK_DROP_EN, 1);
if (param->enable)
req->lan_port_pair |= cpu_to_le32(lan_port_pair);
if (!param->is_node0) {
req->lan_fwd_tc_cfg &= cpu_to_le32(~HCLGE_TORUS_TC1_DROP_EN);
lan_fwd_tc_cfg &= ~HCLGE_TOURS_TCX_MAP_TCY_MASK;
lan_fwd_tc_cfg |= HCLGE_TOURS_TCX_MAP_TCY_INIT &
HCLGE_TOURS_TCX_MAP_TCY_MASK;
req->lan_fwd_tc_cfg |= cpu_to_le32(lan_fwd_tc_cfg);
} else {
req->lan_fwd_tc_cfg |= cpu_to_le32(HCLGE_TORUS_TC1_DROP_EN);
lan_fwd_tc_cfg &= ~HCLGE_TOURS_TCX_MAP_TCY_MASK;
lan_fwd_tc_cfg |= HCLGE_TOURS_TCX_MAP_TCY_NODE0_INIT &
HCLGE_TOURS_TCX_MAP_TCY_MASK;
req->lan_fwd_tc_cfg |= cpu_to_le32(lan_fwd_tc_cfg);
}
req->torus_en = cpu_to_le32(param->enable);
hclge_comm_cmd_reuse_desc(&desc, false);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(&hdev->pdev->dev, "failed to set torus, ret = %d\n",
ret);
return ret;
}
static int hclge_set_torus_param(struct hclge_dev *hdev, void *data,
size_t length)
{
struct hnae3_torus_param *param = (struct hnae3_torus_param *)data;
int ret;
if (length != sizeof(struct hnae3_torus_param))
return -EINVAL;
ret = hclge_torus_cfg_switch(hdev, false, !!param->enable);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to config nic switch param, ret = %d\n", ret);
return ret;
}
ret = hclge_torus_cfg_switch(hdev, true, !!param->enable);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to config roce switch param, ret = %d\n", ret);
return ret;
}
ret = hclge_torus_cfg_vlan_filter(hdev, !!param->enable);
if (ret)
return ret;
ret = hclge_torus_cfg(hdev, param);
if (ret)
return ret;
hdev->torus_param = *param;
return 0;
}
static int hclge_get_torus_param(struct hclge_dev *hdev, void *data,
size_t length)
{
struct hnae3_torus_param *param = (struct hnae3_torus_param *)data;
struct hclge_torus_cfg_cmd *req;
struct hclge_desc desc;
int ret;
if (length != sizeof(struct hnae3_torus_param))
return -EINVAL;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_1D_TORUS, true);
req = (struct hclge_torus_cfg_cmd *)desc.data;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to get torus param, ret = %d\n", ret);
return ret;
}
param->mac_id =
le32_to_cpu(req->lan_port_pair) & HCLGE_TORUS_MAC_ID_MASK;
param->enable = le32_to_cpu(req->torus_en);
return 0;
}
static int hclge_clean_stats64(struct hclge_dev *hdev, void *data,
size_t length)
{
struct hnae3_knic_private_info *kinfo;
struct hclge_comm_tqp *tqp;
int i;
kinfo = &hdev->vport[0].nic.kinfo;
for (i = 0; i < kinfo->num_tqps; i++) {
tqp = container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
memset(&tqp->tqp_stats, 0, sizeof(struct hclge_comm_tqp_stats));
}
memset(&hdev->mac_stats, 0, sizeof(struct hclge_mac_stats));
return 0;
}
static int hclge_get_info_from_cmd(struct hclge_dev *hdev,
struct hclge_desc *desc, u32 num, int opcode)
{
u32 i;
for (i = 0; i < num; i++) {
hclge_cmd_setup_basic_desc(desc + i, opcode, true);
if (i != num - 1)
desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
}
return hclge_cmd_send(&hdev->hw, desc, num);
}
static int hclge_get_extend_port_id_info(struct hclge_dev *hdev,
void *data, size_t length)
{
struct hane3_port_ext_id_info *info;
struct hclge_id_info_cmd *info_cmd;
struct hclge_desc desc;
int ret;
if (length != sizeof(struct hane3_port_ext_id_info))
return -EINVAL;
ret = hclge_get_info_from_cmd(hdev, &desc, 1, HCLGE_OPC_CHIP_ID_GET);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to get extend port id info, ret = %d\n",
ret);
return ret;
}
info_cmd = (struct hclge_id_info_cmd *)desc.data;
info = (struct hane3_port_ext_id_info *)data;
info->chip_id = le32_to_cpu(info_cmd->chip_id);
info->mac_id = le32_to_cpu(info_cmd->mac_id);
info->io_die_id = le32_to_cpu(info_cmd->io_die_id);
return 0;
}
static int hclge_get_extend_port_num_info(struct hclge_dev *hdev,
void *data, size_t length)
{
struct hane3_port_ext_num_info *num_info;
struct hclge_num_info_cmd *resp;
struct hclge_desc desc;
int ret;
if (length != sizeof(struct hane3_port_ext_num_info))
return -EINVAL;
ret = hclge_get_info_from_cmd(hdev, &desc, 1, HCLGE_OPC_GET_CHIP_NUM);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to get extend port number info, ret = %d\n", ret);
return ret;
}
resp = (struct hclge_num_info_cmd *)(desc.data);
num_info = (struct hane3_port_ext_num_info *)data;
num_info->chip_num = le32_to_cpu(resp->chip_num);
num_info->io_die_num = le32_to_cpu(resp->io_die_num);
return 0;
}
static int hclge_get_port_num(struct hclge_dev *hdev, void *data,
size_t length)
{
struct hclge_port_num_info_cmd *resp;
struct hclge_desc desc;
int ret;
if (length != sizeof(u32))
return -EINVAL;
ret = hclge_get_info_from_cmd(hdev, &desc, 1, HCLGE_OPC_GET_PORT_NUM);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to get port number, ret = %d\n", ret);
return ret;
}
resp = (struct hclge_port_num_info_cmd *)(desc.data);
*(u32 *)data = le32_to_cpu(resp->port_num);
return 0;
}
static void hclge_ext_restore_config(struct hclge_dev *hdev)
{
if (hdev->reset_type != HNAE3_IMP_RESET &&
hdev->reset_type != HNAE3_GLOBAL_RESET)
return;
if (hdev->notify_param.init)
hclge_set_notify_packet_para(hdev, &hdev->notify_param,
sizeof(hdev->notify_param));
hclge_set_torus_param(hdev, &hdev->torus_param,
sizeof(hdev->torus_param));
}
static int hclge_set_reset_task(struct hclge_dev *hdev, void *data,
size_t length)
{
u32 *reset_level = (u32 *)data;
if (length != sizeof(u32))
return -EINVAL;
dev_warn(&hdev->pdev->dev, "reset level is %u\n", *reset_level);
/* request reset & schedule reset task */
set_bit(*reset_level, &hdev->reset_request);
hclge_reset_task_schedule(hdev);
return 0;
}
int hclge_ext_call_event(struct hclge_dev *hdev,
enum hnae3_event_type_custom event_t)
{
if (event_t >= HNAE3_INVALID_EVENT_CUSTOM)
return -EINVAL;
mutex_lock(&hclge_nic_event_lock);
if (!nic_event_call) {
mutex_unlock(&hclge_nic_event_lock);
return -EOPNOTSUPP;
}
nic_event_call(hdev->vport[0].nic.netdev, event_t);
mutex_unlock(&hclge_nic_event_lock);
return 0;
}
int nic_register_event(nic_event_fn_t event_call)
{
if (!event_call) {
pr_err("hns3: register event handle is null\n");
return -EINVAL;
}
mutex_lock(&hclge_nic_event_lock);
if (nic_event_call) {
mutex_unlock(&hclge_nic_event_lock);
pr_err("hns3: event already register\n");
return -EBUSY;
}
nic_event_call = event_call;
mutex_unlock(&hclge_nic_event_lock);
pr_info("hns3: event register success\n");
return 0;
}
EXPORT_SYMBOL(nic_register_event);
int nic_unregister_event(void)
{
mutex_lock(&hclge_nic_event_lock);
nic_event_call = NULL;
mutex_unlock(&hclge_nic_event_lock);
pr_info("hns3: event unregister success\n");
return 0;
}
EXPORT_SYMBOL(nic_unregister_event);
static int hclge_nic_call_event(struct hclge_dev *hdev, void *data,
size_t length)
{
#define ERROR_EVENT_TYPE_NUM 3
u32 event_type[ERROR_EVENT_TYPE_NUM] = {
HNAE3_PPU_POISON_CUSTOM,
HNAE3_IMP_RESET_CUSTOM,
HNAE3_IMP_RD_POISON_CUSTOM
};
u32 *index = (u32 *)data;
if (length != sizeof(u32))
return -EINVAL;
if ((*index) >= ERROR_EVENT_TYPE_NUM)
return 0;
return hclge_ext_call_event(hdev, event_type[*index]);
}
static enum hnae3_event_type_custom
hclge_get_reset_fail_type(enum hnae3_reset_type reset_type)
{
const struct hclge_reset_fail_type_map fail_type_map[] = {
{HNAE3_FUNC_RESET, HNAE3_FUNC_RESET_FAIL_CUSTOM},
{HNAE3_GLOBAL_RESET, HNAE3_GLOBAL_RESET_FAIL_CUSTOM},
{HNAE3_IMP_RESET, HNAE3_IMP_RESET_FAIL_CUSTOM},
};
u32 i;
for (i = 0; i < ARRAY_SIZE(fail_type_map); i++)
if (fail_type_map[i].reset_type == reset_type)
return fail_type_map[i].custom_type;
return HNAE3_INVALID_EVENT_CUSTOM;
}
static void hclge_report_reset_fail_custom(struct hclge_dev *hdev)
{
#define HCLGE_RESET_MAX_FAIL_CNT_CUSTOM 1
u32 max_fail_custom_cnt = HCLGE_RESET_MAX_FAIL_CNT;
mutex_lock(&hclge_nic_event_lock);
if (nic_event_call)
max_fail_custom_cnt = HCLGE_RESET_MAX_FAIL_CNT_CUSTOM;
mutex_unlock(&hclge_nic_event_lock);
if (hdev->rst_stats.reset_fail_cnt < max_fail_custom_cnt)
return;
dev_err(&hdev->pdev->dev, "failed to report reset!\n");
hclge_ext_call_event(hdev, hclge_get_reset_fail_type(hdev->reset_type));
}
void hclge_ext_reset_end(struct hclge_dev *hdev, bool done)
{
if (!done) {
hclge_report_reset_fail_custom(hdev);
return;
}
hclge_ext_restore_config(hdev);
hclge_ext_call_event(hdev, HNAE3_RESET_DONE_CUSTOM);
dev_info(&hdev->pdev->dev, "report reset done!\n");
}
static const hclge_priv_ops_fn hclge_ext_func_arr[] = {
[HNAE3_EXT_OPC_RESET] = hclge_set_reset_task,
[HNAE3_EXT_OPC_EVENT_CALLBACK] = hclge_nic_call_event,
[HNAE3_EXT_OPC_GET_PFC_STORM_PARA] = hclge_get_pfc_storm_para,
[HNAE3_EXT_OPC_SET_PFC_STORM_PARA] = hclge_set_pfc_storm_para,
[HNAE3_EXT_OPC_SET_NOTIFY_PARAM] = hclge_set_notify_packet_para,
[HNAE3_EXT_OPC_SET_NOTIFY_START] = hclge_set_notify_packet_start,
[HNAE3_EXT_OPC_SET_TORUS_PARAM] = hclge_set_torus_param,
[HNAE3_EXT_OPC_GET_TORUS_PARAM] = hclge_get_torus_param,
[HNAE3_EXT_OPC_CLEAN_STATS64] = hclge_clean_stats64,
[HNAE3_EXT_OPC_GET_PORT_EXT_ID_INFO] = hclge_get_extend_port_id_info,
[HNAE3_EXT_OPC_GET_PORT_EXT_NUM_INFO] = hclge_get_extend_port_num_info,
[HNAE3_EXT_OPC_GET_PORT_NUM] = hclge_get_port_num,
};
int hclge_ext_ops_handle(struct hnae3_handle *handle, int opcode,
void *data, size_t length)
{
struct hclge_vport *vport = hclge_get_vport(handle);
int cmd_num = ARRAY_SIZE(hclge_ext_func_arr);
struct hclge_dev *hdev = vport->back;
hclge_priv_ops_fn ext_opcode_func;
if (opcode >= cmd_num) {
dev_err(&hdev->pdev->dev, "invalid opcode %d\n", opcode);
return -EINVAL;
}
ext_opcode_func = hclge_ext_func_arr[opcode];
if (!ext_opcode_func) {
dev_err(&hdev->pdev->dev, "unsupported opcode %d\n", opcode);
return -EOPNOTSUPP;
}
return ext_opcode_func(hdev, data, length);
}
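
With the PF-side torus handlers above wired into the dispatch table, a 1D torus get/modify/set round trip through the netdev-facing helpers declared in hns3_ext.h might look like the sketch below; treating this port as node 0 is an assumption of the example.

```c
/* Sketch of a 1D torus round trip using the helpers from hns3_ext.h.
 * Treating this port as node 0 is an illustrative assumption.
 */
static int storage_enable_1d_torus(struct net_device *ndev)
{
        struct hnae3_torus_param param = { 0 };
        int ret;

        ret = nic_get_torus_param(ndev, &param);
        if (ret)
                return ret;

        param.enable = 1;       /* turn on 1D torus mode */
        param.is_node0 = 1;     /* example: this port acts as node 0 */

        return nic_set_torus_param(ndev, &param);
}
```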
/* SPDX-License-Identifier: GPL-2.0+ */
/* Copyright (c) 2016-2017 Hisilicon Limited. */
#ifndef __HCLGE_EXT_H
#define __HCLGE_EXT_H
#include <linux/types.h>
#define HCLGE_NOTIFY_PARA_CFG_PKT_EN BIT(0)
#define HCLGE_NOTIFY_PARA_CFG_START_EN BIT(1)
#define HCLGE_NOTIFY_PARA_CFG_PKT_NUM_M GENMASK(5, 2)
#define HCLGE_NOTIFY_PARA_CFG_PKT_NUM_S 2
#define HCLGE_TORUS_MAC_ID_MASK 0x3
#define HCLGE_TOURS_TCX_MAP_TCY_INIT 0x1c6144
#define HCLGE_TOURS_TCX_MAP_TCY_NODE0_INIT 0x1c6141
#define HCLGE_VLAN_FE_NIC_INGRESS 0
#define HCLGE_VLAN_FE_ROCEE_INGRESS 2
#define HCLGE_TORUS_LPBK_DROP_EN 20
#define HCLGE_TC2VLANPRI_MAPPING_EN 19
#define HCLGE_LLDP_LAN_PAIR_EN 18
#define HCLGE_MC_BC_LAN_PAIR_EN 17
#define HCLGE_UC_LAN_PAIR_EN 16
#define HCLGE_TORUS_TC1_DROP_EN BIT(26)
#define HCLGE_TOURS_TCX_MAP_TCY_MASK 0x1c71c7
struct hclge_id_info_cmd {
__le32 chip_id;
__le32 mac_id;
__le32 io_die_id;
u8 rsv[12];
};
struct hclge_num_info_cmd {
__le32 chip_num;
__le32 io_die_num;
u8 rsv[16];
};
struct hclge_port_num_info_cmd {
__le32 port_num;
u8 rsv[20];
};
struct hclge_pfc_storm_para_cmd {
__le32 dir;
__le32 enable;
__le32 period_ms;
__le32 times;
__le32 recovery_period_ms;
__le32 rsv;
};
struct hclge_notify_pkt_param_cmd {
__le32 cfg;
__le32 ipg;
__le32 data[16];
u8 vld_cfg;
u8 vld_ipg;
u8 vld_data;
u8 rsv[21];
};
struct hclge_torus_cfg_cmd {
u8 rsv[4];
__le32 lan_port_pair;
__le32 lan_fwd_tc_cfg;
__le32 pause_time_out;
__le32 pause_time_out_en;
__le32 torus_en;
};
enum hclge_ext_opcode_type {
HCLGE_OPC_CONFIG_SWITCH_PARAM = 0x1033,
HCLGE_OPC_CONFIG_VLAN_FILTER = 0x1100,
HCLGE_OPC_SET_NOTIFY_PKT = 0x180A,
HCLGE_OPC_CONFIG_1D_TORUS = 0x2300,
HCLGE_OPC_CHIP_ID_GET = 0x7003,
HCLGE_OPC_GET_CHIP_NUM = 0x7005,
HCLGE_OPC_GET_PORT_NUM = 0x7006,
HCLGE_OPC_CFG_PAUSE_STORM_PARA = 0x7019,
};
struct hclge_reset_fail_type_map {
enum hnae3_reset_type reset_type;
enum hnae3_event_type_custom custom_type;
};
typedef int (*hclge_priv_ops_fn)(struct hclge_dev *hdev, void *data,
size_t length);
/**
* nic_event_fn_t - nic event handler prototype
* @netdev: net device
* @hnae3_event_type_custom: nic device event type
*/
typedef void (*nic_event_fn_t) (struct net_device *netdev,
enum hnae3_event_type_custom);
/**
* nic_register_event - register for nic event handling
* @event_call: nic event handler
* Return: 0 on success, negative error code on failure
*/
int nic_register_event(nic_event_fn_t event_call);
/**
* nic_unregister_event - unregister for nic event handling
* Return: 0 on success, negative error code on failure
*/
int nic_unregister_event(void);
int hclge_ext_call_event(struct hclge_dev *hdev,
enum hnae3_event_type_custom event_t);
void hclge_ext_reset_end(struct hclge_dev *hdev, bool done);
int hclge_ext_ops_handle(struct hnae3_handle *handle, int opcode,
void *data, size_t length);
#endif
......@@ -18,6 +18,7 @@
#include <net/vxlan.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_ext.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
......@@ -36,7 +37,6 @@
#define BUF_MAX_PERCENT 100
#define BUF_RESERVE_PERCENT 90
#define HCLGE_RESET_MAX_FAIL_CNT 5
#define HCLGE_RESET_SYNC_TIME 100
#define HCLGE_PF_RESET_SYNC_TIME 20
#define HCLGE_PF_RESET_SYNC_CNT 1500
......@@ -72,6 +72,7 @@ static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
static void hclge_sync_fd_table(struct hclge_dev *hdev);
static void hclge_reset_end(struct hnae3_handle *handle, bool done);
static struct hnae3_ae_algo ae_algo;
......@@ -2922,7 +2923,7 @@ static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
}
}
static void hclge_reset_task_schedule(struct hclge_dev *hdev)
void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) &&
......@@ -4139,7 +4140,7 @@ static void hclge_show_rst_info(struct hclge_dev *hdev)
static bool hclge_reset_err_handle(struct hclge_dev *hdev)
{
#define MAX_RESET_FAIL_CNT 5
struct hnae3_handle *handle = &hdev->vport[0].nic;
if (hdev->reset_pending) {
dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
......@@ -4151,7 +4152,7 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev)
"reset failed because new reset interrupt\n");
hclge_clear_reset_cause(hdev);
return false;
} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
} else if (hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT) {
hdev->rst_stats.reset_fail_cnt++;
set_bit(hdev->reset_type, &hdev->reset_pending);
dev_info(&hdev->pdev->dev,
......@@ -4165,7 +4166,7 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev)
/* recover the handshake status when reset fail */
hclge_reset_handshake(hdev, true);
dev_err(&hdev->pdev->dev, "Reset fail!\n");
hclge_reset_end(handle, false);
hclge_show_rst_info(hdev);
......@@ -4286,6 +4287,7 @@ static int hclge_reset_prepare(struct hclge_dev *hdev)
static int hclge_reset_rebuild(struct hclge_dev *hdev)
{
struct hnae3_handle *handle = &hdev->vport[0].nic;
int ret;
hdev->rst_stats.hw_reset_done_cnt++;
......@@ -4347,6 +4349,8 @@ static int hclge_reset_rebuild(struct hclge_dev *hdev)
hclge_update_reset_level(hdev);
hclge_reset_end(handle, true);
return 0;
}
......@@ -4368,10 +4372,11 @@ static void hclge_reset(struct hclge_dev *hdev)
hclge_reset_task_schedule(hdev);
}
static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
struct hclge_dev *hdev = ae_dev->priv;
int ret;
/* We might end up getting called broadly because of 2 below cases:
* 1. Recoverable error was conveyed through APEI and only way to bring
......@@ -4405,9 +4410,12 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
hdev->reset_level);
/* request reset & schedule reset task */
set_bit(hdev->reset_level, &hdev->reset_request);
hclge_reset_task_schedule(hdev);
ret = hclge_ext_call_event(hdev, hdev->reset_level);
if (ret) {
/* request reset & schedule reset task */
set_bit(hdev->reset_level, &hdev->reset_request);
hclge_reset_task_schedule(hdev);
}
if (hdev->reset_level < HNAE3_GLOBAL_RESET)
hdev->reset_level++;
......@@ -4433,7 +4441,15 @@ static void hclge_reset_timer(struct timer_list *t)
dev_info(&hdev->pdev->dev,
"triggering reset in reset timer\n");
hclge_reset_event(hdev->pdev, NULL);
hclge_reset_event(hdev->pdev, &hdev->vport[0].nic);
}
static void hclge_reset_end(struct hnae3_handle *handle, bool done)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
hclge_ext_reset_end(hdev, done);
}
static void hclge_reset_subtask(struct hclge_dev *hdev)
......@@ -4473,8 +4489,8 @@ static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
hclge_set_def_reset_request(ae_dev, reset_type);
}
if (hdev->default_reset_request && ae_dev->ops->reset_event)
ae_dev->ops->reset_event(hdev->pdev, NULL);
if (hdev->default_reset_request)
hclge_reset_event(hdev->pdev, &hdev->vport[0].nic);
/* enable interrupt after error handling complete */
hclge_enable_vector(&hdev->misc_vector, true);
......@@ -13753,7 +13769,7 @@ static int hclge_get_dscp_prio(struct hnae3_handle *h, u8 dscp, u8 *tc_mode,
return 0;
}
static const struct hnae3_ae_ops hclge_ops = {
struct hnae3_ae_ops hclge_ops = {
.init_ae_dev = hclge_init_ae_dev,
.uninit_ae_dev = hclge_uninit_ae_dev,
.reset_prepare = hclge_reset_prepare_general,
......@@ -13860,6 +13876,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_dscp_prio = hclge_get_dscp_prio,
.get_wol = hclge_get_wol,
.set_wol = hclge_set_wol,
.priv_ops = hclge_ext_ops_handle,
};
static struct hnae3_ae_algo ae_algo = {
......
......@@ -12,7 +12,7 @@
#include "hclge_cmd.h"
#include "hclge_ptp.h"
#include "hnae3.h"
#include "hnae3_ext.h"
#include "hclge_comm_rss.h"
#include "hclge_comm_tqp_stats.h"
......@@ -26,6 +26,8 @@
#define HCLGE_RD_FIRST_STATS_NUM 2
#define HCLGE_RD_OTHER_STATS_NUM 4
#define HCLGE_RESET_MAX_FAIL_CNT 5
#define HCLGE_INVALID_VPORT 0xffff
#define HCLGE_PF_CFG_BLOCK_SIZE 32
......@@ -957,6 +959,8 @@ struct hclge_dev {
struct hclge_ptp *ptp;
struct devlink *devlink;
struct hclge_comm_rss_cfg rss_cfg;
struct hnae3_notify_pkt_param notify_param;
struct hnae3_torus_param torus_param;
};
/* VPort level vlan tag configuration for TX direction */
......@@ -1159,4 +1163,6 @@ int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed, u8 duplex,
int hclge_get_wol_supported_mode(struct hclge_dev *hdev, u32 *wol_supported);
int hclge_get_wol_cfg(struct hclge_dev *hdev, u32 *mode);
struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf);
void hclge_reset_task_schedule(struct hclge_dev *hdev);
void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle);
#endif
......@@ -811,7 +811,7 @@ static void hclge_handle_ncsi_error(struct hclge_dev *hdev)
ae_dev->ops->set_default_reset_request(ae_dev, HNAE3_GLOBAL_RESET);
dev_warn(&hdev->pdev->dev, "requesting reset due to NCSI error\n");
ae_dev->ops->reset_event(hdev->pdev, NULL);
hclge_reset_event(hdev->pdev, &hdev->vport[0].nic);
}
static void hclge_handle_vf_tbl(struct hclge_vport *vport,
......